//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
VerifySCEVMap("verify-scev-maps",
              cl::desc("Verify no dangling value in ScalarEvolution's "
                       "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
                cl::desc("Maximum depth of recursive SExt/ZExt"),
                cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#ifdef LLVM_ENABLE_DUMP
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(SmallSet<std::pair<Value *, Value *>, 8> &EqCache,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCache.count({LV, RV}))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCache, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCache.insert({LV, RV});
  return 0;
}

// Return negative, zero, or positive if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to
// be more efficient.
static int CompareSCEVComplexity(
    SmallSet<std::pair<const SCEV *, const SCEV *>, 8> &EqCacheSCEV,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.count({LHS, RHS}))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
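  // For example, scConstant is the first (smallest) kind in the SCEVTypes
  // enum, so a constant operand, if present, always sorts to the front of an
  // expression; isNonConstantNegative above relies on exactly this invariant.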
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    SmallSet<std::pair<Value *, Value *>, 8> EqCache;
    int X = CompareValueComplexity(EqCache, LI, LU->getValue(), RU->getValue(),
                                   Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recurrences used
    // by one SCEV, so we can safely sort recurrences by loop header
    // dominance. We require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LA->getOperand(i),
                                    RA->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      if (i >= RNumOps)
        return 1;
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(i),
                                    RC->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getLHS(), RC->getLHS(),
                                  DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getRHS(), RC->getRHS(), DT,
                              Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(),
                                  RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  SmallSet<std::pair<const SCEV *, const SCEV *>, 8> EqCache;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCache, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&EqCache, LI, &DT](const SCEV *LHS, const SCEV *RHS) {
                     return
                         CompareSCEVComplexity(EqCache, LI, LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because
  // we do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
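  // For example, in SCEV print notation, dividing (8 * %x * %y) by (4 * %x)
  // peels the factors 4 and %x off the numerator in turn, leaving a Quotient
  // of (2 * %y) and a Remainder of 0.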
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // The simple case N/1: the quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator, so the following visitors are left empty.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient
  // to be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }
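  // For example, for K = 4 this loop leaves T == 3 and OddFactorial == 3,
  // since 4! == 24 == 2^3 * 3. (T starts at 1 to account for the factor of
  // two contributed by i == 2, which the loop skips.)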

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
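  // For instance, (trunc i32 257 to i8) folds to the constant 1.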
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
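// For example, for an 8-bit recurrence whose Step is known to be 1, the
// limit is -128 - 1 == 127 (the subtraction wraps mod 2^8) with predicate
// SLT: any value <s 127 can be incremented by 1 without signed overflow.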
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling.
// This allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once"
  // implies "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
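// For example, when the proofs in getPreStartForExtend succeed for
// AR = {(4 + %x),+,4}, the zero-extended start is normalized from
// zext(4 + %x) to (4 + zext(%x)).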
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
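    // (With Delta == 1 this is exactly the motivating example above: an
    // existing {S-1,+,X} known to be <nuw>, plus {S-1,+,X} u< -1, proves
    // that {S,+,X} is <nuw>.)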
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
1625 // Note that this serves two purposes: It filters out loops that are
1626 // simply not analyzable, and it covers the case where this code is
1627 // being called from within backedge-taken count analysis, such that
1628 // attempting to ask for the backedge-taken count would likely result
1629 // in infinite recursion. In the latter case, the analysis code will
1630 // cope with a conservative value, and it will take care to purge
1631 // that value once it has finished.
1632 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1633 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1634 // Manually compute the final value for AR, checking for
1635 // overflow.
1636
1637 // Check whether the backedge-taken count can be losslessly cast to
1638 // the addrec's type. The count is always unsigned.
1639 const SCEV *CastedMaxBECount =
1640 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1641 const SCEV *RecastedMaxBECount =
1642 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1643 if (MaxBECount == RecastedMaxBECount) {
1644 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1645 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1646 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1647 SCEV::FlagAnyWrap, Depth + 1);
1648 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1649 SCEV::FlagAnyWrap,
1650 Depth + 1),
1651 WideTy, Depth + 1);
1652 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1653 const SCEV *WideMaxBECount =
1654 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1655 const SCEV *OperandExtendedAdd =
1656 getAddExpr(WideStart,
1657 getMulExpr(WideMaxBECount,
1658 getZeroExtendExpr(Step, WideTy, Depth + 1),
1659 SCEV::FlagAnyWrap, Depth + 1),
1660 SCEV::FlagAnyWrap, Depth + 1);
1661 if (ZAdd == OperandExtendedAdd) {
1662 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1663 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1664 // Return the expression with the addrec on the outside.
1665 return getAddRecExpr(
1666 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1667 Depth + 1),
1668 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1669 AR->getNoWrapFlags());
1670 }
1671 // Similar to above, only this time treat the step value as signed.
1672 // This covers loops that count down.
1673 OperandExtendedAdd =
1674 getAddExpr(WideStart,
1675 getMulExpr(WideMaxBECount,
1676 getSignExtendExpr(Step, WideTy, Depth + 1),
1677 SCEV::FlagAnyWrap, Depth + 1),
1678 SCEV::FlagAnyWrap, Depth + 1);
1679 if (ZAdd == OperandExtendedAdd) {
1680 // Cache knowledge of AR NW, which is propagated to this AddRec.
1681 // Negative step causes unsigned wrap, but it still can't self-wrap.
1682 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1683 // Return the expression with the addrec on the outside.
1684 return getAddRecExpr(
1685 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1686 Depth + 1),
1687 getSignExtendExpr(Step, Ty, Depth + 1), L,
1688 AR->getNoWrapFlags());
1689 }
1690 }
1691 }
1692
1693 // Normally, in the cases we can prove no-overflow via a
1694 // backedge guarding condition, we can also compute a backedge
1695 // taken count for the loop. The exceptions are assumptions and
1696 // guards present in the loop -- SCEV is not great at exploiting
1697 // these to compute max backedge taken counts, but can still use
1698 // these to prove lack of overflow. Use this fact to avoid
1699 // doing extra work that may not pay off.
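// Illustrative sketch (hypothetical loop, not from a particular test): for
//   for (i8 X = 0; X <u 100; ++X) { ... zext i8 X to i32 ... }
// the backedge is guarded by `X ult 100`. With Step == 1, the limit N
// computed below is APInt::getMinValue(8) - 1, which wraps to 255, and the
// guard `X ult 100` implies `X ult 255` on every backedge-taken iteration,
// proving the increment cannot unsigned-wrap, i.e. the addrec is NUW.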
1700 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1701 !AC.assumptions().empty()) { 1702 // If the backedge is guarded by a comparison with the pre-inc 1703 // value the addrec is safe. Also, if the entry is guarded by 1704 // a comparison with the start value and the backedge is 1705 // guarded by a comparison with the post-inc value, the addrec 1706 // is safe. 1707 if (isKnownPositive(Step)) { 1708 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1709 getUnsignedRangeMax(Step)); 1710 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1711 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && 1712 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, 1713 AR->getPostIncExpr(*this), N))) { 1714 // Cache knowledge of AR NUW, which is propagated to this 1715 // AddRec. 1716 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1717 // Return the expression with the addrec on the outside. 1718 return getAddRecExpr( 1719 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1720 Depth + 1), 1721 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1722 AR->getNoWrapFlags()); 1723 } 1724 } else if (isKnownNegative(Step)) { 1725 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1726 getSignedRangeMin(Step)); 1727 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1728 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && 1729 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, 1730 AR->getPostIncExpr(*this), N))) { 1731 // Cache knowledge of AR NW, which is propagated to this 1732 // AddRec. Negative step causes unsigned wrap, but it 1733 // still can't self-wrap. 1734 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1735 // Return the expression with the addrec on the outside. 1736 return getAddRecExpr( 1737 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1738 Depth + 1), 1739 getSignExtendExpr(Step, Ty, Depth + 1), L, 1740 AR->getNoWrapFlags()); 1741 } 1742 } 1743 } 1744 1745 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1746 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1747 return getAddRecExpr( 1748 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1749 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1750 } 1751 } 1752 1753 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1754 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1755 if (SA->hasNoUnsignedWrap()) { 1756 // If the addition does not unsign overflow then we can, by definition, 1757 // commute the zero extension with the addition operation. 1758 SmallVector<const SCEV *, 4> Ops; 1759 for (const auto *Op : SA->operands()) 1760 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1761 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1762 } 1763 } 1764 1765 // The cast wasn't folded; create an explicit cast node. 1766 // Recompute the insert position, as it may have been invalidated. 
1767 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1768 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1769 Op, Ty); 1770 UniqueSCEVs.InsertNode(S, IP); 1771 addToLoopUseLists(S); 1772 return S; 1773 } 1774 1775 const SCEV * 1776 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1777 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1778 "This is not an extending conversion!"); 1779 assert(isSCEVable(Ty) && 1780 "This is not a conversion to a SCEVable type!"); 1781 Ty = getEffectiveSCEVType(Ty); 1782 1783 // Fold if the operand is constant. 1784 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1785 return getConstant( 1786 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1787 1788 // sext(sext(x)) --> sext(x) 1789 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1790 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1791 1792 // sext(zext(x)) --> zext(x) 1793 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1794 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1795 1796 // Before doing any expensive analysis, check to see if we've already 1797 // computed a SCEV for this Op and Ty. 1798 FoldingSetNodeID ID; 1799 ID.AddInteger(scSignExtend); 1800 ID.AddPointer(Op); 1801 ID.AddPointer(Ty); 1802 void *IP = nullptr; 1803 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1804 // Limit recursion depth. 1805 if (Depth > MaxExtDepth) { 1806 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1807 Op, Ty); 1808 UniqueSCEVs.InsertNode(S, IP); 1809 addToLoopUseLists(S); 1810 return S; 1811 } 1812 1813 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1814 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1815 // It's possible the bits taken off by the truncate were all sign bits. If 1816 // so, we should be able to simplify this further. 1817 const SCEV *X = ST->getOperand(); 1818 ConstantRange CR = getSignedRange(X); 1819 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1820 unsigned NewBits = getTypeSizeInBits(Ty); 1821 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1822 CR.sextOrTrunc(NewBits))) 1823 return getTruncateOrSignExtend(X, Ty); 1824 } 1825 1826 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2 1827 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1828 if (SA->getNumOperands() == 2) { 1829 auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0)); 1830 auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1)); 1831 if (SMul && SC1) { 1832 if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) { 1833 const APInt &C1 = SC1->getAPInt(); 1834 const APInt &C2 = SC2->getAPInt(); 1835 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && 1836 C2.ugt(C1) && C2.isPowerOf2()) 1837 return getAddExpr(getSignExtendExpr(SC1, Ty, Depth + 1), 1838 getSignExtendExpr(SMul, Ty, Depth + 1), 1839 SCEV::FlagAnyWrap, Depth + 1); 1840 } 1841 } 1842 } 1843 1844 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1845 if (SA->hasNoSignedWrap()) { 1846 // If the addition does not sign overflow then we can, by definition, 1847 // commute the sign extension with the addition operation. 
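// For example (illustrative): sext((x + 42)<nsw>) --> (sext(x) + 42)<nsw>.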
1848 SmallVector<const SCEV *, 4> Ops;
1849 for (const auto *Op : SA->operands())
1850 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
1851 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
1852 }
1853 }
1854 // If the input value is a chrec scev, and we can prove that the value
1855 // did not overflow the old, smaller, value, we can sign extend all of the
1856 // operands (often constants). This allows analysis of something like
1857 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1858 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1859 if (AR->isAffine()) {
1860 const SCEV *Start = AR->getStart();
1861 const SCEV *Step = AR->getStepRecurrence(*this);
1862 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1863 const Loop *L = AR->getLoop();
1864
1865 if (!AR->hasNoSignedWrap()) {
1866 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1867 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
1868 }
1869
1870 // If we have special knowledge that this addrec won't overflow,
1871 // we don't need to do any further analysis.
1872 if (AR->hasNoSignedWrap())
1873 return getAddRecExpr(
1874 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1875 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
1876
1877 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1878 // Note that this serves two purposes: It filters out loops that are
1879 // simply not analyzable, and it covers the case where this code is
1880 // being called from within backedge-taken count analysis, such that
1881 // attempting to ask for the backedge-taken count would likely result
1882 // in infinite recursion. In the latter case, the analysis code will
1883 // cope with a conservative value, and it will take care to purge
1884 // that value once it has finished.
1885 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1886 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1887 // Manually compute the final value for AR, checking for
1888 // overflow.
1889
1890 // Check whether the backedge-taken count can be losslessly cast to
1891 // the addrec's type. The count is always unsigned.
1892 const SCEV *CastedMaxBECount =
1893 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1894 const SCEV *RecastedMaxBECount =
1895 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1896 if (MaxBECount == RecastedMaxBECount) {
1897 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1898 // Check whether Start+Step*MaxBECount has no signed overflow.
1899 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
1900 SCEV::FlagAnyWrap, Depth + 1);
1901 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
1902 SCEV::FlagAnyWrap,
1903 Depth + 1),
1904 WideTy, Depth + 1);
1905 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
1906 const SCEV *WideMaxBECount =
1907 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1908 const SCEV *OperandExtendedAdd =
1909 getAddExpr(WideStart,
1910 getMulExpr(WideMaxBECount,
1911 getSignExtendExpr(Step, WideTy, Depth + 1),
1912 SCEV::FlagAnyWrap, Depth + 1),
1913 SCEV::FlagAnyWrap, Depth + 1);
1914 if (SAdd == OperandExtendedAdd) {
1915 // Cache knowledge of AR NSW, which is propagated to this AddRec.
1916 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1917 // Return the expression with the addrec on the outside.
1918 return getAddRecExpr( 1919 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1920 Depth + 1), 1921 getSignExtendExpr(Step, Ty, Depth + 1), L, 1922 AR->getNoWrapFlags()); 1923 } 1924 // Similar to above, only this time treat the step value as unsigned. 1925 // This covers loops that count up with an unsigned step. 1926 OperandExtendedAdd = 1927 getAddExpr(WideStart, 1928 getMulExpr(WideMaxBECount, 1929 getZeroExtendExpr(Step, WideTy, Depth + 1), 1930 SCEV::FlagAnyWrap, Depth + 1), 1931 SCEV::FlagAnyWrap, Depth + 1); 1932 if (SAdd == OperandExtendedAdd) { 1933 // If AR wraps around then 1934 // 1935 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 1936 // => SAdd != OperandExtendedAdd 1937 // 1938 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 1939 // (SAdd == OperandExtendedAdd => AR is NW) 1940 1941 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1942 1943 // Return the expression with the addrec on the outside. 1944 return getAddRecExpr( 1945 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1946 Depth + 1), 1947 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1948 AR->getNoWrapFlags()); 1949 } 1950 } 1951 } 1952 1953 // Normally, in the cases we can prove no-overflow via a 1954 // backedge guarding condition, we can also compute a backedge 1955 // taken count for the loop. The exceptions are assumptions and 1956 // guards present in the loop -- SCEV is not great at exploiting 1957 // these to compute max backedge taken counts, but can still use 1958 // these to prove lack of overflow. Use this fact to avoid 1959 // doing extra work that may not pay off. 1960 1961 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1962 !AC.assumptions().empty()) { 1963 // If the backedge is guarded by a comparison with the pre-inc 1964 // value the addrec is safe. Also, if the entry is guarded by 1965 // a comparison with the start value and the backedge is 1966 // guarded by a comparison with the post-inc value, the addrec 1967 // is safe. 1968 ICmpInst::Predicate Pred; 1969 const SCEV *OverflowLimit = 1970 getSignedOverflowLimitForStep(Step, &Pred, this); 1971 if (OverflowLimit && 1972 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 1973 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) && 1974 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this), 1975 OverflowLimit)))) { 1976 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 
1977 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1978 return getAddRecExpr(
1979 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1980 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1981 }
1982 }
1983
1984 // If Start and Step are constants, check if we can apply this
1985 // transformation:
1986 // sext{C1,+,C2} --> C1 + sext{0,+,C2} if 0 < C1 < C2, C2 a power of two
1987 auto *SC1 = dyn_cast<SCEVConstant>(Start);
1988 auto *SC2 = dyn_cast<SCEVConstant>(Step);
1989 if (SC1 && SC2) {
1990 const APInt &C1 = SC1->getAPInt();
1991 const APInt &C2 = SC2->getAPInt();
1992 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
1993 C2.isPowerOf2()) {
1994 Start = getSignExtendExpr(Start, Ty, Depth + 1);
1995 const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
1996 AR->getNoWrapFlags());
1997 return getAddExpr(Start, getSignExtendExpr(NewAR, Ty, Depth + 1),
1998 SCEV::FlagAnyWrap, Depth + 1);
1999 }
2000 }
2001
2002 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2003 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
2004 return getAddRecExpr(
2005 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2006 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2007 }
2008 }
2009
2010 // If the input value is provably non-negative and we could not simplify
2011 // away the sext, build a zext instead.
2012 if (isKnownNonNegative(Op))
2013 return getZeroExtendExpr(Op, Ty, Depth + 1);
2014
2015 // The cast wasn't folded; create an explicit cast node.
2016 // Recompute the insert position, as it may have been invalidated.
2017 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2018 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2019 Op, Ty);
2020 UniqueSCEVs.InsertNode(S, IP);
2021 addToLoopUseLists(S);
2022 return S;
2023 }
2024
2025 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2026 /// unspecified bits out to the given type.
2027 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2028 Type *Ty) {
2029 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2030 "This is not an extending conversion!");
2031 assert(isSCEVable(Ty) &&
2032 "This is not a conversion to a SCEVable type!");
2033 Ty = getEffectiveSCEVType(Ty);
2034
2035 // Sign-extend negative constants.
2036 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2037 if (SC->getAPInt().isNegative())
2038 return getSignExtendExpr(Op, Ty);
2039
2040 // Peel off a truncate cast.
2041 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2042 const SCEV *NewOp = T->getOperand();
2043 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2044 return getAnyExtendExpr(NewOp, Ty);
2045 return getTruncateOrNoop(NewOp, Ty);
2046 }
2047
2048 // Next try a zext cast. If the cast is folded, use it.
2049 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2050 if (!isa<SCEVZeroExtendExpr>(ZExt))
2051 return ZExt;
2052
2053 // Next try a sext cast. If the cast is folded, use it.
2054 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2055 if (!isa<SCEVSignExtendExpr>(SExt))
2056 return SExt;
2057
2058 // Force the cast to be folded into the operands of an addrec.
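// For example (illustrative): anyext({S,+,X}<L>) becomes
// {anyext(S),+,anyext(X)}<L>; only the no-self-wrap flag (NW) is kept,
// since the extension kind of each operand is left unspecified.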
2059 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2060 SmallVector<const SCEV *, 4> Ops;
2061 for (const SCEV *Op : AR->operands())
2062 Ops.push_back(getAnyExtendExpr(Op, Ty));
2063 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2064 }
2065
2066 // If the expression is obviously signed, use the sext cast value.
2067 if (isa<SCEVSMaxExpr>(Op))
2068 return SExt;
2069
2070 // Absent any other information, use the zext cast value.
2071 return ZExt;
2072 }
2073
2074 /// Process the given Ops list, which is a list of operands to be added under
2075 /// the given scale, and update the given map. This is a helper function for
2076 /// getAddExpr. As an example of what it does, given a sequence of operands
2077 /// that would form an add expression like this:
2078 ///
2079 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2080 ///
2081 /// where A and B are constants, update the map with these values:
2082 ///
2083 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2084 ///
2085 /// and add 13 + A*B*29 to AccumulatedConstant.
2086 /// This will allow getAddExpr to produce this:
2087 ///
2088 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2089 ///
2090 /// This form often exposes folding opportunities that are hidden in
2091 /// the original operand list.
2092 ///
2093 /// Return true iff it appears that any interesting folding opportunities
2094 /// may be exposed. This helps getAddExpr short-circuit extra work in
2095 /// the common case where no interesting opportunities are present, and
2096 /// is also used as a check to avoid infinite recursion.
2097 static bool
2098 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2099 SmallVectorImpl<const SCEV *> &NewOps,
2100 APInt &AccumulatedConstant,
2101 const SCEV *const *Ops, size_t NumOperands,
2102 const APInt &Scale,
2103 ScalarEvolution &SE) {
2104 bool Interesting = false;
2105
2106 // Iterate over the add operands. They are sorted, with constants first.
2107 unsigned i = 0;
2108 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2109 ++i;
2110 // Pull a buried constant out to the outside.
2111 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2112 Interesting = true;
2113 AccumulatedConstant += Scale * C->getAPInt();
2114 }
2115
2116 // Next comes everything else. We're especially interested in multiplies
2117 // here, but they're in the middle, so just visit the rest with one loop.
2118 for (; i != NumOperands; ++i) {
2119 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2120 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2121 APInt NewScale =
2122 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2123 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2124 // A multiplication of a constant with another add; recurse.
2125 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2126 Interesting |=
2127 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2128 Add->op_begin(), Add->getNumOperands(),
2129 NewScale, SE);
2130 } else {
2131 // A multiplication of a constant with some other value. Update
2132 // the map.
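// (Illustrative: encountering 3*x*y while recursing with Scale == 2
// records the key x*y with a scale of 6 in the map.)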
2133 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
2134 const SCEV *Key = SE.getMulExpr(MulOps);
2135 auto Pair = M.insert({Key, NewScale});
2136 if (Pair.second) {
2137 NewOps.push_back(Pair.first->first);
2138 } else {
2139 Pair.first->second += NewScale;
2140 // The map already had an entry for this value, which may indicate
2141 // a folding opportunity.
2142 Interesting = true;
2143 }
2144 }
2145 } else {
2146 // An ordinary operand. Update the map.
2147 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
2148 M.insert({Ops[i], Scale});
2149 if (Pair.second) {
2150 NewOps.push_back(Pair.first->first);
2151 } else {
2152 Pair.first->second += Scale;
2153 // The map already had an entry for this value, which may indicate
2154 // a folding opportunity.
2155 Interesting = true;
2156 }
2157 }
2158 }
2159
2160 return Interesting;
2161 }
2162
2163 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2164 // `Flags' as can't-wrap behavior. Infer a more aggressive set of
2165 // can't-overflow flags for the operation if possible.
2166 static SCEV::NoWrapFlags
2167 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2168 const SmallVectorImpl<const SCEV *> &Ops,
2169 SCEV::NoWrapFlags Flags) {
2170 using namespace std::placeholders;
2171
2172 using OBO = OverflowingBinaryOperator;
2173
2174 bool CanAnalyze =
2175 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2176 (void)CanAnalyze;
2177 assert(CanAnalyze && "don't call from other places!");
2178
2179 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2180 SCEV::NoWrapFlags SignOrUnsignWrap =
2181 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2182
2183 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2184 auto IsKnownNonNegative = [&](const SCEV *S) {
2185 return SE->isKnownNonNegative(S);
2186 };
2187
2188 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2189 Flags =
2190 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2191
2192 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2193
2194 if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr &&
2195 Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) {
2196
2197 // (A + C) --> (A + C)<nsw> if the addition does not sign overflow
2198 // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow
2199
2200 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2201 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2202 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2203 Instruction::Add, C, OBO::NoSignedWrap);
2204 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2205 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2206 }
2207 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2208 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2209 Instruction::Add, C, OBO::NoUnsignedWrap);
2210 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2211 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2212 }
2213 }
2214
2215 return Flags;
2216 }
2217
2218 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2219 if (!isLoopInvariant(S, L))
2220 return false;
2221 // If a value depends on a SCEVUnknown which is defined after the loop, we
2222 // conservatively assume that we cannot calculate it at the loop's entry.
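// Illustrative example (hypothetical IR): if %v is an instruction placed
// after the loop (its block dominated by the loop header), then a SCEV such
// as (1 + %v) is loop-invariant yet still not available at the loop entry;
// the traversal below detects exactly this case.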
2223 struct FindDominatedSCEVUnknown {
2224 bool Found = false;
2225 const Loop *L;
2226 DominatorTree &DT;
2227 LoopInfo &LI;
2228
2229 FindDominatedSCEVUnknown(const Loop *L, DominatorTree &DT, LoopInfo &LI)
2230 : L(L), DT(DT), LI(LI) {}
2231
2232 bool checkSCEVUnknown(const SCEVUnknown *SU) {
2233 if (auto *I = dyn_cast<Instruction>(SU->getValue())) {
2234 if (DT.dominates(L->getHeader(), I->getParent()))
2235 Found = true;
2236 else
2237 assert(DT.dominates(I->getParent(), L->getHeader()) &&
2238 "No dominance relationship between SCEV and loop?");
2239 }
2240 return false;
2241 }
2242
2243 bool follow(const SCEV *S) {
2244 switch (static_cast<SCEVTypes>(S->getSCEVType())) {
2245 case scConstant:
2246 return false;
2247 case scAddRecExpr:
2248 case scTruncate:
2249 case scZeroExtend:
2250 case scSignExtend:
2251 case scAddExpr:
2252 case scMulExpr:
2253 case scUMaxExpr:
2254 case scSMaxExpr:
2255 case scUDivExpr:
2256 return true;
2257 case scUnknown:
2258 return checkSCEVUnknown(cast<SCEVUnknown>(S));
2259 case scCouldNotCompute:
2260 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2261 }
2262 return false;
2263 }
2264
2265 bool isDone() { return Found; }
2266 };
2267
2268 FindDominatedSCEVUnknown FSU(L, DT, LI);
2269 SCEVTraversal<FindDominatedSCEVUnknown> ST(FSU);
2270 ST.visitAll(S);
2271 return !FSU.Found;
2272 }
2273
2274 /// Get a canonical add expression, or something simpler if possible.
2275 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2276 SCEV::NoWrapFlags Flags,
2277 unsigned Depth) {
2278 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2279 "only nuw or nsw allowed");
2280 assert(!Ops.empty() && "Cannot get empty add!");
2281 if (Ops.size() == 1) return Ops[0];
2282 #ifndef NDEBUG
2283 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2284 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2285 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2286 "SCEVAddExpr operand types don't match!");
2287 #endif
2288
2289 // Sort by complexity; this groups all similar expression types together.
2290 GroupByComplexity(Ops, &LI, DT);
2291
2292 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
2293
2294 // If there are any constants, fold them together.
2295 unsigned Idx = 0;
2296 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2297 ++Idx;
2298 assert(Idx < Ops.size());
2299 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2300 // We found two constants, fold them together!
2301 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2302 if (Ops.size() == 2) return Ops[0];
2303 Ops.erase(Ops.begin()+1); // Erase the folded element
2304 LHSC = cast<SCEVConstant>(Ops[0]);
2305 }
2306
2307 // If we are left with a constant zero being added, strip it off.
2308 if (LHSC->getValue()->isZero()) {
2309 Ops.erase(Ops.begin());
2310 --Idx;
2311 }
2312
2313 if (Ops.size() == 1) return Ops[0];
2314 }
2315
2316 // Limit recursion depth.
2317 if (Depth > MaxArithDepth)
2318 return getOrCreateAddExpr(Ops, Flags);
2319
2320 // Okay, check to see if the same value occurs in the operand list more than
2321 // once. If so, merge them together into a multiply expression. Since we
2322 // sorted the list, these values are required to be adjacent.
2323 Type *Ty = Ops[0]->getType();
2324 bool FoundMatch = false;
2325 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2326 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2327 // Scan ahead to count how many equal operands there are.
2328 unsigned Count = 2;
2329 while (i+Count != e && Ops[i+Count] == Ops[i])
2330 ++Count;
2331 // Merge the values into a multiply.
2332 const SCEV *Scale = getConstant(Ty, Count);
2333 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2334 if (Ops.size() == Count)
2335 return Mul;
2336 Ops[i] = Mul;
2337 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2338 --i; e -= Count - 1;
2339 FoundMatch = true;
2340 }
2341 if (FoundMatch)
2342 return getAddExpr(Ops, Flags);
2343
2344 // Check for truncates. If all the operands are truncated from the same
2345 // type, see if factoring out the truncate would permit the result to be
2346 // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2347 // if the contents of the resulting outer trunc fold to something simple.
2348 auto FindTruncSrcType = [&]() -> Type * {
2349 // We're ultimately looking to fold an add of truncs and muls of only
2350 // constants and truncs, so if we find any other types of SCEV
2351 // as operands of the add then we bail and return nullptr here.
2352 // Otherwise, we return the type of the operand of a trunc that we find.
2353 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2354 return T->getOperand()->getType();
2355 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2356 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2357 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2358 return T->getOperand()->getType();
2359 }
2360 return nullptr;
2361 };
2362 if (auto *SrcType = FindTruncSrcType()) {
2363 SmallVector<const SCEV *, 8> LargeOps;
2364 bool Ok = true;
2365 // Check all the operands to see if they can be represented in the
2366 // source type of the truncate.
2367 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2368 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2369 if (T->getOperand()->getType() != SrcType) {
2370 Ok = false;
2371 break;
2372 }
2373 LargeOps.push_back(T->getOperand());
2374 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2375 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2376 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2377 SmallVector<const SCEV *, 8> LargeMulOps;
2378 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2379 if (const SCEVTruncateExpr *T =
2380 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2381 if (T->getOperand()->getType() != SrcType) {
2382 Ok = false;
2383 break;
2384 }
2385 LargeMulOps.push_back(T->getOperand());
2386 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2387 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2388 } else {
2389 Ok = false;
2390 break;
2391 }
2392 }
2393 if (Ok)
2394 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2395 } else {
2396 Ok = false;
2397 break;
2398 }
2399 }
2400 if (Ok) {
2401 // Evaluate the expression in the larger type.
2402 const SCEV *Fold = getAddExpr(LargeOps, Flags, Depth + 1);
2403 // If it folds to something simple, use it. Otherwise, don't.
2404 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2405 return getTruncateExpr(Fold, Ty);
2406 }
2407 }
2408
2409 // Skip past any other cast SCEVs.
2410 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2411 ++Idx;
2412
2413 // If there are add operands, they would be next.
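// For instance (illustrative), (a + (b + c)) arrives here as operands
// {a, (b + c)}; the loop below splices the inner add's operands into the
// list, giving {a, b, c}, and then recurses to re-sort and re-fold them.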
2414 if (Idx < Ops.size()) {
2415 bool DeletedAdd = false;
2416 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2417 if (Ops.size() > AddOpsInlineThreshold ||
2418 Add->getNumOperands() > AddOpsInlineThreshold)
2419 break;
2420 // If we have an add, expand the add operands onto the end of the operands
2421 // list.
2422 Ops.erase(Ops.begin()+Idx);
2423 Ops.append(Add->op_begin(), Add->op_end());
2424 DeletedAdd = true;
2425 }
2426
2427 // If we deleted at least one add, we added operands to the end of the list,
2428 // and they are not necessarily sorted. Recurse to resort and resimplify
2429 // any operands we just acquired.
2430 if (DeletedAdd)
2431 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2432 }
2433
2434 // Skip over the add expressions until we get to a multiply.
2435 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2436 ++Idx;
2437
2438 // Check to see if there are any folding opportunities present with
2439 // operands multiplied by constant values.
2440 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2441 uint64_t BitWidth = getTypeSizeInBits(Ty);
2442 DenseMap<const SCEV *, APInt> M;
2443 SmallVector<const SCEV *, 8> NewOps;
2444 APInt AccumulatedConstant(BitWidth, 0);
2445 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2446 Ops.data(), Ops.size(),
2447 APInt(BitWidth, 1), *this)) {
2448 struct APIntCompare {
2449 bool operator()(const APInt &LHS, const APInt &RHS) const {
2450 return LHS.ult(RHS);
2451 }
2452 };
2453
2454 // Some interesting folding opportunity is present, so it's worthwhile to
2455 // re-generate the operands list. Group the operands by constant scale,
2456 // to avoid multiplying by the same constant scale multiple times.
2457 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2458 for (const SCEV *NewOp : NewOps)
2459 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2460 // Re-generate the operands list.
2461 Ops.clear();
2462 if (AccumulatedConstant != 0)
2463 Ops.push_back(getConstant(AccumulatedConstant));
2464 for (auto &MulOp : MulOpLists)
2465 if (MulOp.first != 0)
2466 Ops.push_back(getMulExpr(
2467 getConstant(MulOp.first),
2468 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2469 SCEV::FlagAnyWrap, Depth + 1));
2470 if (Ops.empty())
2471 return getZero(Ty);
2472 if (Ops.size() == 1)
2473 return Ops[0];
2474 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2475 }
2476 }
2477
2478 // If we are adding something to a multiply expression, make sure the
2479 // something is not already an operand of the multiply. If so, merge it into
2480 // the multiply.
2481 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2482 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2483 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2484 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2485 if (isa<SCEVConstant>(MulOpSCEV))
2486 continue;
2487 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2488 if (MulOpSCEV == Ops[AddOp]) {
2489 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2490 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2491 if (Mul->getNumOperands() != 2) {
2492 // If the multiply has more than two operands, we must get the
2493 // Y*Z term.
2494 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2495 Mul->op_begin()+MulOp); 2496 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2497 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2498 } 2499 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2500 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2501 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2502 SCEV::FlagAnyWrap, Depth + 1); 2503 if (Ops.size() == 2) return OuterMul; 2504 if (AddOp < Idx) { 2505 Ops.erase(Ops.begin()+AddOp); 2506 Ops.erase(Ops.begin()+Idx-1); 2507 } else { 2508 Ops.erase(Ops.begin()+Idx); 2509 Ops.erase(Ops.begin()+AddOp-1); 2510 } 2511 Ops.push_back(OuterMul); 2512 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2513 } 2514 2515 // Check this multiply against other multiplies being added together. 2516 for (unsigned OtherMulIdx = Idx+1; 2517 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2518 ++OtherMulIdx) { 2519 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2520 // If MulOp occurs in OtherMul, we can fold the two multiplies 2521 // together. 2522 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2523 OMulOp != e; ++OMulOp) 2524 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2525 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2526 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2527 if (Mul->getNumOperands() != 2) { 2528 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2529 Mul->op_begin()+MulOp); 2530 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2531 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2532 } 2533 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2534 if (OtherMul->getNumOperands() != 2) { 2535 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2536 OtherMul->op_begin()+OMulOp); 2537 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2538 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2539 } 2540 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2541 const SCEV *InnerMulSum = 2542 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2543 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2544 SCEV::FlagAnyWrap, Depth + 1); 2545 if (Ops.size() == 2) return OuterMul; 2546 Ops.erase(Ops.begin()+Idx); 2547 Ops.erase(Ops.begin()+OtherMulIdx-1); 2548 Ops.push_back(OuterMul); 2549 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2550 } 2551 } 2552 } 2553 } 2554 2555 // If there are any add recurrences in the operands list, see if any other 2556 // added values are loop invariant. If so, we can fold them into the 2557 // recurrence. 2558 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2559 ++Idx; 2560 2561 // Scan over all recurrences, trying to fold loop invariants into them. 2562 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2563 // Scan all of the other operands to this add and add them to the vector if 2564 // they are loop invariant w.r.t. the recurrence. 2565 SmallVector<const SCEV *, 8> LIOps; 2566 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2567 const Loop *AddRecLoop = AddRec->getLoop(); 2568 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2569 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2570 LIOps.push_back(Ops[i]); 2571 Ops.erase(Ops.begin()+i); 2572 --i; --e; 2573 } 2574 2575 // If we found some loop invariants, fold them into the recurrence. 
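// Example (illustrative): given Ops == {x, {0,+,1}<L>} with x invariant in
// L, LIOps becomes {x} and the addrec absorbs it, so the whole add folds to
// {x,+,1}<L>.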
2576 if (!LIOps.empty()) {
2577 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2578 LIOps.push_back(AddRec->getStart());
2579
2580 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2581 AddRec->op_end());
2582 // This follows from the fact that the no-wrap flags on the outer add
2583 // expression are applicable on the 0th iteration, when the add recurrence
2584 // will be equal to its start value.
2585 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);
2586
2587 // Build the new addrec. Propagate the NUW and NSW flags if both the
2588 // outer add and the inner addrec are guaranteed to have no overflow.
2589 // Always propagate NW.
2590 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2591 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2592
2593 // If all of the other operands were loop invariant, we are done.
2594 if (Ops.size() == 1) return NewRec;
2595
2596 // Otherwise, add the folded AddRec to the non-invariant parts.
2597 for (unsigned i = 0;; ++i)
2598 if (Ops[i] == AddRec) {
2599 Ops[i] = NewRec;
2600 break;
2601 }
2602 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2603 }
2604
2605 // Okay, if there weren't any loop invariants to be folded, check to see if
2606 // there are multiple AddRec's with the same loop induction variable being
2607 // added together. If so, we can fold them.
2608 for (unsigned OtherIdx = Idx+1;
2609 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2610 ++OtherIdx) {
2611 // We expect the AddRecExpr's to be sorted in reverse dominance order,
2612 // so that the 1st found AddRecExpr is dominated by all others.
2613 assert(DT.dominates(
2614 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2615 AddRec->getLoop()->getHeader()) &&
2616 "AddRecExprs are not sorted in reverse dominance order?");
2617 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2618 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2619 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2620 AddRec->op_end());
2621 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2622 ++OtherIdx) {
2623 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2624 if (OtherAddRec->getLoop() == AddRecLoop) {
2625 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2626 i != e; ++i) {
2627 if (i >= AddRecOps.size()) {
2628 AddRecOps.append(OtherAddRec->op_begin()+i,
2629 OtherAddRec->op_end());
2630 break;
2631 }
2632 SmallVector<const SCEV *, 2> TwoOps = {
2633 AddRecOps[i], OtherAddRec->getOperand(i)};
2634 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2635 }
2636 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2637 }
2638 }
2639 // Step size has changed, so we cannot guarantee no self-wraparound.
2640 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2641 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2642 }
2643 }
2644
2645 // Otherwise couldn't fold anything into this recurrence. Move on to the
2646 // next one.
2647 }
2648
2649 // Okay, it looks like we really DO need an add expr. Check to see if we
2650 // already have one, otherwise create a new one.
2651 return getOrCreateAddExpr(Ops, Flags);
2652 }
2653
2654 const SCEV *
2655 ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2656 SCEV::NoWrapFlags Flags) {
2657 FoldingSetNodeID ID;
2658 ID.AddInteger(scAddExpr);
2659 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2660 ID.AddPointer(Ops[i]);
2661 void *IP = nullptr;
2662 SCEVAddExpr *S =
2663 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2664 if (!S) {
2665 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2666 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2667 S = new (SCEVAllocator)
2668 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2669 UniqueSCEVs.InsertNode(S, IP);
2670 addToLoopUseLists(S);
2671 }
2672 S->setNoWrapFlags(Flags);
2673 return S;
2674 }
2675
2676 const SCEV *
2677 ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2678 SCEV::NoWrapFlags Flags) {
2679 FoldingSetNodeID ID;
2680 ID.AddInteger(scMulExpr);
2681 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2682 ID.AddPointer(Ops[i]);
2683 void *IP = nullptr;
2684 SCEVMulExpr *S =
2685 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2686 if (!S) {
2687 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2688 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2689 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2690 O, Ops.size());
2691 UniqueSCEVs.InsertNode(S, IP);
2692 addToLoopUseLists(S);
2693 }
2694 S->setNoWrapFlags(Flags);
2695 return S;
2696 }
2697
2698 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2699 uint64_t k = i*j;
2700 if (j > 1 && k / j != i) Overflow = true;
2701 return k;
2702 }
2703
2704 /// Compute the result of "n choose k", the binomial coefficient. If an
2705 /// intermediate computation overflows, Overflow will be set and the return will
2706 /// be garbage. Overflow is not cleared on absence of overflow.
2707 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2708 // We use the multiplicative formula:
2709 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2710 // At iteration i, we multiply the running result by the i-th term of the
2711 // numerator, n-(i-1), and divide by i. This division always produces an
2712 // integral result, and helps reduce the chance of overflow in the
2713 // intermediate computations. However, we can still overflow even when the
2714 // final result would fit.
2715
2716 if (n == 0 || n == k) return 1;
2717 if (k > n) return 0;
2718
2719 if (k > n/2)
2720 k = n-k;
2721
2722 uint64_t r = 1;
2723 for (uint64_t i = 1; i <= k; ++i) {
2724 r = umul_ov(r, n-(i-1), Overflow);
2725 r /= i;
2726 }
2727 return r;
2728 }
2729
2730 /// Determine if any of the operands in this SCEV are a constant or if
2731 /// any of the add or multiply expressions in this SCEV contain a constant.
2732 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
2733 struct FindConstantInAddMulChain {
2734 bool FoundConstant = false;
2735
2736 bool follow(const SCEV *S) {
2737 FoundConstant |= isa<SCEVConstant>(S);
2738 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
2739 }
2740
2741 bool isDone() const {
2742 return FoundConstant;
2743 }
2744 };
2745
2746 FindConstantInAddMulChain F;
2747 SCEVTraversal<FindConstantInAddMulChain> ST(F);
2748 ST.visitAll(StartExpr);
2749 return F.FoundConstant;
2750 }
2751
2752 /// Get a canonical multiply expression, or something simpler if possible.
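/// For example (illustrative): 2 * (3 + x) canonicalizes to 6 + 2 * x via
/// the constant-distribution rule below, and -1 * {S,+,X}<L> folds to
/// {-1 * S,+,-1 * X}<L> while preserving the no-self-wrap flag.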
2753 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2754 SCEV::NoWrapFlags Flags,
2755 unsigned Depth) {
2756 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2757 "only nuw or nsw allowed");
2758 assert(!Ops.empty() && "Cannot get empty mul!");
2759 if (Ops.size() == 1) return Ops[0];
2760 #ifndef NDEBUG
2761 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2762 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2763 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2764 "SCEVMulExpr operand types don't match!");
2765 #endif
2766
2767 // Sort by complexity; this groups all similar expression types together.
2768 GroupByComplexity(Ops, &LI, DT);
2769
2770 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
2771
2772 // Limit recursion depth.
2773 if (Depth > MaxArithDepth)
2774 return getOrCreateMulExpr(Ops, Flags);
2775
2776 // If there are any constants, fold them together.
2777 unsigned Idx = 0;
2778 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2779
2780 // C1*(C2+V) -> C1*C2 + C1*V
2781 if (Ops.size() == 2)
2782 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
2783 // If any of Add's ops are Adds or Muls with a constant,
2784 // apply this transformation as well.
2785 if (Add->getNumOperands() == 2)
2786 // TODO: There are some cases where this transformation is not
2787 // profitable, for example:
2788 // Add = (C0 + X) * Y + Z.
2789 // Maybe the scope of this transformation should be narrowed down.
2790 if (containsConstantInAddMulChain(Add))
2791 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
2792 SCEV::FlagAnyWrap, Depth + 1),
2793 getMulExpr(LHSC, Add->getOperand(1),
2794 SCEV::FlagAnyWrap, Depth + 1),
2795 SCEV::FlagAnyWrap, Depth + 1);
2796
2797 ++Idx;
2798 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2799 // We found two constants, fold them together!
2800 ConstantInt *Fold =
2801 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
2802 Ops[0] = getConstant(Fold);
2803 Ops.erase(Ops.begin()+1); // Erase the folded element
2804 if (Ops.size() == 1) return Ops[0];
2805 LHSC = cast<SCEVConstant>(Ops[0]);
2806 }
2807
2808 // If we are left with a constant one being multiplied, strip it off.
2809 if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
2810 Ops.erase(Ops.begin());
2811 --Idx;
2812 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
2813 // If we have a multiply of zero, it will always be zero.
2814 return Ops[0];
2815 } else if (Ops[0]->isAllOnesValue()) {
2816 // If we have a mul by -1 of an add, try distributing the -1 among the
2817 // add operands.
2818 if (Ops.size() == 2) {
2819 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
2820 SmallVector<const SCEV *, 4> NewOps;
2821 bool AnyFolded = false;
2822 for (const SCEV *AddOp : Add->operands()) {
2823 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
2824 Depth + 1);
2825 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
2826 NewOps.push_back(Mul);
2827 }
2828 if (AnyFolded)
2829 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
2830 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
2831 // Negation preserves a recurrence's no self-wrap property.
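// E.g. (illustrative): -1 * {0,+,2}<nw> becomes {0,+,-2}<nw>.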
2832 SmallVector<const SCEV *, 4> Operands;
2833 for (const SCEV *AddRecOp : AddRec->operands())
2834 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
2835 Depth + 1));
2836
2837 return getAddRecExpr(Operands, AddRec->getLoop(),
2838 AddRec->getNoWrapFlags(SCEV::FlagNW));
2839 }
2840 }
2841 }
2842
2843 if (Ops.size() == 1)
2844 return Ops[0];
2845 }
2846
2847 // Skip over the add expressions until we get to a multiply.
2848 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2849 ++Idx;
2850
2851 // If there are mul operands, inline them all into this expression.
2852 if (Idx < Ops.size()) {
2853 bool DeletedMul = false;
2854 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2855 if (Ops.size() > MulOpsInlineThreshold)
2856 break;
2857 // If we have a mul, expand the mul operands onto the end of the
2858 // operands list.
2859 Ops.erase(Ops.begin()+Idx);
2860 Ops.append(Mul->op_begin(), Mul->op_end());
2861 DeletedMul = true;
2862 }
2863
2864 // If we deleted at least one mul, we added operands to the end of the
2865 // list, and they are not necessarily sorted. Recurse to resort and
2866 // resimplify any operands we just acquired.
2867 if (DeletedMul)
2868 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2869 }
2870
2871 // If there are any add recurrences in the operands list, see if any other
2872 // added values are loop invariant. If so, we can fold them into the
2873 // recurrence.
2874 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2875 ++Idx;
2876
2877 // Scan over all recurrences, trying to fold loop invariants into them.
2878 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2879 // Scan all of the other operands to this mul and add them to the vector
2880 // if they are loop invariant w.r.t. the recurrence.
2881 SmallVector<const SCEV *, 8> LIOps;
2882 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2883 const Loop *AddRecLoop = AddRec->getLoop();
2884 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2885 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2886 LIOps.push_back(Ops[i]);
2887 Ops.erase(Ops.begin()+i);
2888 --i; --e;
2889 }
2890
2891 // If we found some loop invariants, fold them into the recurrence.
2892 if (!LIOps.empty()) {
2893 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
2894 SmallVector<const SCEV *, 4> NewOps;
2895 NewOps.reserve(AddRec->getNumOperands());
2896 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
2897 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2898 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
2899 SCEV::FlagAnyWrap, Depth + 1));
2900
2901 // Build the new addrec. Propagate the NUW and NSW flags if both the
2902 // outer mul and the inner addrec are guaranteed to have no overflow.
2903 //
2904 // The no-self-wrap property cannot be guaranteed after changing the
2905 // step size, but it will be inferred if either NUW or NSW is true.
2906 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2907 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
2908
2909 // If all of the other operands were loop invariant, we are done.
2910 if (Ops.size() == 1) return NewRec;
2911
2912 // Otherwise, multiply the folded AddRec by the non-invariant parts.
2913 for (unsigned i = 0;; ++i)
2914 if (Ops[i] == AddRec) {
2915 Ops[i] = NewRec;
2916 break;
2917 }
2918 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2919 }
2920
2921 // Okay, if there weren't any loop invariants to be folded, check to see
2922 // if there are multiple AddRec's with the same loop induction variable
2923 // being multiplied together. If so, we can fold them.
2924
2925 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2926 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2927 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
2928 // ]]],+,...up to x=2n}.
2929 // Note that the arguments to choose() are always integers with values
2930 // known at compile time, never SCEV objects.
2931 //
2932 // The implementation avoids pointless extra computations when the two
2933 // addrec's are of different length (mathematically, it's equivalent to
2934 // an infinite stream of zeros on the right).
2935 bool OpsModified = false;
2936 for (unsigned OtherIdx = Idx+1;
2937 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2938 ++OtherIdx) {
2939 const SCEVAddRecExpr *OtherAddRec =
2940 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2941 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
2942 continue;
2943
2944 // Limit max number of arguments to avoid creation of unreasonably big
2945 // SCEVAddRecs with very complex operands.
2946 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
2947 MaxAddRecSize)
2948 continue;
2949
2950 bool Overflow = false;
2951 Type *Ty = AddRec->getType();
2952 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2953 SmallVector<const SCEV*, 7> AddRecOps;
2954 for (int x = 0, xe = AddRec->getNumOperands() +
2955 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
2956 const SCEV *Term = getZero(Ty);
2957 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2958 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2959 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2960 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2961 z < ze && !Overflow; ++z) {
2962 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2963 uint64_t Coeff;
2964 if (LargerThan64Bits)
2965 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2966 else
2967 Coeff = Coeff1*Coeff2;
2968 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2969 const SCEV *Term1 = AddRec->getOperand(y-z);
2970 const SCEV *Term2 = OtherAddRec->getOperand(z);
2971 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
2972 SCEV::FlagAnyWrap, Depth + 1),
2973 SCEV::FlagAnyWrap, Depth + 1);
2974 }
2975 }
2976 AddRecOps.push_back(Term);
2977 }
2978 if (!Overflow) {
2979 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
2980 SCEV::FlagAnyWrap);
2981 if (Ops.size() == 2) return NewAddRec;
2982 Ops[Idx] = NewAddRec;
2983 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2984 OpsModified = true;
2985 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
2986 if (!AddRec)
2987 break;
2988 }
2989 }
2990 if (OpsModified)
2991 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2992
2993 // Otherwise couldn't fold anything into this recurrence. Move on to the
2994 // next one.
2995 }
2996
2997 // Okay, it looks like we really DO need a mul expr. Check to see if we
2998 // already have one, otherwise create a new one.
2999 return getOrCreateMulExpr(Ops, Flags);
3000 }
3001
3002 /// Represents an unsigned remainder expression based on unsigned division.
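/// For example (illustrative): for a power-of-two divisor, "%x urem 8"
/// becomes (zext (trunc %x to i3)) widened back to the full type; otherwise
/// the general identity %x - ((%x udiv %y) * %y) below is used.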
3003 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3004 const SCEV *RHS) {
3005 assert(getEffectiveSCEVType(LHS->getType()) ==
3006 getEffectiveSCEVType(RHS->getType()) &&
3007 "SCEVURemExpr operand types don't match!");
3008
3009 // Short-circuit easy cases
3010 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3011 // If the constant is one, the result is trivial
3012 if (RHSC->getValue()->isOne())
3013 return getZero(LHS->getType()); // X urem 1 --> 0
3014
3015 // If the constant is a power of two, fold into a zext(trunc(LHS)).
3016 if (RHSC->getAPInt().isPowerOf2()) {
3017 Type *FullTy = LHS->getType();
3018 Type *TruncTy =
3019 IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3020 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3021 }
3022 }
3023
3024 // Fall back to the identity: %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
3025 const SCEV *UDiv = getUDivExpr(LHS, RHS);
3026 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3027 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3028 }
3029
3030 /// Get a canonical unsigned division expression, or something simpler if
3031 /// possible.
3032 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3033 const SCEV *RHS) {
3034 assert(getEffectiveSCEVType(LHS->getType()) ==
3035 getEffectiveSCEVType(RHS->getType()) &&
3036 "SCEVUDivExpr operand types don't match!");
3037
3038 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3039 if (RHSC->getValue()->isOne())
3040 return LHS; // X udiv 1 --> x
3041 // If the denominator is zero, the result of the udiv is undefined. Don't
3042 // try to analyze it, because the resolution chosen here may differ from
3043 // the resolution chosen in other parts of the compiler.
3044 if (!RHSC->getValue()->isZero()) {
3045 // Determine if the division can be folded into the operands of
3046 // its left-hand side.
3047 // TODO: Generalize this to non-constants by using known-bits information.
3048 Type *Ty = LHS->getType();
3049 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3050 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3051 // For non-power-of-two values, effectively round the value up to the
3052 // nearest power of two.
3053 if (!RHSC->getAPInt().isPowerOf2())
3054 ++MaxShiftAmt;
3055 IntegerType *ExtTy =
3056 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3057 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3058 if (const SCEVConstant *Step =
3059 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3060 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3061 const APInt &StepInt = Step->getAPInt();
3062 const APInt &DivInt = RHSC->getAPInt();
3063 if (!StepInt.urem(DivInt) &&
3064 getZeroExtendExpr(AR, ExtTy) ==
3065 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3066 getZeroExtendExpr(Step, ExtTy),
3067 AR->getLoop(), SCEV::FlagAnyWrap)) {
3068 SmallVector<const SCEV *, 4> Operands;
3069 for (const SCEV *Op : AR->operands())
3070 Operands.push_back(getUDivExpr(Op, RHS));
3071 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3072 }
3073 // Get a canonical UDivExpr for a recurrence.
3074 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N==0.
3075 // We can currently only fold X%N if X is constant.
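// E.g. (illustrative): with Step == 2 and divisor 4, {5,+,2}/u 4 is
// rewritten as {4,+,2}/u 4, since 4 urem 2 == 0 and 5 - (5 urem 2) == 4.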
3076 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 3077 if (StartC && !DivInt.urem(StepInt) && 3078 getZeroExtendExpr(AR, ExtTy) == 3079 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3080 getZeroExtendExpr(Step, ExtTy), 3081 AR->getLoop(), SCEV::FlagAnyWrap)) { 3082 const APInt &StartInt = StartC->getAPInt(); 3083 const APInt &StartRem = StartInt.urem(StepInt); 3084 if (StartRem != 0) 3085 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step, 3086 AR->getLoop(), SCEV::FlagNW); 3087 } 3088 } 3089 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 3090 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3091 SmallVector<const SCEV *, 4> Operands; 3092 for (const SCEV *Op : M->operands()) 3093 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3094 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3095 // Find an operand that's safely divisible. 3096 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3097 const SCEV *Op = M->getOperand(i); 3098 const SCEV *Div = getUDivExpr(Op, RHSC); 3099 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3100 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3101 M->op_end()); 3102 Operands[i] = Div; 3103 return getMulExpr(Operands); 3104 } 3105 } 3106 } 3107 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3108 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3109 SmallVector<const SCEV *, 4> Operands; 3110 for (const SCEV *Op : A->operands()) 3111 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3112 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3113 Operands.clear(); 3114 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3115 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3116 if (isa<SCEVUDivExpr>(Op) || 3117 getMulExpr(Op, RHS) != A->getOperand(i)) 3118 break; 3119 Operands.push_back(Op); 3120 } 3121 if (Operands.size() == A->getNumOperands()) 3122 return getAddExpr(Operands); 3123 } 3124 } 3125 3126 // Fold if both operands are constant. 3127 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3128 Constant *LHSCV = LHSC->getValue(); 3129 Constant *RHSCV = RHSC->getValue(); 3130 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3131 RHSCV))); 3132 } 3133 } 3134 } 3135 3136 FoldingSetNodeID ID; 3137 ID.AddInteger(scUDivExpr); 3138 ID.AddPointer(LHS); 3139 ID.AddPointer(RHS); 3140 void *IP = nullptr; 3141 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3142 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3143 LHS, RHS); 3144 UniqueSCEVs.InsertNode(S, IP); 3145 addToLoopUseLists(S); 3146 return S; 3147 } 3148 3149 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3150 APInt A = C1->getAPInt().abs(); 3151 APInt B = C2->getAPInt().abs(); 3152 uint32_t ABW = A.getBitWidth(); 3153 uint32_t BBW = B.getBitWidth(); 3154 3155 if (ABW > BBW) 3156 B = B.zext(ABW); 3157 else if (ABW < BBW) 3158 A = A.zext(BBW); 3159 3160 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3161 } 3162 3163 /// Get a canonical unsigned division expression, or something simpler if 3164 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3165 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3166 /// it's not exact because the udiv may be clearing bits. 
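/// For instance (a sketch of the constant-factor case handled below), the
/// exact division ((4 * %a)<nuw>) /u 2 folds to (2 * %a): the two constants
/// share the factor gcd(4, 2) = 2, which can be cancelled from both sides.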
3167 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3168 const SCEV *RHS) { 3169 // TODO: we could try to find factors in all sorts of things, but for now we 3170 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3171 // end of this file for inspiration. 3172 3173 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3174 if (!Mul || !Mul->hasNoUnsignedWrap()) 3175 return getUDivExpr(LHS, RHS); 3176 3177 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3178 // If the mulexpr multiplies by a constant, then that constant must be the 3179 // first element of the mulexpr. 3180 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3181 if (LHSCst == RHSCst) { 3182 SmallVector<const SCEV *, 2> Operands; 3183 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3184 return getMulExpr(Operands); 3185 } 3186 3187 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3188 // that there's a factor provided by one of the other terms. We need to 3189 // check. 3190 APInt Factor = gcd(LHSCst, RHSCst); 3191 if (!Factor.isIntN(1)) { 3192 LHSCst = 3193 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3194 RHSCst = 3195 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3196 SmallVector<const SCEV *, 2> Operands; 3197 Operands.push_back(LHSCst); 3198 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3199 LHS = getMulExpr(Operands); 3200 RHS = RHSCst; 3201 Mul = dyn_cast<SCEVMulExpr>(LHS); 3202 if (!Mul) 3203 return getUDivExactExpr(LHS, RHS); 3204 } 3205 } 3206 } 3207 3208 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3209 if (Mul->getOperand(i) == RHS) { 3210 SmallVector<const SCEV *, 2> Operands; 3211 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3212 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3213 return getMulExpr(Operands); 3214 } 3215 } 3216 3217 return getUDivExpr(LHS, RHS); 3218 } 3219 3220 /// Get an add recurrence expression for the specified loop. Simplify the 3221 /// expression as much as possible. 3222 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 3223 const Loop *L, 3224 SCEV::NoWrapFlags Flags) { 3225 SmallVector<const SCEV *, 4> Operands; 3226 Operands.push_back(Start); 3227 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 3228 if (StepChrec->getLoop() == L) { 3229 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 3230 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 3231 } 3232 3233 Operands.push_back(Step); 3234 return getAddRecExpr(Operands, L, Flags); 3235 } 3236 3237 /// Get an add recurrence expression for the specified loop. Simplify the 3238 /// expression as much as possible. 
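/// For example, getAddRecExpr({X, Y, 0}, L, Flags) drops the trailing zero
/// step and yields the recurrence {X,+,Y}<L>.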
3239 const SCEV *
3240 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3241 const Loop *L, SCEV::NoWrapFlags Flags) {
3242 if (Operands.size() == 1) return Operands[0];
3243 #ifndef NDEBUG
3244 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3245 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
3246 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3247 "SCEVAddRecExpr operand types don't match!");
3248 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3249 assert(isLoopInvariant(Operands[i], L) &&
3250 "SCEVAddRecExpr operand is not loop-invariant!");
3251 #endif
3252
3253 if (Operands.back()->isZero()) {
3254 Operands.pop_back();
3255 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3256 }
3257
3258 // It's tempting to want to call getMaxBackedgeTakenCount here and
3259 // use that information to infer NUW and NSW flags. However, computing a
3260 // BE count requires calling getAddRecExpr, so we may not yet have a
3261 // meaningful BE count at this point (and if we don't, we'd be stuck
3262 // with a SCEVCouldNotCompute as the cached BE count).
3263
3264 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3265
3266 // Canonicalize nested AddRecs by nesting them in order of loop depth.
3267 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3268 const Loop *NestedLoop = NestedAR->getLoop();
3269 if (L->contains(NestedLoop)
3270 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3271 : (!NestedLoop->contains(L) &&
3272 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3273 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
3274 NestedAR->op_end());
3275 Operands[0] = NestedAR->getStart();
3276 // AddRecs require their operands be loop-invariant with respect to their
3277 // loops. Don't perform this transformation if it would break this
3278 // requirement.
3279 bool AllInvariant = all_of(
3280 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3281
3282 if (AllInvariant) {
3283 // Create a recurrence for the outer loop with the same step size.
3284 //
3285 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3286 // inner recurrence has the same property.
3287 SCEV::NoWrapFlags OuterFlags =
3288 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3289
3290 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3291 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3292 return isLoopInvariant(Op, NestedLoop);
3293 });
3294
3295 if (AllInvariant) {
3296 // Ok, both add recurrences are valid after the transformation.
3297 //
3298 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3299 // the outer recurrence has the same property.
3300 SCEV::NoWrapFlags InnerFlags =
3301 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3302 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3303 }
3304 }
3305 // Reset Operands to its original state.
3306 Operands[0] = NestedAR;
3307 }
3308 }
3309
3310 // Okay, it looks like we really DO need an addrec expr. Check to see if we
3311 // already have one, otherwise create a new one.
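// The uniquing key below is (scAddRecExpr, operands..., loop):
// FindNodeOrInsertPos returns any existing node with an identical profile,
// so a fresh SCEVAddRecExpr is allocated only on a genuine cache miss.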
3312 FoldingSetNodeID ID; 3313 ID.AddInteger(scAddRecExpr); 3314 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3315 ID.AddPointer(Operands[i]); 3316 ID.AddPointer(L); 3317 void *IP = nullptr; 3318 SCEVAddRecExpr *S = 3319 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 3320 if (!S) { 3321 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 3322 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 3323 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 3324 O, Operands.size(), L); 3325 UniqueSCEVs.InsertNode(S, IP); 3326 addToLoopUseLists(S); 3327 } 3328 S->setNoWrapFlags(Flags); 3329 return S; 3330 } 3331 3332 const SCEV * 3333 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3334 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3335 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3336 // getSCEV(Base)->getType() has the same address space as Base->getType() 3337 // because SCEV::getType() preserves the address space. 3338 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 3339 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3340 // instruction to its SCEV, because the Instruction may be guarded by control 3341 // flow and the no-overflow bits may not be valid for the expression in any 3342 // context. This can be fixed similarly to how these flags are handled for 3343 // adds. 3344 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3345 : SCEV::FlagAnyWrap; 3346 3347 const SCEV *TotalOffset = getZero(IntPtrTy); 3348 // The array size is unimportant. The first thing we do on CurTy is getting 3349 // its element type. 3350 Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); 3351 for (const SCEV *IndexExpr : IndexExprs) { 3352 // Compute the (potentially symbolic) offset in bytes for this index. 3353 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3354 // For a struct, add the member offset. 3355 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3356 unsigned FieldNo = Index->getZExtValue(); 3357 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 3358 3359 // Add the field offset to the running total offset. 3360 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3361 3362 // Update CurTy to the type of the field at Index. 3363 CurTy = STy->getTypeAtIndex(Index); 3364 } else { 3365 // Update CurTy to its element type. 3366 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3367 // For an array, add the element offset, explicitly scaled. 3368 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3369 // Getelementptr indices are signed. 3370 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3371 3372 // Multiply the index by the element size to compute the element offset. 3373 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3374 3375 // Add the element offset to the running total offset. 3376 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3377 } 3378 } 3379 3380 // Add the total offset from all the GEP indices to the base. 
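// For instance (hypothetical IR), with 4-byte i32 and the usual array
// layout:
//   getelementptr [10 x i32], [10 x i32]* %p, i64 %i, i64 %j
// accumulates TotalOffset = 40 * %i + 4 * %j, which the return below adds
// to BaseExpr.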
3381 return getAddExpr(BaseExpr, TotalOffset, Wrap);
3382 }
3383
3384 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
3385 const SCEV *RHS) {
3386 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3387 return getSMaxExpr(Ops);
3388 }
3389
3390 const SCEV *
3391 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3392 assert(!Ops.empty() && "Cannot get empty smax!");
3393 if (Ops.size() == 1) return Ops[0];
3394 #ifndef NDEBUG
3395 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3396 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3397 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3398 "SCEVSMaxExpr operand types don't match!");
3399 #endif
3400
3401 // Sort by complexity, this groups all similar expression types together.
3402 GroupByComplexity(Ops, &LI, DT);
3403
3404 // If there are any constants, fold them together.
3405 unsigned Idx = 0;
3406 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3407 ++Idx;
3408 assert(Idx < Ops.size());
3409 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3410 // We found two constants, fold them together!
3411 ConstantInt *Fold = ConstantInt::get(
3412 getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
3413 Ops[0] = getConstant(Fold);
3414 Ops.erase(Ops.begin()+1); // Erase the folded element
3415 if (Ops.size() == 1) return Ops[0];
3416 LHSC = cast<SCEVConstant>(Ops[0]);
3417 }
3418
3419 // If we are left with a constant minimum-int, strip it off.
3420 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
3421 Ops.erase(Ops.begin());
3422 --Idx;
3423 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
3424 // If we have an smax with a constant maximum-int, it will always be
3425 // maximum-int.
3426 return Ops[0];
3427 }
3428
3429 if (Ops.size() == 1) return Ops[0];
3430 }
3431
3432 // Find the first SMax
3433 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
3434 ++Idx;
3435
3436 // Check to see if one of the operands is an SMax. If so, expand its operands
3437 // onto our operand list, and recurse to simplify.
3438 if (Idx < Ops.size()) {
3439 bool DeletedSMax = false;
3440 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
3441 Ops.erase(Ops.begin()+Idx);
3442 Ops.append(SMax->op_begin(), SMax->op_end());
3443 DeletedSMax = true;
3444 }
3445
3446 if (DeletedSMax)
3447 return getSMaxExpr(Ops);
3448 }
3449
3450 // Okay, check to see if the same value occurs in the operand list twice. If
3451 // so, delete one. Since we sorted the list, these values are required to
3452 // be adjacent.
3453 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
3454 // X smax Y smax Y --> X smax Y
3455 // X smax Y --> X, if X is always greater than or equal to Y
3456 if (Ops[i] == Ops[i+1] ||
3457 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
3458 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
3459 --i; --e;
3460 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
3461 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
3462 --i; --e;
3463 }
3464
3465 if (Ops.size() == 1) return Ops[0];
3466
3467 assert(!Ops.empty() && "Reduced smax down to nothing!");
3468
3469 // Okay, it looks like we really DO need an smax expr. Check to see if we
3470 // already have one, otherwise create a new one.
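// At this point nested smax operands have been inlined and constants
// folded; e.g. smax(%a, 3, smax(%b, 7)) has already been reduced to an
// smax over {7, %a, %b} before being uniqued here.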
3471 FoldingSetNodeID ID; 3472 ID.AddInteger(scSMaxExpr); 3473 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3474 ID.AddPointer(Ops[i]); 3475 void *IP = nullptr; 3476 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3477 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3478 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3479 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 3480 O, Ops.size()); 3481 UniqueSCEVs.InsertNode(S, IP); 3482 addToLoopUseLists(S); 3483 return S; 3484 } 3485 3486 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 3487 const SCEV *RHS) { 3488 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3489 return getUMaxExpr(Ops); 3490 } 3491 3492 const SCEV * 3493 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3494 assert(!Ops.empty() && "Cannot get empty umax!"); 3495 if (Ops.size() == 1) return Ops[0]; 3496 #ifndef NDEBUG 3497 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3498 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3499 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3500 "SCEVUMaxExpr operand types don't match!"); 3501 #endif 3502 3503 // Sort by complexity, this groups all similar expression types together. 3504 GroupByComplexity(Ops, &LI, DT); 3505 3506 // If there are any constants, fold them together. 3507 unsigned Idx = 0; 3508 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3509 ++Idx; 3510 assert(Idx < Ops.size()); 3511 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3512 // We found two constants, fold them together! 3513 ConstantInt *Fold = ConstantInt::get( 3514 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); 3515 Ops[0] = getConstant(Fold); 3516 Ops.erase(Ops.begin()+1); // Erase the folded element 3517 if (Ops.size() == 1) return Ops[0]; 3518 LHSC = cast<SCEVConstant>(Ops[0]); 3519 } 3520 3521 // If we are left with a constant minimum-int, strip it off. 3522 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 3523 Ops.erase(Ops.begin()); 3524 --Idx; 3525 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 3526 // If we have an umax with a constant maximum-int, it will always be 3527 // maximum-int. 3528 return Ops[0]; 3529 } 3530 3531 if (Ops.size() == 1) return Ops[0]; 3532 } 3533 3534 // Find the first UMax 3535 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 3536 ++Idx; 3537 3538 // Check to see if one of the operands is a UMax. If so, expand its operands 3539 // onto our operand list, and recurse to simplify. 3540 if (Idx < Ops.size()) { 3541 bool DeletedUMax = false; 3542 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 3543 Ops.erase(Ops.begin()+Idx); 3544 Ops.append(UMax->op_begin(), UMax->op_end()); 3545 DeletedUMax = true; 3546 } 3547 3548 if (DeletedUMax) 3549 return getUMaxExpr(Ops); 3550 } 3551 3552 // Okay, check to see if the same value occurs in the operand list twice. If 3553 // so, delete one. Since we sorted the list, these values are required to 3554 // be adjacent. 
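// e.g. umax(%a, %b, %b) keeps a single %b, and if %a >=u %b is provable
// the whole expression further collapses to %a.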
3555 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
3556 // X umax Y umax Y --> X umax Y
3557 // X umax Y --> X, if X is always greater than or equal to Y
3558 if (Ops[i] == Ops[i+1] ||
3559 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
3560 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
3561 --i; --e;
3562 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
3563 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
3564 --i; --e;
3565 }
3566
3567 if (Ops.size() == 1) return Ops[0];
3568
3569 assert(!Ops.empty() && "Reduced umax down to nothing!");
3570
3571 // Okay, it looks like we really DO need a umax expr. Check to see if we
3572 // already have one, otherwise create a new one.
3573 FoldingSetNodeID ID;
3574 ID.AddInteger(scUMaxExpr);
3575 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3576 ID.AddPointer(Ops[i]);
3577 void *IP = nullptr;
3578 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3579 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3580 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3581 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
3582 O, Ops.size());
3583 UniqueSCEVs.InsertNode(S, IP);
3584 addToLoopUseLists(S);
3585 return S;
3586 }
3587
3588 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3589 const SCEV *RHS) {
3590 // ~smax(~x, ~y) == smin(x, y).
3591 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
3592 }
3593
3594 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3595 const SCEV *RHS) {
3596 // ~umax(~x, ~y) == umin(x, y).
3597 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
3598 }
3599
3600 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3601 // We can bypass creating a target-independent
3602 // constant expression and then folding it back into a ConstantInt.
3603 // This is just a compile-time optimization.
3604 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3605 }
3606
3607 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3608 StructType *STy,
3609 unsigned FieldNo) {
3610 // We can bypass creating a target-independent
3611 // constant expression and then folding it back into a ConstantInt.
3612 // This is just a compile-time optimization.
3613 return getConstant(
3614 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3615 }
3616
3617 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3618 // Don't attempt to do anything other than create a SCEVUnknown object
3619 // here. createSCEV only calls getUnknown after checking for all other
3620 // interesting possibilities, and any other code that calls getUnknown
3621 // is doing so in order to hide a value from SCEV canonicalization.
3622
3623 FoldingSetNodeID ID;
3624 ID.AddInteger(scUnknown);
3625 ID.AddPointer(V);
3626 void *IP = nullptr;
3627 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3628 assert(cast<SCEVUnknown>(S)->getValue() == V &&
3629 "Stale SCEVUnknown in uniquing map!");
3630 return S;
3631 }
3632 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3633 FirstUnknown);
3634 FirstUnknown = cast<SCEVUnknown>(S);
3635 UniqueSCEVs.InsertNode(S, IP);
3636 return S;
3637 }
3638
3639 //===----------------------------------------------------------------------===//
3640 // Basic SCEV Analysis and PHI Idiom Recognition Code
3641 //
3642
3643 /// Test if values of the given type are analyzable within the SCEV
3644 /// framework. This primarily includes integer types, and it can optionally
3645 /// include pointer types if the ScalarEvolution class has access to
3646 /// target-specific information.
3647 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3648 // Integers and pointers are always SCEVable.
3649 return Ty->isIntegerTy() || Ty->isPointerTy();
3650 }
3651
3652 /// Return the size in bits of the specified type, for which isSCEVable must
3653 /// return true.
3654 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3655 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3656 return getDataLayout().getTypeSizeInBits(Ty);
3657 }
3658
3659 /// Return a type with the same bitwidth as the given type and which represents
3660 /// how SCEV will treat the given type, for which isSCEVable must return
3661 /// true. For pointer types, this is the pointer-sized integer type.
3662 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3663 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3664
3665 if (Ty->isIntegerTy())
3666 return Ty;
3667
3668 // The only other supported type is pointer.
3669 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3670 return getDataLayout().getIntPtrType(Ty);
3671 }
3672
3673 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3674 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
3675 }
3676
3677 const SCEV *ScalarEvolution::getCouldNotCompute() {
3678 return CouldNotCompute.get();
3679 }
3680
3681 bool ScalarEvolution::checkValidity(const SCEV *S) const {
3682 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
3683 auto *SU = dyn_cast<SCEVUnknown>(S);
3684 return SU && SU->getValue() == nullptr;
3685 });
3686
3687 return !ContainsNulls;
3688 }
3689
3690 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
3691 HasRecMapType::iterator I = HasRecMap.find(S);
3692 if (I != HasRecMap.end())
3693 return I->second;
3694
3695 bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
3696 HasRecMap.insert({S, FoundAddRec});
3697 return FoundAddRec;
3698 }
3699
3700 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
3701 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
3702 /// offset I, then return {S', I}, else return {\p S, nullptr}.
3703 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
3704 const auto *Add = dyn_cast<SCEVAddExpr>(S);
3705 if (!Add)
3706 return {S, nullptr};
3707
3708 if (Add->getNumOperands() != 2)
3709 return {S, nullptr};
3710
3711 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
3712 if (!ConstOp)
3713 return {S, nullptr};
3714
3715 return {Add->getOperand(1), ConstOp->getValue()};
3716 }
3717
3718 /// Return the ValueOffsetPair set for \p S. \p S can be represented
3719 /// by the value and offset from any ValueOffsetPair in the set.
3720 SetVector<ScalarEvolution::ValueOffsetPair> *
3721 ScalarEvolution::getSCEVValues(const SCEV *S) {
3722 ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
3723 if (SI == ExprValueMap.end())
3724 return nullptr;
3725 #ifndef NDEBUG
3726 if (VerifySCEVMap) {
3727 // Check there is no dangling Value in the set returned.
3728 for (const auto &VE : SI->second)
3729 assert(ValueExprMap.count(VE.first));
3730 }
3731 #endif
3732 return &SI->second;
3733 }
3734
3735 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
3736 /// cannot be used separately. eraseValueFromMap should be used to remove
3737 /// V from ValueExprMap and ExprValueMap at the same time.
3738 void ScalarEvolution::eraseValueFromMap(Value *V) {
3739 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3740 if (I != ValueExprMap.end()) {
3741 const SCEV *S = I->second;
3742 // Remove {V, 0} from the set of ExprValueMap[S]
3743 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
3744 SV->remove({V, nullptr});
3745
3746 // Remove {V, Offset} from the set of ExprValueMap[Stripped]
3747 const SCEV *Stripped;
3748 ConstantInt *Offset;
3749 std::tie(Stripped, Offset) = splitAddExpr(S);
3750 if (Offset != nullptr) {
3751 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
3752 SV->remove({V, Offset});
3753 }
3754 ValueExprMap.erase(V);
3755 }
3756 }
3757
3758 /// Return an existing SCEV if it exists, otherwise analyze the expression and
3759 /// create a new one.
3760 const SCEV *ScalarEvolution::getSCEV(Value *V) {
3761 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3762
3763 const SCEV *S = getExistingSCEV(V);
3764 if (S == nullptr) {
3765 S = createSCEV(V);
3766 // During PHI resolution, it is possible to create two SCEVs for the same
3767 // V, so we need to double-check whether V->S is already inserted into
3768 // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
3769 std::pair<ValueExprMapType::iterator, bool> Pair =
3770 ValueExprMap.insert({SCEVCallbackVH(V, this), S});
3771 if (Pair.second) {
3772 ExprValueMap[S].insert({V, nullptr});
3773
3774 // If S == Stripped + Offset, add Stripped -> {V, Offset} into
3775 // ExprValueMap.
3776 const SCEV *Stripped = S;
3777 ConstantInt *Offset = nullptr;
3778 std::tie(Stripped, Offset) = splitAddExpr(S);
3779 // If Stripped is SCEVUnknown, don't bother to save
3780 // Stripped -> {V, offset}. It doesn't simplify and sometimes even
3781 // increases the complexity of the expansion code.
3782 // If V is GetElementPtrInst, don't save Stripped -> {V, offset}
3783 // because it may generate add/sub instead of GEP in SCEV expansion.
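// Illustrative case: if S is (4 + %a), the entry %a -> {V, 4} lets the
// expander later materialize the SCEV %a as, roughly, "V - 4" rather than
// emitting fresh instructions for it.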
3784 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3785 !isa<GetElementPtrInst>(V)) 3786 ExprValueMap[Stripped].insert({V, Offset}); 3787 } 3788 } 3789 return S; 3790 } 3791 3792 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3793 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3794 3795 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3796 if (I != ValueExprMap.end()) { 3797 const SCEV *S = I->second; 3798 if (checkValidity(S)) 3799 return S; 3800 eraseValueFromMap(V); 3801 forgetMemoizedResults(S); 3802 } 3803 return nullptr; 3804 } 3805 3806 /// Return a SCEV corresponding to -V = -1*V 3807 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3808 SCEV::NoWrapFlags Flags) { 3809 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3810 return getConstant( 3811 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3812 3813 Type *Ty = V->getType(); 3814 Ty = getEffectiveSCEVType(Ty); 3815 return getMulExpr( 3816 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 3817 } 3818 3819 /// Return a SCEV corresponding to ~V = -1-V 3820 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3821 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3822 return getConstant( 3823 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3824 3825 Type *Ty = V->getType(); 3826 Ty = getEffectiveSCEVType(Ty); 3827 const SCEV *AllOnes = 3828 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 3829 return getMinusSCEV(AllOnes, V); 3830 } 3831 3832 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3833 SCEV::NoWrapFlags Flags, 3834 unsigned Depth) { 3835 // Fast path: X - X --> 0. 3836 if (LHS == RHS) 3837 return getZero(LHS->getType()); 3838 3839 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3840 // makes it so that we cannot make much use of NUW. 3841 auto AddFlags = SCEV::FlagAnyWrap; 3842 const bool RHSIsNotMinSigned = 3843 !getSignedRangeMin(RHS).isMinSignedValue(); 3844 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3845 // Let M be the minimum representable signed value. Then (-1)*RHS 3846 // signed-wraps if and only if RHS is M. That can happen even for 3847 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3848 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3849 // (-1)*RHS, we need to prove that RHS != M. 3850 // 3851 // If LHS is non-negative and we know that LHS - RHS does not 3852 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 3853 // either by proving that RHS > M or that LHS >= 0. 3854 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 3855 AddFlags = SCEV::FlagNSW; 3856 } 3857 } 3858 3859 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 3860 // RHS is NSW and LHS >= 0. 3861 // 3862 // The difficulty here is that the NSW flag may have been proven 3863 // relative to a loop that is to be found in a recurrence in LHS and 3864 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 3865 // larger scope than intended. 3866 auto NegFlags = RHSIsNotMinSigned ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap;
3867
3868 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
3869 }
3870
3871 const SCEV *
3872 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
3873 Type *SrcTy = V->getType();
3874 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3875 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3876 "Cannot truncate or zero extend with non-integer arguments!");
3877 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3878 return V; // No conversion
3879 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
3880 return getTruncateExpr(V, Ty);
3881 return getZeroExtendExpr(V, Ty);
3882 }
3883
3884 const SCEV *
3885 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
3886 Type *Ty) {
3887 Type *SrcTy = V->getType();
3888 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3889 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3890 "Cannot truncate or sign extend with non-integer arguments!");
3891 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3892 return V; // No conversion
3893 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
3894 return getTruncateExpr(V, Ty);
3895 return getSignExtendExpr(V, Ty);
3896 }
3897
3898 const SCEV *
3899 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
3900 Type *SrcTy = V->getType();
3901 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3902 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3903 "Cannot noop or zero extend with non-integer arguments!");
3904 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3905 "getNoopOrZeroExtend cannot truncate!");
3906 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3907 return V; // No conversion
3908 return getZeroExtendExpr(V, Ty);
3909 }
3910
3911 const SCEV *
3912 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
3913 Type *SrcTy = V->getType();
3914 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3915 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3916 "Cannot noop or sign extend with non-integer arguments!");
3917 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3918 "getNoopOrSignExtend cannot truncate!");
3919 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3920 return V; // No conversion
3921 return getSignExtendExpr(V, Ty);
3922 }
3923
3924 const SCEV *
3925 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
3926 Type *SrcTy = V->getType();
3927 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3928 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3929 "Cannot noop or any extend with non-integer arguments!");
3930 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3931 "getNoopOrAnyExtend cannot truncate!");
3932 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3933 return V; // No conversion
3934 return getAnyExtendExpr(V, Ty);
3935 }
3936
3937 const SCEV *
3938 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
3939 Type *SrcTy = V->getType();
3940 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3941 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3942 "Cannot truncate or noop with non-integer arguments!");
3943 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
3944 "getTruncateOrNoop cannot extend!");
3945 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3946 return V; // No conversion
3947 return getTruncateExpr(V, Ty);
3948 }
3949
3950 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
3951 const SCEV *RHS) {
3952 const SCEV *PromotedLHS = LHS;
3953 const SCEV *PromotedRHS = RHS;
3954
3955 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
3956 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
3957 else
3958 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
3959
3960 return getUMaxExpr(PromotedLHS, PromotedRHS);
3961 }
3962
3963 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
3964 const SCEV *RHS) {
3965 const SCEV *PromotedLHS = LHS;
3966 const SCEV *PromotedRHS = RHS;
3967
3968 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
3969 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
3970 else
3971 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
3972
3973 return getUMinExpr(PromotedLHS, PromotedRHS);
3974 }
3975
3976 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
3977 // A pointer operand may evaluate to a nonpointer expression, such as null.
3978 if (!V->getType()->isPointerTy())
3979 return V;
3980
3981 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
3982 return getPointerBase(Cast->getOperand());
3983 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
3984 const SCEV *PtrOp = nullptr;
3985 for (const SCEV *NAryOp : NAry->operands()) {
3986 if (NAryOp->getType()->isPointerTy()) {
3987 // Cannot find the base of an expression with multiple pointer operands.
3988 if (PtrOp)
3989 return V;
3990 PtrOp = NAryOp;
3991 }
3992 }
3993 if (!PtrOp)
3994 return V;
3995 return getPointerBase(PtrOp);
3996 }
3997 return V;
3998 }
3999
4000 /// Push users of the given Instruction onto the given Worklist.
4001 static void
4002 PushDefUseChildren(Instruction *I,
4003 SmallVectorImpl<Instruction *> &Worklist) {
4004 // Push the def-use children onto the Worklist stack.
4005 for (User *U : I->users())
4006 Worklist.push_back(cast<Instruction>(U));
4007 }
4008
4009 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
4010 SmallVector<Instruction *, 16> Worklist;
4011 PushDefUseChildren(PN, Worklist);
4012
4013 SmallPtrSet<Instruction *, 8> Visited;
4014 Visited.insert(PN);
4015 while (!Worklist.empty()) {
4016 Instruction *I = Worklist.pop_back_val();
4017 if (!Visited.insert(I).second)
4018 continue;
4019
4020 auto It = ValueExprMap.find_as(static_cast<Value *>(I));
4021 if (It != ValueExprMap.end()) {
4022 const SCEV *Old = It->second;
4023
4024 // Short-circuit the def-use traversal if the symbolic name
4025 // ceases to appear in expressions.
4026 if (Old != SymName && !hasOperand(Old, SymName))
4027 continue;
4028
4029 // SCEVUnknown for a PHI either means that it has an unrecognized
4030 // structure, it's a PHI that's in the process of being computed
4031 // by createNodeForPHI, or it's a single-value PHI. In the first case,
4032 // additional loop trip count information isn't going to change anything.
4033 // In the second case, createNodeForPHI will perform the necessary
4034 // updates on its own when it gets to that point. In the third, we do
4035 // want to forget the SCEVUnknown.
4036 if (!isa<PHINode>(I) ||
4037 !isa<SCEVUnknown>(Old) ||
4038 (I != PN && Old == SymName)) {
4039 eraseValueFromMap(It->first);
4040 forgetMemoizedResults(Old);
4041 }
4042 }
4043
4044 PushDefUseChildren(I, Worklist);
4045 }
4046 }
4047
4048 namespace {
4049
4050 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4051 public:
4052 SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4053 : SCEVRewriteVisitor(SE), L(L) {}
4054
4055 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4056 ScalarEvolution &SE) {
4057 SCEVInitRewriter Rewriter(L, SE);
4058 const SCEV *Result = Rewriter.visit(S);
4059 return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4060 }
4061
4062 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4063 if (!SE.isLoopInvariant(Expr, L))
4064 Valid = false;
4065 return Expr;
4066 }
4067
4068 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4069 // Only allow AddRecExprs for this loop.
4070 if (Expr->getLoop() == L)
4071 return Expr->getStart();
4072 Valid = false;
4073 return Expr;
4074 }
4075
4076 bool isValid() { return Valid; }
4077
4078 private:
4079 const Loop *L;
4080 bool Valid = true;
4081 };
4082
4083 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
4084 public:
4085 SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
4086 : SCEVRewriteVisitor(SE), L(L) {}
4087
4088 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4089 ScalarEvolution &SE) {
4090 SCEVShiftRewriter Rewriter(L, SE);
4091 const SCEV *Result = Rewriter.visit(S);
4092 return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4093 }
4094
4095 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4096 // Only allow expressions that are invariant in this loop.
4097 if (!SE.isLoopInvariant(Expr, L))
4098 Valid = false;
4099 return Expr;
4100 }
4101
4102 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4103 if (Expr->getLoop() == L && Expr->isAffine())
4104 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
4105 Valid = false;
4106 return Expr;
4107 }
4108
4109 bool isValid() { return Valid; }
4110
4111 private:
4112 const Loop *L;
4113 bool Valid = true;
4114 };
4115
4116 } // end anonymous namespace
4117
4118 SCEV::NoWrapFlags
4119 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
4120 if (!AR->isAffine())
4121 return SCEV::FlagAnyWrap;
4122
4123 using OBO = OverflowingBinaryOperator;
4124
4125 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
4126
4127 if (!AR->hasNoSignedWrap()) {
4128 ConstantRange AddRecRange = getSignedRange(AR);
4129 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
4130
4131 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4132 Instruction::Add, IncRange, OBO::NoSignedWrap);
4133 if (NSWRegion.contains(AddRecRange))
4134 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
4135 }
4136
4137 if (!AR->hasNoUnsignedWrap()) {
4138 ConstantRange AddRecRange = getUnsignedRange(AR);
4139 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
4140
4141 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4142 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
4143 if (NUWRegion.contains(AddRecRange))
4144 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
4145 }
4146
4147 return Result;
4148 }
4149
4150 namespace {
4151
4152 /// Represents an abstract binary operation. This may exist as a
4153 /// normal instruction or constant expression, or may have been
4154 /// derived from an expression tree.
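/// For example, an (add nsw i32 %a, %b) instruction is described as
/// {Opcode: Add, LHS: %a, RHS: %b, IsNSW: true, IsNUW: false}, with Op
/// pointing back at the originating instruction.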
4155 struct BinaryOp {
4156 unsigned Opcode;
4157 Value *LHS;
4158 Value *RHS;
4159 bool IsNSW = false;
4160 bool IsNUW = false;
4161
4162 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
4163 /// constant expression.
4164 Operator *Op = nullptr;
4165
4166 explicit BinaryOp(Operator *Op)
4167 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
4168 Op(Op) {
4169 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4170 IsNSW = OBO->hasNoSignedWrap();
4171 IsNUW = OBO->hasNoUnsignedWrap();
4172 }
4173 }
4174
4175 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
4176 bool IsNUW = false)
4177 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
4178 };
4179
4180 } // end anonymous namespace
4181
4182 /// Try to map \p V into a BinaryOp, and return \c None on failure.
4183 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
4184 auto *Op = dyn_cast<Operator>(V);
4185 if (!Op)
4186 return None;
4187
4188 // Implementation detail: all the cleverness here should happen without
4189 // creating new SCEV expressions -- our caller knows tricks to avoid creating
4190 // SCEV expressions when possible, and we should not break that.
4191
4192 switch (Op->getOpcode()) {
4193 case Instruction::Add:
4194 case Instruction::Sub:
4195 case Instruction::Mul:
4196 case Instruction::UDiv:
4197 case Instruction::URem:
4198 case Instruction::And:
4199 case Instruction::Or:
4200 case Instruction::AShr:
4201 case Instruction::Shl:
4202 return BinaryOp(Op);
4203
4204 case Instruction::Xor:
4205 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
4206 // If the RHS of the xor is a signmask, then this is just an add.
4207 // Instcombine turns add of signmask into xor as a strength reduction step.
4208 if (RHSC->getValue().isSignMask())
4209 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
4210 return BinaryOp(Op);
4211
4212 case Instruction::LShr:
4213 // Turn logical shift right of a constant into an unsigned divide.
4214 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
4215 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
4216
4217 // If the shift count is not less than the bitwidth, the result of
4218 // the shift is undefined. Don't try to analyze it, because the
4219 // resolution chosen here may differ from the resolution chosen in
4220 // other parts of the compiler.
4221 if (SA->getValue().ult(BitWidth)) {
4222 Constant *X =
4223 ConstantInt::get(SA->getContext(),
4224 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4225 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
4226 }
4227 }
4228 return BinaryOp(Op);
4229
4230 case Instruction::ExtractValue: {
4231 auto *EVI = cast<ExtractValueInst>(Op);
4232 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4233 break;
4234
4235 auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
4236 if (!CI)
4237 break;
4238
4239 if (auto *F = CI->getCalledFunction())
4240 switch (F->getIntrinsicID()) {
4241 case Intrinsic::sadd_with_overflow:
4242 case Intrinsic::uadd_with_overflow:
4243 if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
4244 return BinaryOp(Instruction::Add, CI->getArgOperand(0),
4245 CI->getArgOperand(1));
4246
4247 // Now that we know that all uses of the arithmetic-result component of
4248 // CI are guarded by the overflow check, we can go ahead and pretend
4249 // that the arithmetic is non-overflowing.
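// Sketch of the guarded IR shape being matched (illustrative only):
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %sum = extractvalue { i32, i1 } %s, 0
//   %ov = extractvalue { i32, i1 } %s, 1
//   br i1 %ov, label %trap, label %ok   ; all uses of %sum are in %ok
// Under that guard %sum behaves like (add nsw i32 %a, %b).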
4250 if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
4251 return BinaryOp(Instruction::Add, CI->getArgOperand(0),
4252 CI->getArgOperand(1), /* IsNSW = */ true,
4253 /* IsNUW = */ false);
4254 else
4255 return BinaryOp(Instruction::Add, CI->getArgOperand(0),
4256 CI->getArgOperand(1), /* IsNSW = */ false,
4257 /* IsNUW = */ true);
4258 case Intrinsic::ssub_with_overflow:
4259 case Intrinsic::usub_with_overflow:
4260 if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
4261 return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
4262 CI->getArgOperand(1));
4263
4264 // The same reasoning as sadd/uadd above.
4265 if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow)
4266 return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
4267 CI->getArgOperand(1), /* IsNSW = */ true,
4268 /* IsNUW = */ false);
4269 else
4270 return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
4271 CI->getArgOperand(1), /* IsNSW = */ false,
4272 /* IsNUW = */ true);
4273 case Intrinsic::smul_with_overflow:
4274 case Intrinsic::umul_with_overflow:
4275 return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
4276 CI->getArgOperand(1));
4277 default:
4278 break;
4279 }
4280 }
4281
4282 default:
4283 break;
4284 }
4285
4286 return None;
4287 }
4288
4289 /// Helper function for createAddRecFromPHIWithCasts. We have a phi
4290 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
4291 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
4292 /// way. This function checks if \p Op, an operand of this SCEVAddExpr,
4293 /// follows one of the following patterns:
4294 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4295 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4296 /// If the SCEV expression of \p Op conforms with one of the expected patterns
4297 /// we return the type of the truncation operation, and indicate whether the
4298 /// truncated type should be treated as signed/unsigned by setting
4299 /// \p Signed to true/false, respectively.
4300 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
4301 bool &Signed, ScalarEvolution &SE) {
4302 // The case where Op == SymbolicPHI (that is, with no type conversions on
4303 // the way) is handled by the regular add recurrence creating logic and
4304 // would have already been triggered in createAddRecFromPHI. Reaching it here
4305 // means that createAddRecFromPHI had failed for this PHI before (e.g.,
4306 // because one of the other operands of the SCEVAddExpr updating this PHI is
4307 // not invariant).
4308 //
4309 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
4310 // this case predicates that allow us to prove that Op == SymbolicPHI will
4311 // be added.
4312 if (Op == SymbolicPHI)
4313 return nullptr;
4314
4315 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
4316 unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
4317 if (SourceBits != NewBits)
4318 return nullptr;
4319
4320 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
4321 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
4322 if (!SExt && !ZExt)
4323 return nullptr;
4324 const SCEVTruncateExpr *Trunc =
4325 SExt ?
dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4326 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4327 if (!Trunc) 4328 return nullptr; 4329 const SCEV *X = Trunc->getOperand(); 4330 if (X != SymbolicPHI) 4331 return nullptr; 4332 Signed = SExt != nullptr; 4333 return Trunc->getType(); 4334 } 4335 4336 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4337 if (!PN->getType()->isIntegerTy()) 4338 return nullptr; 4339 const Loop *L = LI.getLoopFor(PN->getParent()); 4340 if (!L || L->getHeader() != PN->getParent()) 4341 return nullptr; 4342 return L; 4343 } 4344 4345 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4346 // computation that updates the phi follows the following pattern: 4347 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4348 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 4349 // If so, try to see if it can be rewritten as an AddRecExpr under some 4350 // Predicates. If successful, return them as a pair. Also cache the results 4351 // of the analysis. 4352 // 4353 // Example usage scenario: 4354 // Say the Rewriter is called for the following SCEV: 4355 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4356 // where: 4357 // %X = phi i64 (%Start, %BEValue) 4358 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4359 // and call this function with %SymbolicPHI = %X. 4360 // 4361 // The analysis will find that the value coming around the backedge has 4362 // the following SCEV: 4363 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4364 // Upon concluding that this matches the desired pattern, the function 4365 // will return the pair {NewAddRec, SmallPredsVec} where: 4366 // NewAddRec = {%Start,+,%Step} 4367 // SmallPredsVec = {P1, P2, P3} as follows: 4368 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> 4369 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) 4370 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) 4371 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec 4372 // under the predicates {P1,P2,P3}. 4373 // This predicated rewrite will be cached in PredicatedSCEVRewrites: 4374 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)} 4375 // 4376 // TODO's: 4377 // 4378 // 1) Extend the Induction descriptor to also support inductions that involve 4379 // casts: When needed (namely, when we are called in the context of the 4380 // vectorizer induction analysis), a Set of cast instructions will be 4381 // populated by this method, and provided back to isInductionPHI. This is 4382 // needed to allow the vectorizer to properly record them to be ignored by 4383 // the cost model and to avoid vectorizing them (otherwise these casts, 4384 // which are redundant under the runtime overflow checks, will be 4385 // vectorized, which can be costly). 4386 // 4387 // 2) Support additional induction/PHISCEV patterns: We also want to support 4388 // inductions where the sext-trunc / zext-trunc operations (partly) occur 4389 // after the induction update operation (the induction increment): 4390 // 4391 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 4392 // which correspond to a phi->add->trunc->sext/zext->phi update chain. 4393 // 4394 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) 4395 // which correspond to a phi->trunc->add->sext/zext->phi update chain. 
4396 // 4397 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4398 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4399 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4400 SmallVector<const SCEVPredicate *, 3> Predicates; 4401 4402 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4403 // return an AddRec expression under some predicate. 4404 4405 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4406 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4407 assert(L && "Expecting an integer loop header phi"); 4408 4409 // The loop may have multiple entrances or multiple exits; we can analyze 4410 // this phi as an addrec if it has a unique entry value and a unique 4411 // backedge value. 4412 Value *BEValueV = nullptr, *StartValueV = nullptr; 4413 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4414 Value *V = PN->getIncomingValue(i); 4415 if (L->contains(PN->getIncomingBlock(i))) { 4416 if (!BEValueV) { 4417 BEValueV = V; 4418 } else if (BEValueV != V) { 4419 BEValueV = nullptr; 4420 break; 4421 } 4422 } else if (!StartValueV) { 4423 StartValueV = V; 4424 } else if (StartValueV != V) { 4425 StartValueV = nullptr; 4426 break; 4427 } 4428 } 4429 if (!BEValueV || !StartValueV) 4430 return None; 4431 4432 const SCEV *BEValue = getSCEV(BEValueV); 4433 4434 // If the value coming around the backedge is an add with the symbolic 4435 // value we just inserted, possibly with casts that we can ignore under 4436 // an appropriate runtime guard, then we found a simple induction variable! 4437 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4438 if (!Add) 4439 return None; 4440 4441 // If there is a single occurrence of the symbolic value, possibly 4442 // casted, replace it with a recurrence. 4443 unsigned FoundIndex = Add->getNumOperands(); 4444 Type *TruncTy = nullptr; 4445 bool Signed; 4446 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4447 if ((TruncTy = 4448 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4449 if (FoundIndex == e) { 4450 FoundIndex = i; 4451 break; 4452 } 4453 4454 if (FoundIndex == Add->getNumOperands()) 4455 return None; 4456 4457 // Create an add with everything but the specified operand. 4458 SmallVector<const SCEV *, 8> Ops; 4459 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4460 if (i != FoundIndex) 4461 Ops.push_back(Add->getOperand(i)); 4462 const SCEV *Accum = getAddExpr(Ops); 4463 4464 // The runtime checks will not be valid if the step amount is 4465 // varying inside the loop. 4466 if (!isLoopInvariant(Accum, L)) 4467 return None; 4468 4469 // *** Part2: Create the predicates 4470 4471 // Analysis was successful: we have a phi-with-cast pattern for which we 4472 // can return an AddRec expression under the following predicates: 4473 // 4474 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4475 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4476 // P2: An Equal predicate that guarantees that 4477 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4478 // P3: An Equal predicate that guarantees that 4479 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4480 // 4481 // As we next prove, the above predicates guarantee that: 4482 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4483 // 4484 // 4485 // More formally, we want to prove that: 4486 // Expr(i+1) = Start + (i+1) * Accum 4487 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4488 // 4489 // Given that: 4490 // 1) Expr(0) = Start 4491 // 2) Expr(1) = Start + Accum 4492 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4493 // 3) Induction hypothesis (step i): 4494 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4495 // 4496 // Proof: 4497 // Expr(i+1) = 4498 // = Start + (i+1)*Accum 4499 // = (Start + i*Accum) + Accum 4500 // = Expr(i) + Accum 4501 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4502 // :: from step i 4503 // 4504 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4505 // 4506 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4507 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4508 // + Accum :: from P3 4509 // 4510 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4511 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4512 // 4513 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4514 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4515 // 4516 // By induction, the same applies to all iterations 1<=i<n: 4517 // 4518 4519 // Create a truncated addrec for which we will add a no overflow check (P1). 4520 const SCEV *StartVal = getSCEV(StartValueV); 4521 const SCEV *PHISCEV = 4522 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4523 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4524 4525 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4526 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4527 // will be constant. 4528 // 4529 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4530 // add P1. 4531 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4532 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4533 Signed ? SCEVWrapPredicate::IncrementNSSW 4534 : SCEVWrapPredicate::IncrementNUSW; 4535 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4536 Predicates.push_back(AddRecPred); 4537 } 4538 4539 // Create the Equal Predicates P2,P3: 4540 4541 // It is possible that the predicates P2 and/or P3 are computable at 4542 // compile time due to StartVal and/or Accum being constants. 4543 // If either one is, then we can check that now and escape if either P2 4544 // or P3 is false. 4545 4546 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4547 // for each of StartVal and Accum 4548 auto GetExtendedExpr = [&](const SCEV *Expr) -> const SCEV * { 4549 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4550 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4551 const SCEV *ExtendedExpr = 4552 Signed ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
4553 : getZeroExtendExpr(TruncatedExpr, Expr->getType());
4554 return ExtendedExpr;
4555 };
4556
4557 // Given:
4558 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
4559 // = GetExtendedExpr(Expr)
4560 // Determine whether the predicate P: Expr == ExtendedExpr
4561 // is known to be false at compile time.
4562 auto PredIsKnownFalse = [&](const SCEV *Expr,
4563 const SCEV *ExtendedExpr) -> bool {
4564 return Expr != ExtendedExpr &&
4565 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
4566 };
4567
4568 const SCEV *StartExtended = GetExtendedExpr(StartVal);
4569 if (PredIsKnownFalse(StartVal, StartExtended)) {
4570 DEBUG(dbgs() << "P2 is compile-time false\n");
4571 return None;
4572 }
4573
4574 const SCEV *AccumExtended = GetExtendedExpr(Accum);
4575 if (PredIsKnownFalse(Accum, AccumExtended)) {
4576 DEBUG(dbgs() << "P3 is compile-time false\n");
4577 return None;
4578 }
4579
4580 auto AppendPredicate = [&](const SCEV *Expr,
4581 const SCEV *ExtendedExpr) -> void {
4582 if (Expr != ExtendedExpr &&
4583 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
4584 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4585 DEBUG(dbgs() << "Added Predicate: " << *Pred);
4586 Predicates.push_back(Pred);
4587 }
4588 };
4589
4590 AppendPredicate(StartVal, StartExtended);
4591 AppendPredicate(Accum, AccumExtended);
4592
4593 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
4594 // which the casts have been folded away. The caller can rewrite SymbolicPHI
4595 // into NewAR if it will also add the runtime overflow checks specified in
4596 // Predicates.
4597 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
4598
4599 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4600 std::make_pair(NewAR, Predicates);
4601 // Remember the result of the analysis for this SCEV at this location.
4602 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
4603 return PredRewrite;
4604 }
4605
4606 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4607 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
4608 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4609 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4610 if (!L)
4611 return None;
4612
4613 // Check to see if we already analyzed this PHI.
4614 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
4615 if (I != PredicatedSCEVRewrites.end()) {
4616 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
4617 I->second;
4618 // Analysis was done before and failed to create an AddRec:
4619 if (Rewrite.first == SymbolicPHI)
4620 return None;
4621 // Analysis was done before and succeeded in creating an AddRec under
4622 // a predicate:
4623 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
4624 assert(!(Rewrite.second).empty() && "Expected to find Predicates");
4625 return Rewrite;
4626 }
4627
4628 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4629 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
4630
4631 // Record in the cache that the analysis failed.
4632 if (!Rewrite) {
4633 SmallVector<const SCEVPredicate *, 3> Predicates;
4634 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
4635 return None;
4636 }
4637
4638 return Rewrite;
4639 }
4640
4641 /// A helper function for createAddRecFromPHI to handle simple cases.
4642 ///
4643 /// This function tries to find an AddRec expression for the simplest (yet most
4644 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
4645 /// If it fails, createAddRecFromPHI will use a more general, but slow,
4646 /// technique for finding the AddRec expression.
4647 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
4648 Value *BEValueV,
4649 Value *StartValueV) {
4650 const Loop *L = LI.getLoopFor(PN->getParent());
4651 assert(L && L->getHeader() == PN->getParent());
4652 assert(BEValueV && StartValueV);
4653
4654 auto BO = MatchBinaryOp(BEValueV, DT);
4655 if (!BO)
4656 return nullptr;
4657
4658 if (BO->Opcode != Instruction::Add)
4659 return nullptr;
4660
4661 const SCEV *Accum = nullptr;
4662 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
4663 Accum = getSCEV(BO->RHS);
4664 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
4665 Accum = getSCEV(BO->LHS);
4666
4667 if (!Accum)
4668 return nullptr;
4669
4670 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4671 if (BO->IsNUW)
4672 Flags = setFlags(Flags, SCEV::FlagNUW);
4673 if (BO->IsNSW)
4674 Flags = setFlags(Flags, SCEV::FlagNSW);
4675
4676 const SCEV *StartVal = getSCEV(StartValueV);
4677 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
4678
4679 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
4680
4681 // We can add Flags to the post-inc expression only if we
4682 // know that it is *undefined behavior* for BEValueV to
4683 // overflow.
4684 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
4685 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
4686 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
4687
4688 return PHISCEV;
4689 }
4690
4691 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
4692 const Loop *L = LI.getLoopFor(PN->getParent());
4693 if (!L || L->getHeader() != PN->getParent())
4694 return nullptr;
4695
4696 // The loop may have multiple entrances or multiple exits; we can analyze
4697 // this phi as an addrec if it has a unique entry value and a unique
4698 // backedge value.
4699 Value *BEValueV = nullptr, *StartValueV = nullptr;
4700 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
4701 Value *V = PN->getIncomingValue(i);
4702 if (L->contains(PN->getIncomingBlock(i))) {
4703 if (!BEValueV) {
4704 BEValueV = V;
4705 } else if (BEValueV != V) {
4706 BEValueV = nullptr;
4707 break;
4708 }
4709 } else if (!StartValueV) {
4710 StartValueV = V;
4711 } else if (StartValueV != V) {
4712 StartValueV = nullptr;
4713 break;
4714 }
4715 }
4716 if (!BEValueV || !StartValueV)
4717 return nullptr;
4718
4719 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
4720 "PHI node already processed?");
4721
4722 // First, try to find an AddRec expression without creating a fictitious
4723 // symbolic value for PN.
4724 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
4725 return S;
4726
4727 // Handle PHI node value symbolically.
4728 const SCEV *SymbolicName = getUnknown(PN);
4729 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
4730
4731 // Using this symbolic name for the PHI, analyze the value coming around
4732 // the back-edge.
4733 const SCEV *BEValue = getSCEV(BEValueV);
4734
4735 // NOTE: If BEValue is loop invariant, we know that the PHI node just
4736 // has a special value for the first iteration of the loop.
4737
4738 // If the value coming around the backedge is an add with the symbolic
4739 // value we just inserted, then we found a simple induction variable!
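// For illustration (hypothetical IR, not from a specific test): for
//   %i = phi i32 [ %start, %preheader ], [ %i.next, %latch ]
//   %t = add i32 %i, %a
//   %i.next = add i32 %t, %b
// getSCEV(%i.next) folds to the add (SymbolicName + %a + %b); the search
// below finds the SymbolicName operand, Accum becomes (%a + %b), and the
// phi is rewritten to the recurrence {%start,+,(%a + %b)}.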
4740 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
4741 // If there is a single occurrence of the symbolic value, replace it
4742 // with a recurrence.
4743 unsigned FoundIndex = Add->getNumOperands();
4744 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4745 if (Add->getOperand(i) == SymbolicName)
4746 if (FoundIndex == e) {
4747 FoundIndex = i;
4748 break;
4749 }
4750
4751 if (FoundIndex != Add->getNumOperands()) {
4752 // Create an add with everything but the specified operand.
4753 SmallVector<const SCEV *, 8> Ops;
4754 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4755 if (i != FoundIndex)
4756 Ops.push_back(Add->getOperand(i));
4757 const SCEV *Accum = getAddExpr(Ops);
4758
4759 // This is not a valid addrec if the step amount varies on each loop
4760 // iteration but is not itself an addrec in this loop.
4761 if (isLoopInvariant(Accum, L) ||
4762 (isa<SCEVAddRecExpr>(Accum) &&
4763 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
4764 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4765
4766 if (auto BO = MatchBinaryOp(BEValueV, DT)) {
4767 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
4768 if (BO->IsNUW)
4769 Flags = setFlags(Flags, SCEV::FlagNUW);
4770 if (BO->IsNSW)
4771 Flags = setFlags(Flags, SCEV::FlagNSW);
4772 }
4773 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
4774 // If the increment is an inbounds GEP, then we know the address
4775 // space cannot be wrapped around. We cannot make any guarantee
4776 // about signed or unsigned overflow because pointers are
4777 // unsigned but we may have a negative index from the base
4778 // pointer. We can guarantee that no unsigned wrap occurs if the
4779 // indices form a positive value.
4780 if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
4781 Flags = setFlags(Flags, SCEV::FlagNW);
4782
4783 const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
4784 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
4785 Flags = setFlags(Flags, SCEV::FlagNUW);
4786 }
4787
4788 // We cannot transfer nuw and nsw flags from subtraction
4789 // operations -- sub nuw X, Y is not the same as add nuw X, -Y
4790 // for instance.
4791 }
4792
4793 const SCEV *StartVal = getSCEV(StartValueV);
4794 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
4795
4796 // Okay, for the entire analysis of this edge we assumed the PHI
4797 // to be symbolic. We now need to go back and purge all of the
4798 // entries for the scalars that use the symbolic expression.
4799 forgetSymbolicName(PN, SymbolicName);
4800 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
4801
4802 // We can add Flags to the post-inc expression only if we
4803 // know that it is *undefined behavior* for BEValueV to
4804 // overflow.
4805 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
4806 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
4807 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
4808
4809 return PHISCEV;
4810 }
4811 }
4812 } else {
4813 // Otherwise, this could be a loop like this:
4814 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
4815 // In this case, j = {1,+,1} and BEValue is j.
4816 // Because the other in-value of i (0) fits the evolution of BEValue,
4817 // i really is an addrec evolution.
4818 //
4819 // We can generalize this by saying that i is the shifted value of BEValue
4820 // by one iteration:
4821 // PHI(f(0), f({1,+,1})) --> f({0,+,1})
4822 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
4823 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this);
4824 if (Shifted != getCouldNotCompute() &&
4825 Start != getCouldNotCompute()) {
4826 const SCEV *StartVal = getSCEV(StartValueV);
4827 if (Start == StartVal) {
4828 // Okay, for the entire analysis of this edge we assumed the PHI
4829 // to be symbolic. We now need to go back and purge all of the
4830 // entries for the scalars that use the symbolic expression.
4831 forgetSymbolicName(PN, SymbolicName);
4832 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
4833 return Shifted;
4834 }
4835 }
4836 }
4837
4838 // Remove the temporary PHI node SCEV that has been inserted while intending
4839 // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
4840 // as it would prevent later (possibly simpler) SCEV expressions from being
4841 // added to the ValueExprMap.
4842 eraseValueFromMap(PN);
4843
4844 return nullptr;
4845 }
4846
4847 // Checks if the SCEV S is available at BB. S is considered available at BB
4848 // if S can be materialized at BB without introducing a fault.
4849 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
4850 BasicBlock *BB) {
4851 struct CheckAvailable {
4852 bool TraversalDone = false;
4853 bool Available = true;
4854
4855 const Loop *L = nullptr; // The loop BB is in (can be nullptr)
4856 BasicBlock *BB = nullptr;
4857 DominatorTree &DT;
4858
4859 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
4860 : L(L), BB(BB), DT(DT) {}
4861
4862 bool setUnavailable() {
4863 TraversalDone = true;
4864 Available = false;
4865 return false;
4866 }
4867
4868 bool follow(const SCEV *S) {
4869 switch (S->getSCEVType()) {
4870 case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
4871 case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
4872 // These expressions are available if their operand(s) is/are.
4873 return true;
4874
4875 case scAddRecExpr: {
4876 // We allow add recurrences that are on the loop BB is in, or some
4877 // outer loop. This guarantees availability because the value of the
4878 // add recurrence at BB is simply the "current" value of the induction
4879 // variable. We can relax this in the future; for instance an add
4880 // recurrence on a sibling dominating loop is also available at BB.
4881 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
4882 if (L && (ARLoop == L || ARLoop->contains(L)))
4883 return true;
4884
4885 return setUnavailable();
4886 }
4887
4888 case scUnknown: {
4889 // For SCEVUnknown, we check for simple dominance.
4890 const auto *SU = cast<SCEVUnknown>(S);
4891 Value *V = SU->getValue();
4892
4893 if (isa<Argument>(V))
4894 return false;
4895
4896 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
4897 return false;
4898
4899 return setUnavailable();
4900 }
4901
4902 case scUDivExpr:
4903 case scCouldNotCompute:
4904 // We do not try to be smart about these at all.
4905 return setUnavailable(); 4906 } 4907 llvm_unreachable("switch should be fully covered!"); 4908 } 4909 4910 bool isDone() { return TraversalDone; } 4911 }; 4912 4913 CheckAvailable CA(L, BB, DT); 4914 SCEVTraversal<CheckAvailable> ST(CA); 4915 4916 ST.visitAll(S); 4917 return CA.Available; 4918 } 4919 4920 // Try to match a control flow sequence that branches out at BI and merges back 4921 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful 4922 // match. 4923 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 4924 Value *&C, Value *&LHS, Value *&RHS) { 4925 C = BI->getCondition(); 4926 4927 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 4928 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 4929 4930 if (!LeftEdge.isSingleEdge()) 4931 return false; 4932 4933 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 4934 4935 Use &LeftUse = Merge->getOperandUse(0); 4936 Use &RightUse = Merge->getOperandUse(1); 4937 4938 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 4939 LHS = LeftUse; 4940 RHS = RightUse; 4941 return true; 4942 } 4943 4944 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 4945 LHS = RightUse; 4946 RHS = LeftUse; 4947 return true; 4948 } 4949 4950 return false; 4951 } 4952 4953 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 4954 auto IsReachable = 4955 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 4956 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 4957 const Loop *L = LI.getLoopFor(PN->getParent()); 4958 4959 // We don't want to break LCSSA, even in a SCEV expression tree. 4960 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 4961 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 4962 return nullptr; 4963 4964 // Try to match 4965 // 4966 // br %cond, label %left, label %right 4967 // left: 4968 // br label %merge 4969 // right: 4970 // br label %merge 4971 // merge: 4972 // V = phi [ %x, %left ], [ %y, %right ] 4973 // 4974 // as "select %cond, %x, %y" 4975 4976 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 4977 assert(IDom && "At least the entry block should dominate PN"); 4978 4979 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 4980 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 4981 4982 if (BI && BI->isConditional() && 4983 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 4984 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 4985 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 4986 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 4987 } 4988 4989 return nullptr; 4990 } 4991 4992 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 4993 if (const SCEV *S = createAddRecFromPHI(PN)) 4994 return S; 4995 4996 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 4997 return S; 4998 4999 // If the PHI has a single incoming value, follow that value, unless the 5000 // PHI's incoming blocks are in a different loop, in which case doing so 5001 // risks breaking LCSSA form. Instcombine would normally zap these, but 5002 // it doesn't have DominatorTree information, so it may miss cases. 5003 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5004 if (LI.replacementPreservesLCSSAForm(PN, V)) 5005 return getSCEV(V); 5006 5007 // If it's not a loop phi, we can't handle it yet. 
5008 return getUnknown(PN); 5009 } 5010 5011 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5012 Value *Cond, 5013 Value *TrueVal, 5014 Value *FalseVal) { 5015 // Handle "constant" branch or select. This can occur for instance when a 5016 // loop pass transforms an inner loop and moves on to process the outer loop. 5017 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5018 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5019 5020 // Try to match some simple smax or umax patterns. 5021 auto *ICI = dyn_cast<ICmpInst>(Cond); 5022 if (!ICI) 5023 return getUnknown(I); 5024 5025 Value *LHS = ICI->getOperand(0); 5026 Value *RHS = ICI->getOperand(1); 5027 5028 switch (ICI->getPredicate()) { 5029 case ICmpInst::ICMP_SLT: 5030 case ICmpInst::ICMP_SLE: 5031 std::swap(LHS, RHS); 5032 LLVM_FALLTHROUGH; 5033 case ICmpInst::ICMP_SGT: 5034 case ICmpInst::ICMP_SGE: 5035 // a >s b ? a+x : b+x -> smax(a, b)+x 5036 // a >s b ? b+x : a+x -> smin(a, b)+x 5037 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5038 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5039 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5040 const SCEV *LA = getSCEV(TrueVal); 5041 const SCEV *RA = getSCEV(FalseVal); 5042 const SCEV *LDiff = getMinusSCEV(LA, LS); 5043 const SCEV *RDiff = getMinusSCEV(RA, RS); 5044 if (LDiff == RDiff) 5045 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5046 LDiff = getMinusSCEV(LA, RS); 5047 RDiff = getMinusSCEV(RA, LS); 5048 if (LDiff == RDiff) 5049 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5050 } 5051 break; 5052 case ICmpInst::ICMP_ULT: 5053 case ICmpInst::ICMP_ULE: 5054 std::swap(LHS, RHS); 5055 LLVM_FALLTHROUGH; 5056 case ICmpInst::ICMP_UGT: 5057 case ICmpInst::ICMP_UGE: 5058 // a >u b ? a+x : b+x -> umax(a, b)+x 5059 // a >u b ? b+x : a+x -> umin(a, b)+x 5060 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5061 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5062 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5063 const SCEV *LA = getSCEV(TrueVal); 5064 const SCEV *RA = getSCEV(FalseVal); 5065 const SCEV *LDiff = getMinusSCEV(LA, LS); 5066 const SCEV *RDiff = getMinusSCEV(RA, RS); 5067 if (LDiff == RDiff) 5068 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5069 LDiff = getMinusSCEV(LA, RS); 5070 RDiff = getMinusSCEV(RA, LS); 5071 if (LDiff == RDiff) 5072 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5073 } 5074 break; 5075 case ICmpInst::ICMP_NE: 5076 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5077 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5078 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5079 const SCEV *One = getOne(I->getType()); 5080 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5081 const SCEV *LA = getSCEV(TrueVal); 5082 const SCEV *RA = getSCEV(FalseVal); 5083 const SCEV *LDiff = getMinusSCEV(LA, LS); 5084 const SCEV *RDiff = getMinusSCEV(RA, One); 5085 if (LDiff == RDiff) 5086 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5087 } 5088 break; 5089 case ICmpInst::ICMP_EQ: 5090 // n == 0 ? 
1+x : n+x -> umax(n, 1)+x 5091 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5092 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5093 const SCEV *One = getOne(I->getType()); 5094 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5095 const SCEV *LA = getSCEV(TrueVal); 5096 const SCEV *RA = getSCEV(FalseVal); 5097 const SCEV *LDiff = getMinusSCEV(LA, One); 5098 const SCEV *RDiff = getMinusSCEV(RA, LS); 5099 if (LDiff == RDiff) 5100 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5101 } 5102 break; 5103 default: 5104 break; 5105 } 5106 5107 return getUnknown(I); 5108 } 5109 5110 /// Expand GEP instructions into add and multiply operations. This allows them 5111 /// to be analyzed by regular SCEV code. 5112 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5113 // Don't attempt to analyze GEPs over unsized objects. 5114 if (!GEP->getSourceElementType()->isSized()) 5115 return getUnknown(GEP); 5116 5117 SmallVector<const SCEV *, 4> IndexExprs; 5118 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5119 IndexExprs.push_back(getSCEV(*Index)); 5120 return getGEPExpr(GEP, IndexExprs); 5121 } 5122 5123 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5124 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5125 return C->getAPInt().countTrailingZeros(); 5126 5127 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5128 return std::min(GetMinTrailingZeros(T->getOperand()), 5129 (uint32_t)getTypeSizeInBits(T->getType())); 5130 5131 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5132 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5133 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5134 ? getTypeSizeInBits(E->getType()) 5135 : OpRes; 5136 } 5137 5138 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5139 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5140 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5141 ? getTypeSizeInBits(E->getType()) 5142 : OpRes; 5143 } 5144 5145 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5146 // The result is the min of all operands results. 5147 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5148 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5149 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5150 return MinOpRes; 5151 } 5152 5153 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5154 // The result is the sum of all operands results. 5155 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5156 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5157 for (unsigned i = 1, e = M->getNumOperands(); 5158 SumOpRes != BitWidth && i != e; ++i) 5159 SumOpRes = 5160 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5161 return SumOpRes; 5162 } 5163 5164 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5165 // The result is the min of all operands results. 5166 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5167 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5168 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5169 return MinOpRes; 5170 } 5171 5172 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5173 // The result is the min of all operands results. 
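// The result of smax is always one of its operands, so it has at least
// min(TZ(op_i)) trailing zeros. E.g. (illustrative) smax(8, 12) == 12 has
// min(3, 2) == 2 trailing zero bits.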
5174 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5175 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5176 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5177 return MinOpRes; 5178 } 5179 5180 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5181 // The result is the min of all operands results. 5182 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5183 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5184 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5185 return MinOpRes; 5186 } 5187 5188 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5189 // For a SCEVUnknown, ask ValueTracking. 5190 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5191 return Known.countMinTrailingZeros(); 5192 } 5193 5194 // SCEVUDivExpr 5195 return 0; 5196 } 5197 5198 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5199 auto I = MinTrailingZerosCache.find(S); 5200 if (I != MinTrailingZerosCache.end()) 5201 return I->second; 5202 5203 uint32_t Result = GetMinTrailingZerosImpl(S); 5204 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5205 assert(InsertPair.second && "Should insert a new key"); 5206 return InsertPair.first->second; 5207 } 5208 5209 /// Helper method to assign a range to V from metadata present in the IR. 5210 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5211 if (Instruction *I = dyn_cast<Instruction>(V)) 5212 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5213 return getConstantRangeFromMetadata(*MD); 5214 5215 return None; 5216 } 5217 5218 /// Determine the range for a particular SCEV. If SignHint is 5219 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5220 /// with a "cleaner" unsigned (resp. signed) representation. 5221 const ConstantRange & 5222 ScalarEvolution::getRangeRef(const SCEV *S, 5223 ScalarEvolution::RangeSignHint SignHint) { 5224 DenseMap<const SCEV *, ConstantRange> &Cache = 5225 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5226 : SignedRanges; 5227 5228 // See if we've computed this range already. 5229 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5230 if (I != Cache.end()) 5231 return I->second; 5232 5233 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5234 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5235 5236 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5237 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5238 5239 // If the value has known zeros, the maximum value will have those known zeros 5240 // as well. 
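// For example (illustrative): if an i8 expression is known to be a multiple
// of 8 (TZ == 3), the unsigned range below becomes [0, 0xF8 + 1), since
// 0xF8 is the largest i8 value with three trailing zero bits.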
5241 uint32_t TZ = GetMinTrailingZeros(S); 5242 if (TZ != 0) { 5243 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5244 ConservativeResult = 5245 ConstantRange(APInt::getMinValue(BitWidth), 5246 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5247 else 5248 ConservativeResult = ConstantRange( 5249 APInt::getSignedMinValue(BitWidth), 5250 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5251 } 5252 5253 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5254 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5255 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5256 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 5257 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 5258 } 5259 5260 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5261 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5262 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5263 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5264 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 5265 } 5266 5267 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5268 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5269 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5270 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5271 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 5272 } 5273 5274 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5275 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5276 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5277 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5278 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 5279 } 5280 5281 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5282 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5283 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5284 return setRange(UDiv, SignHint, 5285 ConservativeResult.intersectWith(X.udiv(Y))); 5286 } 5287 5288 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5289 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5290 return setRange(ZExt, SignHint, 5291 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 5292 } 5293 5294 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5295 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5296 return setRange(SExt, SignHint, 5297 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 5298 } 5299 5300 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5301 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5302 return setRange(Trunc, SignHint, 5303 ConservativeResult.intersectWith(X.truncate(BitWidth))); 5304 } 5305 5306 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5307 // If there's no unsigned wrap, the value will never be less than its 5308 // initial value. 5309 if (AddRec->hasNoUnsignedWrap()) 5310 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 5311 if (!C->getValue()->isZero()) 5312 ConservativeResult = ConservativeResult.intersectWith( 5313 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 5314 5315 // If there's no signed wrap, and all the operands have the same sign or 5316 // zero, the value won't ever change sign. 
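// For example (illustrative): {1,+,2}<nsw> has only non-negative operands,
// so the AllNonNeg case below narrows its signed range to the non-negative
// values [0, INT_SMAX + 1).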
5317 if (AddRec->hasNoSignedWrap()) { 5318 bool AllNonNeg = true; 5319 bool AllNonPos = true; 5320 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 5321 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 5322 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 5323 } 5324 if (AllNonNeg) 5325 ConservativeResult = ConservativeResult.intersectWith( 5326 ConstantRange(APInt(BitWidth, 0), 5327 APInt::getSignedMinValue(BitWidth))); 5328 else if (AllNonPos) 5329 ConservativeResult = ConservativeResult.intersectWith( 5330 ConstantRange(APInt::getSignedMinValue(BitWidth), 5331 APInt(BitWidth, 1))); 5332 } 5333 5334 // TODO: non-affine addrec 5335 if (AddRec->isAffine()) { 5336 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 5337 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5338 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5339 auto RangeFromAffine = getRangeForAffineAR( 5340 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5341 BitWidth); 5342 if (!RangeFromAffine.isFullSet()) 5343 ConservativeResult = 5344 ConservativeResult.intersectWith(RangeFromAffine); 5345 5346 auto RangeFromFactoring = getRangeViaFactoring( 5347 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5348 BitWidth); 5349 if (!RangeFromFactoring.isFullSet()) 5350 ConservativeResult = 5351 ConservativeResult.intersectWith(RangeFromFactoring); 5352 } 5353 } 5354 5355 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5356 } 5357 5358 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5359 // Check if the IR explicitly contains !range metadata. 5360 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5361 if (MDRange.hasValue()) 5362 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 5363 5364 // Split here to avoid paying the compile-time cost of calling both 5365 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5366 // if needed. 5367 const DataLayout &DL = getDataLayout(); 5368 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5369 // For a SCEVUnknown, ask ValueTracking. 5370 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5371 if (Known.One != ~Known.Zero + 1) 5372 ConservativeResult = 5373 ConservativeResult.intersectWith(ConstantRange(Known.One, 5374 ~Known.Zero + 1)); 5375 } else { 5376 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 5377 "generalize as needed!"); 5378 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5379 if (NS > 1) 5380 ConservativeResult = ConservativeResult.intersectWith( 5381 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 5382 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 5383 } 5384 5385 return setRange(U, SignHint, std::move(ConservativeResult)); 5386 } 5387 5388 return setRange(S, SignHint, std::move(ConservativeResult)); 5389 } 5390 5391 // Given a StartRange, Step and MaxBECount for an expression compute a range of 5392 // values that the expression can take. Initially, the expression has a value 5393 // from StartRange and then is changed by Step up to MaxBECount times. Signed 5394 // argument defines if we treat Step as signed or unsigned. 
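// For example (illustrative numbers): with StartRange == [0, 10), Step == 3,
// MaxBECount == 4 and Signed == false, the expression can grow by at most
// Offset == 3 * 4 == 12, so the helper below returns
// [0, 9 + 12 + 1) == [0, 22).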
5395 static ConstantRange getRangeForAffineARHelper(APInt Step,
5396 const ConstantRange &StartRange,
5397 const APInt &MaxBECount,
5398 unsigned BitWidth, bool Signed) {
5399 // If either Step or MaxBECount is 0, then the expression won't change, and we
5400 // just need to return the initial range.
5401 if (Step == 0 || MaxBECount == 0)
5402 return StartRange;
5403
5404 // If we don't know anything about the initial value (i.e. StartRange is
5405 // FullRange), then we don't know anything about the final range either.
5406 // Return FullRange.
5407 if (StartRange.isFullSet())
5408 return ConstantRange(BitWidth, /* isFullSet = */ true);
5409
5410 // If Step is signed and negative, then we use its absolute value, but we also
5411 // note that we're moving in the opposite direction.
5412 bool Descending = Signed && Step.isNegative();
5413
5414 if (Signed)
5415 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
5416 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
5417 // These equations hold true due to the well-defined wrap-around behavior of
5418 // APInt.
5419 Step = Step.abs();
5420
5421 // Check if Offset, i.e. Step * MaxBECount, is more than the full span of
5422 // BitWidth. If it is, the expression is guaranteed to overflow.
5423 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
5424 return ConstantRange(BitWidth, /* isFullSet = */ true);
5425
5426 // Offset is by how much the expression can change. Checks above guarantee no
5427 // overflow here.
5428 APInt Offset = Step * MaxBECount;
5429
5430 // The minimum value of the final range will match the minimum of StartRange
5431 // if the expression is increasing, and will be decreased by Offset otherwise.
5432 // The maximum value of the final range will match the maximum of StartRange
5433 // if the expression is decreasing, and will be increased by Offset otherwise.
5434 APInt StartLower = StartRange.getLower();
5435 APInt StartUpper = StartRange.getUpper() - 1;
5436 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
5437 : (StartUpper + std::move(Offset));
5438
5439 // It's possible that the new minimum/maximum value will fall into the initial
5440 // range (due to wrap around). This means that the expression can take any
5441 // value in this bitwidth, and we have to return full range.
5442 if (StartRange.contains(MovedBoundary))
5443 return ConstantRange(BitWidth, /* isFullSet = */ true);
5444
5445 APInt NewLower =
5446 Descending ? std::move(MovedBoundary) : std::move(StartLower);
5447 APInt NewUpper =
5448 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
5449 NewUpper += 1;
5450
5451 // If we end up with full range, return a proper full range.
5452 if (NewLower == NewUpper)
5453 return ConstantRange(BitWidth, /* isFullSet = */ true);
5454
5455 // No overflow detected; return the resulting [NewLower, NewUpper) range.
5456 return ConstantRange(std::move(NewLower), std::move(NewUpper));
5457 }
5458
5459 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
5460 const SCEV *Step,
5461 const SCEV *MaxBECount,
5462 unsigned BitWidth) {
5463 assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
5464 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
5465 "Precondition!");
5466
5467 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
5468 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
5469
5470 // First, consider step signed.
5471 ConstantRange StartSRange = getSignedRange(Start); 5472 ConstantRange StepSRange = getSignedRange(Step); 5473 5474 // If Step can be both positive and negative, we need to find ranges for the 5475 // maximum absolute step values in both directions and union them. 5476 ConstantRange SR = 5477 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5478 MaxBECountValue, BitWidth, /* Signed = */ true); 5479 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5480 StartSRange, MaxBECountValue, 5481 BitWidth, /* Signed = */ true)); 5482 5483 // Next, consider step unsigned. 5484 ConstantRange UR = getRangeForAffineARHelper( 5485 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5486 MaxBECountValue, BitWidth, /* Signed = */ false); 5487 5488 // Finally, intersect signed and unsigned ranges. 5489 return SR.intersectWith(UR); 5490 } 5491 5492 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5493 const SCEV *Step, 5494 const SCEV *MaxBECount, 5495 unsigned BitWidth) { 5496 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5497 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5498 5499 struct SelectPattern { 5500 Value *Condition = nullptr; 5501 APInt TrueValue; 5502 APInt FalseValue; 5503 5504 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5505 const SCEV *S) { 5506 Optional<unsigned> CastOp; 5507 APInt Offset(BitWidth, 0); 5508 5509 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5510 "Should be!"); 5511 5512 // Peel off a constant offset: 5513 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5514 // In the future we could consider being smarter here and handle 5515 // {Start+Step,+,Step} too. 5516 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5517 return; 5518 5519 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5520 S = SA->getOperand(1); 5521 } 5522 5523 // Peel off a cast operation 5524 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5525 CastOp = SCast->getSCEVType(); 5526 S = SCast->getOperand(); 5527 } 5528 5529 using namespace llvm::PatternMatch; 5530 5531 auto *SU = dyn_cast<SCEVUnknown>(S); 5532 const APInt *TrueVal, *FalseVal; 5533 if (!SU || 5534 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5535 m_APInt(FalseVal)))) { 5536 Condition = nullptr; 5537 return; 5538 } 5539 5540 TrueValue = *TrueVal; 5541 FalseValue = *FalseVal; 5542 5543 // Re-apply the cast we peeled off earlier 5544 if (CastOp.hasValue()) 5545 switch (*CastOp) { 5546 default: 5547 llvm_unreachable("Unknown SCEV cast type!"); 5548 5549 case scTruncate: 5550 TrueValue = TrueValue.trunc(BitWidth); 5551 FalseValue = FalseValue.trunc(BitWidth); 5552 break; 5553 case scZeroExtend: 5554 TrueValue = TrueValue.zext(BitWidth); 5555 FalseValue = FalseValue.zext(BitWidth); 5556 break; 5557 case scSignExtend: 5558 TrueValue = TrueValue.sext(BitWidth); 5559 FalseValue = FalseValue.sext(BitWidth); 5560 break; 5561 } 5562 5563 // Re-apply the constant offset we peeled off earlier 5564 TrueValue += Offset; 5565 FalseValue += Offset; 5566 } 5567 5568 bool isRecognized() { return Condition != nullptr; } 5569 }; 5570 5571 SelectPattern StartPattern(*this, BitWidth, Start); 5572 if (!StartPattern.isRecognized()) 5573 return ConstantRange(BitWidth, /* isFullSet = */ true); 5574 5575 SelectPattern StepPattern(*this, BitWidth, Step); 5576 if (!StepPattern.isRecognized()) 5577 return ConstantRange(BitWidth, /* isFullSet = */ true); 5578 5579 if (StartPattern.Condition != StepPattern.Condition) { 5580 // 
We don't handle this case today; but we could, by considering four 5581 // possibilities below instead of two. I'm not sure if there are cases where 5582 // that will help over what getRange already does, though. 5583 return ConstantRange(BitWidth, /* isFullSet = */ true); 5584 } 5585 5586 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5587 // construct arbitrary general SCEV expressions here. This function is called 5588 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5589 // say) can end up caching a suboptimal value. 5590 5591 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5592 // C2352 and C2512 (otherwise it isn't needed). 5593 5594 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5595 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5596 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5597 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5598 5599 ConstantRange TrueRange = 5600 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5601 ConstantRange FalseRange = 5602 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5603 5604 return TrueRange.unionWith(FalseRange); 5605 } 5606 5607 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5608 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5609 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5610 5611 // Return early if there are no flags to propagate to the SCEV. 5612 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5613 if (BinOp->hasNoUnsignedWrap()) 5614 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5615 if (BinOp->hasNoSignedWrap()) 5616 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5617 if (Flags == SCEV::FlagAnyWrap) 5618 return SCEV::FlagAnyWrap; 5619 5620 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5621 } 5622 5623 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5624 // Here we check that I is in the header of the innermost loop containing I, 5625 // since we only deal with instructions in the loop header. The actual loop we 5626 // need to check later will come from an add recurrence, but getting that 5627 // requires computing the SCEV of the operands, which can be expensive. This 5628 // check we can do cheaply to rule out some cases early. 5629 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5630 if (InnermostContainingLoop == nullptr || 5631 InnermostContainingLoop->getHeader() != I->getParent()) 5632 return false; 5633 5634 // Only proceed if we can prove that I does not yield poison. 5635 if (!programUndefinedIfFullPoison(I)) 5636 return false; 5637 5638 // At this point we know that if I is executed, then it does not wrap 5639 // according to at least one of NSW or NUW. If I is not executed, then we do 5640 // not know if the calculation that I represents would wrap. Multiple 5641 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5642 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5643 // derived from other instructions that map to the same SCEV. We cannot make 5644 // that guarantee for cases where I is not executed. So we need to find the 5645 // loop that I is considered in relation to and prove that I is executed for 5646 // every iteration of that loop. 
That implies that the value that I
5647 // calculates does not wrap anywhere in the loop, so we can apply the
5648 // flags to the SCEV.
5649 //
5650 // We check isLoopInvariant to disambiguate in case we are adding recurrences
5651 // from different loops, so that we know which loop to prove that I is
5652 // executed in.
5653 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
5654 // I could be an extractvalue from a call to an overflow intrinsic.
5655 // TODO: We can do better here in some cases.
5656 if (!isSCEVable(I->getOperand(OpIndex)->getType()))
5657 return false;
5658 const SCEV *Op = getSCEV(I->getOperand(OpIndex));
5659 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
5660 bool AllOtherOpsLoopInvariant = true;
5661 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
5662 ++OtherOpIndex) {
5663 if (OtherOpIndex != OpIndex) {
5664 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
5665 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
5666 AllOtherOpsLoopInvariant = false;
5667 break;
5668 }
5669 }
5670 }
5671 if (AllOtherOpsLoopInvariant &&
5672 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
5673 return true;
5674 }
5675 }
5676 return false;
5677 }
5678
5679 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
5680 // If we know that \c I can never be poison, period, then that's enough.
5681 if (isSCEVExprNeverPoison(I))
5682 return true;
5683
5684 // For an add recurrence specifically, we assume that infinite loops without
5685 // side effects are undefined behavior, and then reason as follows:
5686 //
5687 // If the add recurrence is poison in any iteration, it is poison on all
5688 // future iterations (since incrementing poison yields poison). If the result
5689 // of the add recurrence is fed into the loop latch condition and the loop
5690 // does not contain any throws or exiting blocks other than the latch, we now
5691 // have the ability to "choose" whether the backedge is taken or not (by
5692 // choosing a sufficiently evil value for the poison feeding into the branch)
5693 // for every iteration including and after the one in which \p I first became
5694 // poison. There are two possibilities (let's call the iteration in which \p
5695 // I first became poison K):
5696 //
5697 // 1. In the set of iterations including and after K, the loop body executes
5698 // no side effects. In this case executing the backedge an infinite number
5699 // of times will yield undefined behavior.
5700 //
5701 // 2. In the set of iterations including and after K, the loop body executes
5702 // at least one side effect. In this case, that specific instance of side
5703 // effect is control dependent on poison, which also yields undefined
5704 // behavior.
5705
5706 auto *ExitingBB = L->getExitingBlock();
5707 auto *LatchBB = L->getLoopLatch();
5708 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
5709 return false;
5710
5711 SmallPtrSet<const Instruction *, 16> Pushed;
5712 SmallVector<const Instruction *, 8> PoisonStack;
5713
5714 // We start by assuming \c I, the post-inc add recurrence, is poison. Only
5715 // things that are known to be fully poison under that assumption go on the
5716 // PoisonStack.
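// For example (illustrative): if I is an add that we assume to be poison,
// then a mul that uses I is pushed as well (multiplication propagates
// poison), while a conditional branch in the latch that uses a value from
// the stack makes the latch control dependent on poison.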
5717 Pushed.insert(I); 5718 PoisonStack.push_back(I); 5719 5720 bool LatchControlDependentOnPoison = false; 5721 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 5722 const Instruction *Poison = PoisonStack.pop_back_val(); 5723 5724 for (auto *PoisonUser : Poison->users()) { 5725 if (propagatesFullPoison(cast<Instruction>(PoisonUser))) { 5726 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 5727 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 5728 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 5729 assert(BI->isConditional() && "Only possibility!"); 5730 if (BI->getParent() == LatchBB) { 5731 LatchControlDependentOnPoison = true; 5732 break; 5733 } 5734 } 5735 } 5736 } 5737 5738 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 5739 } 5740 5741 ScalarEvolution::LoopProperties 5742 ScalarEvolution::getLoopProperties(const Loop *L) { 5743 using LoopProperties = ScalarEvolution::LoopProperties; 5744 5745 auto Itr = LoopPropertiesCache.find(L); 5746 if (Itr == LoopPropertiesCache.end()) { 5747 auto HasSideEffects = [](Instruction *I) { 5748 if (auto *SI = dyn_cast<StoreInst>(I)) 5749 return !SI->isSimple(); 5750 5751 return I->mayHaveSideEffects(); 5752 }; 5753 5754 LoopProperties LP = {/* HasNoAbnormalExits */ true, 5755 /*HasNoSideEffects*/ true}; 5756 5757 for (auto *BB : L->getBlocks()) 5758 for (auto &I : *BB) { 5759 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5760 LP.HasNoAbnormalExits = false; 5761 if (HasSideEffects(&I)) 5762 LP.HasNoSideEffects = false; 5763 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 5764 break; // We're already as pessimistic as we can get. 5765 } 5766 5767 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 5768 assert(InsertPair.second && "We just checked!"); 5769 Itr = InsertPair.first; 5770 } 5771 5772 return Itr->second; 5773 } 5774 5775 const SCEV *ScalarEvolution::createSCEV(Value *V) { 5776 if (!isSCEVable(V->getType())) 5777 return getUnknown(V); 5778 5779 if (Instruction *I = dyn_cast<Instruction>(V)) { 5780 // Don't attempt to analyze instructions in blocks that aren't 5781 // reachable. Such instructions don't matter, and they aren't required 5782 // to obey basic rules for definitions dominating uses which this 5783 // analysis depends on. 5784 if (!DT.isReachableFromEntry(I->getParent())) 5785 return getUnknown(V); 5786 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 5787 return getConstant(CI); 5788 else if (isa<ConstantPointerNull>(V)) 5789 return getZero(V->getType()); 5790 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 5791 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 5792 else if (!isa<ConstantExpr>(V)) 5793 return getUnknown(V); 5794 5795 Operator *U = cast<Operator>(V); 5796 if (auto BO = MatchBinaryOp(U, DT)) { 5797 switch (BO->Opcode) { 5798 case Instruction::Add: { 5799 // The simple thing to do would be to just call getSCEV on both operands 5800 // and call getAddExpr with the result. However if we're looking at a 5801 // bunch of things all added together, this can be quite inefficient, 5802 // because it leads to N-1 getAddExpr calls for N ultimate operands. 5803 // Instead, gather up all the operands and make a single getAddExpr call. 5804 // LLVM IR canonical form means we need only traverse the left operands. 
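// For example, for (((a + b) + c) + d) the loop below pushes d, c and b
// onto AddOps, then pushes a when the left operand is no longer an add,
// and makes a single getAddExpr({a, b, c, d}) call instead of three
// nested ones.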
5805 SmallVector<const SCEV *, 4> AddOps; 5806 do { 5807 if (BO->Op) { 5808 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5809 AddOps.push_back(OpSCEV); 5810 break; 5811 } 5812 5813 // If a NUW or NSW flag can be applied to the SCEV for this 5814 // addition, then compute the SCEV for this addition by itself 5815 // with a separate call to getAddExpr. We need to do that 5816 // instead of pushing the operands of the addition onto AddOps, 5817 // since the flags are only known to apply to this particular 5818 // addition - they may not apply to other additions that can be 5819 // formed with operands from AddOps. 5820 const SCEV *RHS = getSCEV(BO->RHS); 5821 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5822 if (Flags != SCEV::FlagAnyWrap) { 5823 const SCEV *LHS = getSCEV(BO->LHS); 5824 if (BO->Opcode == Instruction::Sub) 5825 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 5826 else 5827 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 5828 break; 5829 } 5830 } 5831 5832 if (BO->Opcode == Instruction::Sub) 5833 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 5834 else 5835 AddOps.push_back(getSCEV(BO->RHS)); 5836 5837 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5838 if (!NewBO || (NewBO->Opcode != Instruction::Add && 5839 NewBO->Opcode != Instruction::Sub)) { 5840 AddOps.push_back(getSCEV(BO->LHS)); 5841 break; 5842 } 5843 BO = NewBO; 5844 } while (true); 5845 5846 return getAddExpr(AddOps); 5847 } 5848 5849 case Instruction::Mul: { 5850 SmallVector<const SCEV *, 4> MulOps; 5851 do { 5852 if (BO->Op) { 5853 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5854 MulOps.push_back(OpSCEV); 5855 break; 5856 } 5857 5858 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5859 if (Flags != SCEV::FlagAnyWrap) { 5860 MulOps.push_back( 5861 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 5862 break; 5863 } 5864 } 5865 5866 MulOps.push_back(getSCEV(BO->RHS)); 5867 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5868 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 5869 MulOps.push_back(getSCEV(BO->LHS)); 5870 break; 5871 } 5872 BO = NewBO; 5873 } while (true); 5874 5875 return getMulExpr(MulOps); 5876 } 5877 case Instruction::UDiv: 5878 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5879 case Instruction::URem: 5880 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5881 case Instruction::Sub: { 5882 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5883 if (BO->Op) 5884 Flags = getNoWrapFlagsFromUB(BO->Op); 5885 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 5886 } 5887 case Instruction::And: 5888 // For an expression like x&255 that merely masks off the high bits, 5889 // use zext(trunc(x)) as the SCEV expression. 5890 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5891 if (CI->isZero()) 5892 return getSCEV(BO->RHS); 5893 if (CI->isMinusOne()) 5894 return getSCEV(BO->LHS); 5895 const APInt &A = CI->getValue(); 5896 5897 // Instcombine's ShrinkDemandedConstant may strip bits out of 5898 // constants, obscuring what would otherwise be a low-bits mask. 5899 // Use computeKnownBits to compute what ShrinkDemandedConstant 5900 // knew about to reconstruct a low-bits mask value. 
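// For example (illustrative): if the low three bits of %x are known to be
// zero, instcombine may have shrunk (%x & 0xff) to (%x & 0xf8); the known
// bits recover the original low-bits mask, and the expression is modeled
// below as roughly (zext (trunc (%x /u 8)) * 8).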
5901 unsigned LZ = A.countLeadingZeros(); 5902 unsigned TZ = A.countTrailingZeros(); 5903 unsigned BitWidth = A.getBitWidth(); 5904 KnownBits Known(BitWidth); 5905 computeKnownBits(BO->LHS, Known, getDataLayout(), 5906 0, &AC, nullptr, &DT); 5907 5908 APInt EffectiveMask = 5909 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 5910 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 5911 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 5912 const SCEV *LHS = getSCEV(BO->LHS); 5913 const SCEV *ShiftedLHS = nullptr; 5914 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 5915 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 5916 // For an expression like (x * 8) & 8, simplify the multiply. 5917 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 5918 unsigned GCD = std::min(MulZeros, TZ); 5919 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 5920 SmallVector<const SCEV*, 4> MulOps; 5921 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 5922 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 5923 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 5924 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 5925 } 5926 } 5927 if (!ShiftedLHS) 5928 ShiftedLHS = getUDivExpr(LHS, MulCount); 5929 return getMulExpr( 5930 getZeroExtendExpr( 5931 getTruncateExpr(ShiftedLHS, 5932 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 5933 BO->LHS->getType()), 5934 MulCount); 5935 } 5936 } 5937 break; 5938 5939 case Instruction::Or: 5940 // If the RHS of the Or is a constant, we may have something like: 5941 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 5942 // optimizations will transparently handle this case. 5943 // 5944 // In order for this transformation to be safe, the LHS must be of the 5945 // form X*(2^n) and the Or constant must be less than 2^n. 5946 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5947 const SCEV *LHS = getSCEV(BO->LHS); 5948 const APInt &CIVal = CI->getValue(); 5949 if (GetMinTrailingZeros(LHS) >= 5950 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 5951 // Build a plain add SCEV. 5952 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 5953 // If the LHS of the add was an addrec and it has no-wrap flags, 5954 // transfer the no-wrap flags, since an or won't introduce a wrap. 5955 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 5956 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 5957 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 5958 OldAR->getNoWrapFlags()); 5959 } 5960 return S; 5961 } 5962 } 5963 break; 5964 5965 case Instruction::Xor: 5966 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5967 // If the RHS of xor is -1, then this is a not operation. 5968 if (CI->isMinusOne()) 5969 return getNotSCEV(getSCEV(BO->LHS)); 5970 5971 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 5972 // This is a variant of the check for xor with -1, and it handles 5973 // the case where instcombine has trimmed non-demanded bits out 5974 // of an xor with -1. 
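// For example (illustrative): when only the low 8 bits of %x are demanded,
// instcombine may rewrite (xor %x, -1) as (xor (and %x, 255), 255). The
// SCEV for the 'and' is a zero extend of an i8 truncation, so the check
// below models the whole expression as (zext (not (trunc %x to i8))).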
5975 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 5976 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 5977 if (LBO->getOpcode() == Instruction::And && 5978 LCI->getValue() == CI->getValue()) 5979 if (const SCEVZeroExtendExpr *Z = 5980 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 5981 Type *UTy = BO->LHS->getType(); 5982 const SCEV *Z0 = Z->getOperand(); 5983 Type *Z0Ty = Z0->getType(); 5984 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 5985 5986 // If C is a low-bits mask, the zero extend is serving to 5987 // mask off the high bits. Complement the operand and 5988 // re-apply the zext. 5989 if (CI->getValue().isMask(Z0TySize)) 5990 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 5991 5992 // If C is a single bit, it may be in the sign-bit position 5993 // before the zero-extend. In this case, represent the xor 5994 // using an add, which is equivalent, and re-apply the zext. 5995 APInt Trunc = CI->getValue().trunc(Z0TySize); 5996 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 5997 Trunc.isSignMask()) 5998 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 5999 UTy); 6000 } 6001 } 6002 break; 6003 6004 case Instruction::Shl: 6005 // Turn shift left of a constant amount into a multiply. 6006 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 6007 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 6008 6009 // If the shift count is not less than the bitwidth, the result of 6010 // the shift is undefined. Don't try to analyze it, because the 6011 // resolution chosen here may differ from the resolution chosen in 6012 // other parts of the compiler. 6013 if (SA->getValue().uge(BitWidth)) 6014 break; 6015 6016 // It is currently not resolved how to interpret NSW for left 6017 // shift by BitWidth - 1, so we avoid applying flags in that 6018 // case. Remove this check (or this comment) once the situation 6019 // is resolved. See 6020 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html 6021 // and http://reviews.llvm.org/D8890 . 6022 auto Flags = SCEV::FlagAnyWrap; 6023 if (BO->Op && SA->getValue().ult(BitWidth - 1)) 6024 Flags = getNoWrapFlagsFromUB(BO->Op); 6025 6026 Constant *X = ConstantInt::get(getContext(), 6027 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 6028 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 6029 } 6030 break; 6031 6032 case Instruction::AShr: { 6033 // AShr X, C, where C is a constant. 6034 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 6035 if (!CI) 6036 break; 6037 6038 Type *OuterTy = BO->LHS->getType(); 6039 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 6040 // If the shift count is not less than the bitwidth, the result of 6041 // the shift is undefined. Don't try to analyze it, because the 6042 // resolution chosen here may differ from the resolution chosen in 6043 // other parts of the compiler. 6044 if (CI->getValue().uge(BitWidth)) 6045 break; 6046 6047 if (CI->isZero()) 6048 return getSCEV(BO->LHS); // shift by zero --> noop 6049 6050 uint64_t AShrAmt = CI->getZExtValue(); 6051 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 6052 6053 Operator *L = dyn_cast<Operator>(BO->LHS); 6054 if (L && L->getOpcode() == Instruction::Shl) { 6055 // X = Shl A, n 6056 // Y = AShr X, m 6057 // Both n and m are constant. 6058 6059 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 6060 if (L->getOperand(1) == BO->RHS) 6061 // For a two-shift sext-inreg, i.e. n = m, 6062 // use sext(trunc(x)) as the SCEV expression. 
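// For example (illustrative): on i32, (ashr (shl %a, 24), 24) is modeled
// as (sext (trunc %a to i8) to i32).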
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
            // expression. We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
            // ShlAmt - AShrAmt < BitWidth - AShrAmt.
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                           getConstant(Mul)), OuterTy);
          }
        }
      }
      break;
    }
    }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimizations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this
  // can lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through. Since
    // createNodeForSelectOrPHI only works for a condition that is an
    // `ICmpInst`, and constant expressions cannot have instructions as
    // operands, we'd have returned getUnknown for a select constant
    // expression anyway.
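    // Illustrative example (added commentary): a select constant expression
    // such as
    //   select (i1 icmp eq (i32 ptrtoint (i32* @g to i32), i32 0),
    //           i32 1, i32 2)
    // has a constant-expression condition rather than an ICmpInst, so letting
    // it fall through to getUnknown below loses nothing.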
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = CallSite(U).getReturnedArgOperand())
      return getSCEV(RV);
    break;
  }

  return getUnknown(V);
}

//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                                    BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (which is also the case if the trip count is simply
/// constant; use getSmallConstantTripCount for that case). Will also return 1
/// if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
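  // Worked example (added commentary): if the backedge-taken count is
  // (3 + (4 * %n)), the trip count expression below is (4 + (4 * %n)); it is
  // not a SCEVConstant, but it has at least two known trailing zero bits, so
  // the code returns 4 -- the trip count is always a multiple of 4.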
6222 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType())); 6223 6224 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr); 6225 if (!TC) 6226 // Attempt to factor more general cases. Returns the greatest power of 6227 // two divisor. If overflow happens, the trip count expression is still 6228 // divisible by the greatest power of 2 divisor returned. 6229 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr)); 6230 6231 ConstantInt *Result = TC->getValue(); 6232 6233 // Guard against huge trip counts (this requires checking 6234 // for zero to handle the case where the trip count == -1 and the 6235 // addition wraps). 6236 if (!Result || Result->getValue().getActiveBits() > 32 || 6237 Result->getValue().getActiveBits() == 0) 6238 return 1; 6239 6240 return (unsigned)Result->getZExtValue(); 6241 } 6242 6243 /// Get the expression for the number of loop iterations for which this loop is 6244 /// guaranteed not to exit via ExitingBlock. Otherwise return 6245 /// SCEVCouldNotCompute. 6246 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 6247 BasicBlock *ExitingBlock) { 6248 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 6249 } 6250 6251 const SCEV * 6252 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 6253 SCEVUnionPredicate &Preds) { 6254 return getPredicatedBackedgeTakenInfo(L).getExact(this, &Preds); 6255 } 6256 6257 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 6258 return getBackedgeTakenInfo(L).getExact(this); 6259 } 6260 6261 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is 6262 /// known never to be less than the actual backedge taken count. 6263 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 6264 return getBackedgeTakenInfo(L).getMax(this); 6265 } 6266 6267 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 6268 return getBackedgeTakenInfo(L).isMaxOrZero(this); 6269 } 6270 6271 /// Push PHI nodes in the header of the given loop onto the given Worklist. 6272 static void 6273 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 6274 BasicBlock *Header = L->getHeader(); 6275 6276 // Push all Loop-header PHIs onto the Worklist stack. 6277 for (BasicBlock::iterator I = Header->begin(); 6278 PHINode *PN = dyn_cast<PHINode>(I); ++I) 6279 Worklist.push_back(PN); 6280 } 6281 6282 const ScalarEvolution::BackedgeTakenInfo & 6283 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 6284 auto &BTI = getBackedgeTakenInfo(L); 6285 if (BTI.hasFullInfo()) 6286 return BTI; 6287 6288 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6289 6290 if (!Pair.second) 6291 return Pair.first->second; 6292 6293 BackedgeTakenInfo Result = 6294 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 6295 6296 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 6297 } 6298 6299 const ScalarEvolution::BackedgeTakenInfo & 6300 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 6301 // Initially insert an invalid entry for this loop. If the insertion 6302 // succeeds, proceed to actually compute a backedge-taken count and 6303 // update the value. The temporary CouldNotCompute value tells SCEV 6304 // code elsewhere that it shouldn't attempt to request a new 6305 // backedge-taken count, which could result in infinite recursion. 
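  // Illustrative note (added commentary): while computing the count for an
  // outer loop, SCEV construction may ask about this same loop again; the
  // placeholder inserted below makes that nested query return the temporary
  // CouldNotCompute value instead of recursing back into this function.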
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting
  // it into the BackedgeTakenCounts map transfers ownership. Otherwise, the
  // result must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  if (Result.getExact(this) != getCouldNotCompute()) {
    assert(isLoopInvariant(Result.getExact(this), L) &&
           isLoopInvariant(Result.getMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  } else if (Result.getMax(this) == getCouldNotCompute() &&
             isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Visited;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second)
        continue;

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old, false);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = std::move(Result);
}

void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value.
  auto RemoveLoopFromBackedgeMap =
      [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
        auto BTCPos = Map.find(L);
        if (BTCPos != Map.end()) {
          BTCPos->second.clear();
          Map.erase(BTCPos);
        }
      };

  SmallVector<const Loop *, 16> LoopWorklist(1, L);
  SmallVector<Instruction *, 32> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;

  // Iterate over all the loops and sub-loops to drop SCEV information.
  while (!LoopWorklist.empty()) {
    auto *CurrL = LoopWorklist.pop_back_val();

    RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
    RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);

    // Drop information about predicated SCEV rewrites for this loop.
    for (auto I = PredicatedSCEVRewrites.begin();
         I != PredicatedSCEVRewrites.end();) {
      std::pair<const SCEV *, const Loop *> Entry = I->first;
      if (Entry.second == CurrL)
        PredicatedSCEVRewrites.erase(I++);
      else
        ++I;
    }

    auto LoopUsersItr = LoopUsers.find(CurrL);
    if (LoopUsersItr != LoopUsers.end()) {
      for (auto *S : LoopUsersItr->second)
        forgetMemoizedResults(S);
      LoopUsers.erase(LoopUsersItr);
    }

    // Drop information about expressions based on loop-header PHIs.
    PushLoopPHIs(CurrL, Worklist);

    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second)
        continue;

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(It->second);
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }

    // Erasing an entry invalidates the iterator pointing at it, so advance
    // the iterator before erasing (mirroring the PredicatedSCEVRewrites loop
    // above) rather than erasing and then incrementing.
    for (auto I = ExitLimits.begin(); I != ExitLimits.end();) {
      auto &Query = I->first;
      if (Query.L == CurrL)
        ExitLimits.erase(I++);
      else
        ++I;
    }

    LoopPropertiesCache.erase(CurrL);
    // Forget all contained loops too, to avoid dangling entries in the
    // ValuesAtScopes map.
    LoopWorklist.append(CurrL->begin(), CurrL->end());
  }
}

void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}

/// Get the exact loop backedge taken count considering all loop exits. A
/// computable result can only be returned for loops with a single exit.
/// Returning the minimum taken count among all exits is incorrect because
/// one of the loop's exit limits may have been skipped. howFarToZero assumes
/// that the limit of each loop test is never skipped. This is a valid
/// assumption as long as the loop exits via that test. For precise results,
/// it is the caller's responsibility to specify the relevant loop exit using
/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE,
                                             SCEVUnionPredicate *Preds) const {
  // If any exits were not computable, the loop is not computable.
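  // Illustrative note (added commentary): if two computable exits both have
  // a not-taken count of 10, getExact returns 10; if one is 10 and the other
  // is 20, the per-exit counts disagree and the result is CouldNotCompute.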
6485 if (!isComplete() || ExitNotTaken.empty()) 6486 return SE->getCouldNotCompute(); 6487 6488 const SCEV *BECount = nullptr; 6489 for (auto &ENT : ExitNotTaken) { 6490 assert(ENT.ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV"); 6491 6492 if (!BECount) 6493 BECount = ENT.ExactNotTaken; 6494 else if (BECount != ENT.ExactNotTaken) 6495 return SE->getCouldNotCompute(); 6496 if (Preds && !ENT.hasAlwaysTruePredicate()) 6497 Preds->add(ENT.Predicate.get()); 6498 6499 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6500 "Predicate should be always true!"); 6501 } 6502 6503 assert(BECount && "Invalid not taken count for loop exit"); 6504 return BECount; 6505 } 6506 6507 /// Get the exact not taken count for this loop exit. 6508 const SCEV * 6509 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 6510 ScalarEvolution *SE) const { 6511 for (auto &ENT : ExitNotTaken) 6512 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6513 return ENT.ExactNotTaken; 6514 6515 return SE->getCouldNotCompute(); 6516 } 6517 6518 /// getMax - Get the max backedge taken count for the loop. 6519 const SCEV * 6520 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6521 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6522 return !ENT.hasAlwaysTruePredicate(); 6523 }; 6524 6525 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6526 return SE->getCouldNotCompute(); 6527 6528 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6529 "No point in having a non-constant max backedge taken count!"); 6530 return getMax(); 6531 } 6532 6533 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6534 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6535 return !ENT.hasAlwaysTruePredicate(); 6536 }; 6537 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6538 } 6539 6540 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6541 ScalarEvolution *SE) const { 6542 if (getMax() && getMax() != SE->getCouldNotCompute() && 6543 SE->hasOperand(getMax(), S)) 6544 return true; 6545 6546 for (auto &ENT : ExitNotTaken) 6547 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6548 SE->hasOperand(ENT.ExactNotTaken, S)) 6549 return true; 6550 6551 return false; 6552 } 6553 6554 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6555 : ExactNotTaken(E), MaxNotTaken(E) { 6556 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6557 isa<SCEVConstant>(MaxNotTaken)) && 6558 "No point in having a non-constant max backedge taken count!"); 6559 } 6560 6561 ScalarEvolution::ExitLimit::ExitLimit( 6562 const SCEV *E, const SCEV *M, bool MaxOrZero, 6563 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6564 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6565 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6566 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6567 "Exact is not allowed to be less precise than Max"); 6568 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6569 isa<SCEVConstant>(MaxNotTaken)) && 6570 "No point in having a non-constant max backedge taken count!"); 6571 for (auto *PredSet : PredSetList) 6572 for (auto *P : *PredSet) 6573 addPredicate(P); 6574 } 6575 6576 ScalarEvolution::ExitLimit::ExitLimit( 6577 const SCEV *E, const SCEV *M, bool MaxOrZero, 6578 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6579 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6580 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6581 
isa<SCEVConstant>(MaxNotTaken)) && 6582 "No point in having a non-constant max backedge taken count!"); 6583 } 6584 6585 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6586 bool MaxOrZero) 6587 : ExitLimit(E, M, MaxOrZero, None) { 6588 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6589 isa<SCEVConstant>(MaxNotTaken)) && 6590 "No point in having a non-constant max backedge taken count!"); 6591 } 6592 6593 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6594 /// computable exit into a persistent ExitNotTakenInfo array. 6595 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 6596 SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 6597 &&ExitCounts, 6598 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 6599 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 6600 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6601 6602 ExitNotTaken.reserve(ExitCounts.size()); 6603 std::transform( 6604 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 6605 [&](const EdgeExitInfo &EEI) { 6606 BasicBlock *ExitBB = EEI.first; 6607 const ExitLimit &EL = EEI.second; 6608 if (EL.Predicates.empty()) 6609 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 6610 6611 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 6612 for (auto *Pred : EL.Predicates) 6613 Predicate->add(Pred); 6614 6615 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 6616 }); 6617 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 6618 "No point in having a non-constant max backedge taken count!"); 6619 } 6620 6621 /// Invalidate this result and free the ExitNotTakenInfo array. 6622 void ScalarEvolution::BackedgeTakenInfo::clear() { 6623 ExitNotTaken.clear(); 6624 } 6625 6626 /// Compute the number of times the backedge of the specified loop will execute. 6627 ScalarEvolution::BackedgeTakenInfo 6628 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 6629 bool AllowPredicates) { 6630 SmallVector<BasicBlock *, 8> ExitingBlocks; 6631 L->getExitingBlocks(ExitingBlocks); 6632 6633 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6634 6635 SmallVector<EdgeExitInfo, 4> ExitCounts; 6636 bool CouldComputeBECount = true; 6637 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 6638 const SCEV *MustExitMaxBECount = nullptr; 6639 const SCEV *MayExitMaxBECount = nullptr; 6640 bool MustExitMaxOrZero = false; 6641 6642 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 6643 // and compute maxBECount. 6644 // Do a union of all the predicates here. 6645 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 6646 BasicBlock *ExitBB = ExitingBlocks[i]; 6647 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 6648 6649 assert((AllowPredicates || EL.Predicates.empty()) && 6650 "Predicated exit limit when predicates are not allowed!"); 6651 6652 // 1. For each exit that can be computed, add an entry to ExitCounts. 6653 // CouldComputeBECount is true only if all exits can be computed. 6654 if (EL.ExactNotTaken == getCouldNotCompute()) 6655 // We couldn't compute an exact value for this exit, so 6656 // we won't be able to compute an exact value for the loop. 6657 CouldComputeBECount = false; 6658 else 6659 ExitCounts.emplace_back(ExitBB, EL); 6660 6661 // 2. Derive the loop's MaxBECount from each exit's max number of 6662 // non-exiting iterations. 
Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit, otherwise
    // it is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
      (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  ExitLimitQuery Query(L, ExitingBlock, AllowPredicates);
  auto MaybeEL = ExitLimits.find(Query);
  if (MaybeEL != ExitLimits.end())
    return MaybeEL->second;
  ExitLimit EL = computeExitLimitImpl(L, ExitingBlock, AllowPredicates);
  ExitLimits.insert({Query, EL});
  return EL;
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitImpl(const Loop *L, BasicBlock *ExitingBlock,
                                      bool AllowPredicates) {
  // Okay, we've chosen an exiting block. See what condition causes us to
  // exit at this block and remember the exit block and whether all other
  // targets lead to the loop header.
  bool MustExecuteLoopHeader = true;
  BasicBlock *Exit = nullptr;
  for (auto *SBB : successors(ExitingBlock))
    if (!L->contains(SBB)) {
      if (Exit) // Multiple exit successors.
        return getCouldNotCompute();
      Exit = SBB;
    } else if (SBB != L->getHeader()) {
      MustExecuteLoopHeader = false;
    }

  // At this point, we know we have a conditional branch that determines
  // whether the loop is exited. However, we don't know if the branch is
  // executed each time through the loop. If not, then the execution count of
  // the branch will not be equal to the trip count of the loop.
  //
  // Currently we check for this by checking to see if the Exit branch goes to
  // the loop header. If so, we know it will always execute the same number of
  // times as the loop. We also handle the case where the exit block *is* the
  // loop header. This is common for un-rotated loops.
  //
  // If both of those tests fail, walk up the unique predecessor chain to the
  // header, stopping if there is an edge that doesn't exit the loop.
If the 6739 // header is reached, the execution count of the branch will be equal to the 6740 // trip count of the loop. 6741 // 6742 // More extensive analysis could be done to handle more cases here. 6743 // 6744 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) { 6745 // The simple checks failed, try climbing the unique predecessor chain 6746 // up to the header. 6747 bool Ok = false; 6748 for (BasicBlock *BB = ExitingBlock; BB; ) { 6749 BasicBlock *Pred = BB->getUniquePredecessor(); 6750 if (!Pred) 6751 return getCouldNotCompute(); 6752 TerminatorInst *PredTerm = Pred->getTerminator(); 6753 for (const BasicBlock *PredSucc : PredTerm->successors()) { 6754 if (PredSucc == BB) 6755 continue; 6756 // If the predecessor has a successor that isn't BB and isn't 6757 // outside the loop, assume the worst. 6758 if (L->contains(PredSucc)) 6759 return getCouldNotCompute(); 6760 } 6761 if (Pred == L->getHeader()) { 6762 Ok = true; 6763 break; 6764 } 6765 BB = Pred; 6766 } 6767 if (!Ok) 6768 return getCouldNotCompute(); 6769 } 6770 6771 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 6772 TerminatorInst *Term = ExitingBlock->getTerminator(); 6773 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 6774 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 6775 // Proceed to the next level to examine the exit condition expression. 6776 return computeExitLimitFromCond( 6777 L, BI->getCondition(), BI->getSuccessor(0), BI->getSuccessor(1), 6778 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 6779 } 6780 6781 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) 6782 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 6783 /*ControlsExit=*/IsOnlyExit); 6784 6785 return getCouldNotCompute(); 6786 } 6787 6788 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 6789 const Loop *L, Value *ExitCond, BasicBlock *TBB, BasicBlock *FBB, 6790 bool ControlsExit, bool AllowPredicates) { 6791 ScalarEvolution::ExitLimitCacheTy Cache(L, TBB, FBB, AllowPredicates); 6792 return computeExitLimitFromCondCached(Cache, L, ExitCond, TBB, FBB, 6793 ControlsExit, AllowPredicates); 6794 } 6795 6796 Optional<ScalarEvolution::ExitLimit> 6797 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 6798 BasicBlock *TBB, BasicBlock *FBB, 6799 bool ControlsExit, bool AllowPredicates) { 6800 (void)this->L; 6801 (void)this->TBB; 6802 (void)this->FBB; 6803 (void)this->AllowPredicates; 6804 6805 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6806 this->AllowPredicates == AllowPredicates && 6807 "Variance in assumed invariant key components!"); 6808 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 6809 if (Itr == TripCountMap.end()) 6810 return None; 6811 return Itr->second; 6812 } 6813 6814 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 6815 BasicBlock *TBB, BasicBlock *FBB, 6816 bool ControlsExit, 6817 bool AllowPredicates, 6818 const ExitLimit &EL) { 6819 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6820 this->AllowPredicates == AllowPredicates && 6821 "Variance in assumed invariant key components!"); 6822 6823 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 6824 assert(InsertResult.second && "Expected successful insertion!"); 6825 (void)InsertResult; 6826 } 6827 6828 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 6829 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6830 BasicBlock *FBB, bool ControlsExit, bool 
AllowPredicates) { 6831 6832 if (auto MaybeEL = 6833 Cache.find(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates)) 6834 return *MaybeEL; 6835 6836 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, TBB, FBB, 6837 ControlsExit, AllowPredicates); 6838 Cache.insert(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates, EL); 6839 return EL; 6840 } 6841 6842 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 6843 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6844 BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) { 6845 // Check if the controlling expression for this loop is an And or Or. 6846 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 6847 if (BO->getOpcode() == Instruction::And) { 6848 // Recurse on the operands of the and. 6849 bool EitherMayExit = L->contains(TBB); 6850 ExitLimit EL0 = computeExitLimitFromCondCached( 6851 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6852 AllowPredicates); 6853 ExitLimit EL1 = computeExitLimitFromCondCached( 6854 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6855 AllowPredicates); 6856 const SCEV *BECount = getCouldNotCompute(); 6857 const SCEV *MaxBECount = getCouldNotCompute(); 6858 if (EitherMayExit) { 6859 // Both conditions must be true for the loop to continue executing. 6860 // Choose the less conservative count. 6861 if (EL0.ExactNotTaken == getCouldNotCompute() || 6862 EL1.ExactNotTaken == getCouldNotCompute()) 6863 BECount = getCouldNotCompute(); 6864 else 6865 BECount = 6866 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6867 if (EL0.MaxNotTaken == getCouldNotCompute()) 6868 MaxBECount = EL1.MaxNotTaken; 6869 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6870 MaxBECount = EL0.MaxNotTaken; 6871 else 6872 MaxBECount = 6873 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6874 } else { 6875 // Both conditions must be true at the same time for the loop to exit. 6876 // For now, be conservative. 6877 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 6878 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6879 MaxBECount = EL0.MaxNotTaken; 6880 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6881 BECount = EL0.ExactNotTaken; 6882 } 6883 6884 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 6885 // to be more aggressive when computing BECount than when computing 6886 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 6887 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 6888 // to not. 6889 if (isa<SCEVCouldNotCompute>(MaxBECount) && 6890 !isa<SCEVCouldNotCompute>(BECount)) 6891 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 6892 6893 return ExitLimit(BECount, MaxBECount, false, 6894 {&EL0.Predicates, &EL1.Predicates}); 6895 } 6896 if (BO->getOpcode() == Instruction::Or) { 6897 // Recurse on the operands of the or. 6898 bool EitherMayExit = L->contains(FBB); 6899 ExitLimit EL0 = computeExitLimitFromCondCached( 6900 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6901 AllowPredicates); 6902 ExitLimit EL1 = computeExitLimitFromCondCached( 6903 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6904 AllowPredicates); 6905 const SCEV *BECount = getCouldNotCompute(); 6906 const SCEV *MaxBECount = getCouldNotCompute(); 6907 if (EitherMayExit) { 6908 // Both conditions must be false for the loop to continue executing. 6909 // Choose the less conservative count. 
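          // Worked example (added commentary): for an exit condition
          // "%a or %b" where %a first becomes true after 10 iterations and
          // %b after 20, the loop exits as soon as either fires, so the
          // combined exact count below is umin(10, 20) = 10.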
6910 if (EL0.ExactNotTaken == getCouldNotCompute() || 6911 EL1.ExactNotTaken == getCouldNotCompute()) 6912 BECount = getCouldNotCompute(); 6913 else 6914 BECount = 6915 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6916 if (EL0.MaxNotTaken == getCouldNotCompute()) 6917 MaxBECount = EL1.MaxNotTaken; 6918 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6919 MaxBECount = EL0.MaxNotTaken; 6920 else 6921 MaxBECount = 6922 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6923 } else { 6924 // Both conditions must be false at the same time for the loop to exit. 6925 // For now, be conservative. 6926 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 6927 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6928 MaxBECount = EL0.MaxNotTaken; 6929 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6930 BECount = EL0.ExactNotTaken; 6931 } 6932 6933 return ExitLimit(BECount, MaxBECount, false, 6934 {&EL0.Predicates, &EL1.Predicates}); 6935 } 6936 } 6937 6938 // With an icmp, it may be feasible to compute an exact backedge-taken count. 6939 // Proceed to the next level to examine the icmp. 6940 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 6941 ExitLimit EL = 6942 computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit); 6943 if (EL.hasFullInfo() || !AllowPredicates) 6944 return EL; 6945 6946 // Try again, but use SCEV predicates this time. 6947 return computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit, 6948 /*AllowPredicates=*/true); 6949 } 6950 6951 // Check for a constant condition. These are normally stripped out by 6952 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 6953 // preserve the CFG and is temporarily leaving constant conditions 6954 // in place. 6955 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 6956 if (L->contains(FBB) == !CI->getZExtValue()) 6957 // The backedge is always taken. 6958 return getCouldNotCompute(); 6959 else 6960 // The backedge is never taken. 6961 return getZero(CI->getType()); 6962 } 6963 6964 // If it's not an integer or pointer comparison then compute it the hard way. 6965 return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 6966 } 6967 6968 ScalarEvolution::ExitLimit 6969 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 6970 ICmpInst *ExitCond, 6971 BasicBlock *TBB, 6972 BasicBlock *FBB, 6973 bool ControlsExit, 6974 bool AllowPredicates) { 6975 // If the condition was exit on true, convert the condition to exit on false 6976 ICmpInst::Predicate Cond; 6977 if (!L->contains(FBB)) 6978 Cond = ExitCond->getPredicate(); 6979 else 6980 Cond = ExitCond->getInversePredicate(); 6981 6982 // Handle common loops like: for (X = "string"; *X; ++X) 6983 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 6984 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 6985 ExitLimit ItCnt = 6986 computeLoadConstantCompareExitLimit(LI, RHS, L, Cond); 6987 if (ItCnt.hasAnyInfo()) 6988 return ItCnt; 6989 } 6990 6991 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 6992 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 6993 6994 // Try to evaluate any dependencies out of the loop. 6995 LHS = getSCEVAtScope(LHS, L); 6996 RHS = getSCEVAtScope(RHS, L); 6997 6998 // At this point, we would like to compute how many iterations of the 6999 // loop the predicate will return true for these inputs. 7000 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7001 // If there is a loop-invariant, force it into the RHS. 
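    // Illustrative example (added commentary): a test like
    // "icmp ult i32 100, %iv" becomes "icmp ugt i32 %iv, 100" here, so the
    // code below only has to reason about a loop-varying LHS against a
    // loop-invariant RHS.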
7002 std::swap(LHS, RHS); 7003 Cond = ICmpInst::getSwappedPredicate(Cond); 7004 } 7005 7006 // Simplify the operands before analyzing them. 7007 (void)SimplifyICmpOperands(Cond, LHS, RHS); 7008 7009 // If we have a comparison of a chrec against a constant, try to use value 7010 // ranges to answer this query. 7011 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7012 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7013 if (AddRec->getLoop() == L) { 7014 // Form the constant range. 7015 ConstantRange CompRange = 7016 ConstantRange::makeExactICmpRegion(Cond, RHSC->getAPInt()); 7017 7018 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7019 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7020 } 7021 7022 switch (Cond) { 7023 case ICmpInst::ICMP_NE: { // while (X != Y) 7024 // Convert to: while (X-Y != 0) 7025 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7026 AllowPredicates); 7027 if (EL.hasAnyInfo()) return EL; 7028 break; 7029 } 7030 case ICmpInst::ICMP_EQ: { // while (X == Y) 7031 // Convert to: while (X-Y == 0) 7032 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7033 if (EL.hasAnyInfo()) return EL; 7034 break; 7035 } 7036 case ICmpInst::ICMP_SLT: 7037 case ICmpInst::ICMP_ULT: { // while (X < Y) 7038 bool IsSigned = Cond == ICmpInst::ICMP_SLT; 7039 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7040 AllowPredicates); 7041 if (EL.hasAnyInfo()) return EL; 7042 break; 7043 } 7044 case ICmpInst::ICMP_SGT: 7045 case ICmpInst::ICMP_UGT: { // while (X > Y) 7046 bool IsSigned = Cond == ICmpInst::ICMP_SGT; 7047 ExitLimit EL = 7048 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7049 AllowPredicates); 7050 if (EL.hasAnyInfo()) return EL; 7051 break; 7052 } 7053 default: 7054 break; 7055 } 7056 7057 auto *ExhaustiveCount = 7058 computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 7059 7060 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7061 return ExhaustiveCount; 7062 7063 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7064 ExitCond->getOperand(1), L, Cond); 7065 } 7066 7067 ScalarEvolution::ExitLimit 7068 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7069 SwitchInst *Switch, 7070 BasicBlock *ExitingBlock, 7071 bool ControlsExit) { 7072 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7073 7074 // Give up if the exit is the default dest of a switch. 7075 if (Switch->getDefaultDest() == ExitingBlock) 7076 return getCouldNotCompute(); 7077 7078 assert(L->contains(Switch->getDefaultDest()) && 7079 "Default case must not exit the loop!"); 7080 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7081 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7082 7083 // while (X != Y) --> while (X-Y != 0) 7084 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7085 if (EL.hasAnyInfo()) 7086 return EL; 7087 7088 return getCouldNotCompute(); 7089 } 7090 7091 static ConstantInt * 7092 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7093 ScalarEvolution &SE) { 7094 const SCEV *InVal = SE.getConstant(C); 7095 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7096 assert(isa<SCEVConstant>(Val) && 7097 "Evaluation of SCEV at constant didn't fold correctly?"); 7098 return cast<SCEVConstant>(Val)->getValue(); 7099 } 7100 7101 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7102 /// compute the backedge execution count. 
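/// Illustrative example (added commentary): this recognizes loops such as
///
///   static const int Table[] = {3, 1, 4, 1, 5, 0};
///   for (i = 0; Table[i] != 0; ++i) { ... }
///
/// where the load is from a constant global indexed by an affine AddRec; the
/// compare is folded iteration by iteration until it becomes false, which
/// yields the exact backedge-taken count.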
7103 ScalarEvolution::ExitLimit 7104 ScalarEvolution::computeLoadConstantCompareExitLimit( 7105 LoadInst *LI, 7106 Constant *RHS, 7107 const Loop *L, 7108 ICmpInst::Predicate predicate) { 7109 if (LI->isVolatile()) return getCouldNotCompute(); 7110 7111 // Check to see if the loaded pointer is a getelementptr of a global. 7112 // TODO: Use SCEV instead of manually grubbing with GEPs. 7113 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 7114 if (!GEP) return getCouldNotCompute(); 7115 7116 // Make sure that it is really a constant global we are gepping, with an 7117 // initializer, and make sure the first IDX is really 0. 7118 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 7119 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 7120 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 7121 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 7122 return getCouldNotCompute(); 7123 7124 // Okay, we allow one non-constant index into the GEP instruction. 7125 Value *VarIdx = nullptr; 7126 std::vector<Constant*> Indexes; 7127 unsigned VarIdxNum = 0; 7128 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 7129 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 7130 Indexes.push_back(CI); 7131 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 7132 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. 7133 VarIdx = GEP->getOperand(i); 7134 VarIdxNum = i-2; 7135 Indexes.push_back(nullptr); 7136 } 7137 7138 // Loop-invariant loads may be a byproduct of loop optimization. Skip them. 7139 if (!VarIdx) 7140 return getCouldNotCompute(); 7141 7142 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 7143 // Check to see if X is a loop variant variable value now. 7144 const SCEV *Idx = getSCEV(VarIdx); 7145 Idx = getSCEVAtScope(Idx, L); 7146 7147 // We can only recognize very limited forms of loop index expressions, in 7148 // particular, only affine AddRec's like {C1,+,C2}. 7149 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 7150 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) || 7151 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 7152 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 7153 return getCouldNotCompute(); 7154 7155 unsigned MaxSteps = MaxBruteForceIterations; 7156 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 7157 ConstantInt *ItCst = ConstantInt::get( 7158 cast<IntegerType>(IdxExpr->getType()), IterationNum); 7159 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 7160 7161 // Form the GEP offset. 7162 Indexes[VarIdxNum] = Val; 7163 7164 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(), 7165 Indexes); 7166 if (!Result) break; // Cannot compute! 7167 7168 // Evaluate the condition for this iteration. 7169 Result = ConstantExpr::getICmp(predicate, Result, RHS); 7170 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 7171 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 7172 ++NumArrayLenItCounts; 7173 return getConstant(ItCst); // Found terminating iteration! 
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and the shift opcode in OutOpCode.
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted
  // in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match. Return the corresponding PHI node
  // (%iv above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations.
  // If the condition guarding the backedge (in the sense that the backedge
  // is taken if the condition is true) is false for the value the shift
  // recurrence stabilizes to, then we know that the backedge is taken only a
  // finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to 0 if K is non-negative and
    // to -1 if K is negative, in at most bitwidth(K) iterations.
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}

/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the
  // operands are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header
/// phi.
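/// Illustrative example (added commentary): in
///
///   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
///   %t = mul i32 %iv, 3
///   %iv.next = add i32 %t, 1
///
/// every operand chain for %iv.next leads back to the single header phi %iv,
/// so the recursion below returns %iv.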
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from. We allow arbitrary operations along
/// the way, but the operands of an operation must either be constants or a
/// value derived from a constant PHI. If this expression does not fit with
/// these constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for
/// some reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that
  // we weren't given a mapping for, or a value such as a call inside the
  // loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
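  // Illustrative case (added commentary): a header phi whose start value
  // from outside the loop is not a constant never gets an entry in Vals, so
  // we conservatively fail here instead of guessing its value.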
7432 if (isa<PHINode>(I)) return nullptr; 7433 7434 std::vector<Constant*> Operands(I->getNumOperands()); 7435 7436 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 7437 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 7438 if (!Operand) { 7439 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 7440 if (!Operands[i]) return nullptr; 7441 continue; 7442 } 7443 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 7444 Vals[Operand] = C; 7445 if (!C) return nullptr; 7446 Operands[i] = C; 7447 } 7448 7449 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 7450 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7451 Operands[1], DL, TLI); 7452 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 7453 if (!LI->isVolatile()) 7454 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7455 } 7456 return ConstantFoldInstOperands(I, Operands, DL, TLI); 7457 } 7458 7459 7460 // If every incoming value to PN except the one for BB is a specific Constant, 7461 // return that, else return nullptr. 7462 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7463 Constant *IncomingVal = nullptr; 7464 7465 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7466 if (PN->getIncomingBlock(i) == BB) 7467 continue; 7468 7469 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7470 if (!CurrentVal) 7471 return nullptr; 7472 7473 if (IncomingVal != CurrentVal) { 7474 if (IncomingVal) 7475 return nullptr; 7476 IncomingVal = CurrentVal; 7477 } 7478 } 7479 7480 return IncomingVal; 7481 } 7482 7483 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7484 /// in the header of its containing loop, we know the loop executes a 7485 /// constant number of times, and the PHI node is just a recurrence 7486 /// involving constants, fold it. 7487 Constant * 7488 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7489 const APInt &BEs, 7490 const Loop *L) { 7491 auto I = ConstantEvolutionLoopExitValue.find(PN); 7492 if (I != ConstantEvolutionLoopExitValue.end()) 7493 return I->second; 7494 7495 if (BEs.ugt(MaxBruteForceIterations)) 7496 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7497 7498 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7499 7500 DenseMap<Instruction *, Constant *> CurrentIterVals; 7501 BasicBlock *Header = L->getHeader(); 7502 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7503 7504 BasicBlock *Latch = L->getLoopLatch(); 7505 if (!Latch) 7506 return nullptr; 7507 7508 for (auto &I : *Header) { 7509 PHINode *PHI = dyn_cast<PHINode>(&I); 7510 if (!PHI) break; 7511 auto *StartCST = getOtherIncomingValue(PHI, Latch); 7512 if (!StartCST) continue; 7513 CurrentIterVals[PHI] = StartCST; 7514 } 7515 if (!CurrentIterVals.count(PN)) 7516 return RetVal = nullptr; 7517 7518 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7519 7520 // Execute the loop symbolically to determine the exit value. 7521 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 7522 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 7523 7524 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7525 unsigned IterationNum = 0; 7526 const DataLayout &DL = getDataLayout(); 7527 for (; ; ++IterationNum) { 7528 if (IterationNum == NumIterations) 7529 return RetVal = CurrentIterVals[PN]; // Got exit value! 7530 7531 // Compute the value of the PHIs for the next iteration. 
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr; // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes. However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) { // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (auto &I : *Header) {
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    auto *StartCST = getOtherIncomingValue(PHI, Latch);
    if (!StartCST) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we have found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
7619 DenseMap<Instruction *, Constant *> NextIterVals; 7620 7621 // Create a list of which PHIs we need to compute. We want to do this before 7622 // calling EvaluateExpression on them because that may invalidate iterators 7623 // into CurrentIterVals. 7624 SmallVector<PHINode *, 8> PHIsToCompute; 7625 for (const auto &I : CurrentIterVals) { 7626 PHINode *PHI = dyn_cast<PHINode>(I.first); 7627 if (!PHI || PHI->getParent() != Header) continue; 7628 PHIsToCompute.push_back(PHI); 7629 } 7630 for (PHINode *PHI : PHIsToCompute) { 7631 Constant *&NextPHI = NextIterVals[PHI]; 7632 if (NextPHI) continue; // Already computed! 7633 7634 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 7635 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7636 } 7637 CurrentIterVals.swap(NextIterVals); 7638 } 7639 7640 // Too many iterations were needed to evaluate. 7641 return getCouldNotCompute(); 7642 } 7643 7644 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 7645 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 7646 ValuesAtScopes[V]; 7647 // Check to see if we've folded this expression at this loop before. 7648 for (auto &LS : Values) 7649 if (LS.first == L) 7650 return LS.second ? LS.second : V; 7651 7652 Values.emplace_back(L, nullptr); 7653 7654 // Otherwise compute it. 7655 const SCEV *C = computeSCEVAtScope(V, L); 7656 for (auto &LS : reverse(ValuesAtScopes[V])) 7657 if (LS.first == L) { 7658 LS.second = C; 7659 break; 7660 } 7661 return C; 7662 } 7663 7664 /// This builds up a Constant using the ConstantExpr interface. That way, we 7665 /// will return Constants for objects which aren't represented by a 7666 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 7667 /// Returns NULL if the SCEV isn't representable as a Constant. 7668 static Constant *BuildConstantFromSCEV(const SCEV *V) { 7669 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 7670 case scCouldNotCompute: 7671 case scAddRecExpr: 7672 break; 7673 case scConstant: 7674 return cast<SCEVConstant>(V)->getValue(); 7675 case scUnknown: 7676 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 7677 case scSignExtend: { 7678 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 7679 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 7680 return ConstantExpr::getSExt(CastOp, SS->getType()); 7681 break; 7682 } 7683 case scZeroExtend: { 7684 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 7685 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 7686 return ConstantExpr::getZExt(CastOp, SZ->getType()); 7687 break; 7688 } 7689 case scTruncate: { 7690 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 7691 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 7692 return ConstantExpr::getTrunc(CastOp, ST->getType()); 7693 break; 7694 } 7695 case scAddExpr: { 7696 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 7697 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 7698 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7699 unsigned AS = PTy->getAddressSpace(); 7700 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7701 C = ConstantExpr::getBitCast(C, DestPtrTy); 7702 } 7703 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 7704 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 7705 if (!C2) return nullptr; 7706 7707 // First pointer! 
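        // (If C2 is the first pointer-typed operand seen so far, it is
        // swapped into C below so that the GEP-based pointer arithmetic
        // applies; the remaining operands then contribute integer byte
        // offsets.)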
7708 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 7709 unsigned AS = C2->getType()->getPointerAddressSpace(); 7710 std::swap(C, C2); 7711 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7712 // The offsets have been converted to bytes. We can add bytes to an 7713 // i8* by GEP with the byte count in the first index. 7714 C = ConstantExpr::getBitCast(C, DestPtrTy); 7715 } 7716 7717 // Don't bother trying to sum two pointers. We probably can't 7718 // statically compute a load that results from it anyway. 7719 if (C2->getType()->isPointerTy()) 7720 return nullptr; 7721 7722 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7723 if (PTy->getElementType()->isStructTy()) 7724 C2 = ConstantExpr::getIntegerCast( 7725 C2, Type::getInt32Ty(C->getContext()), true); 7726 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); 7727 } else 7728 C = ConstantExpr::getAdd(C, C2); 7729 } 7730 return C; 7731 } 7732 break; 7733 } 7734 case scMulExpr: { 7735 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 7736 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 7737 // Don't bother with pointers at all. 7738 if (C->getType()->isPointerTy()) return nullptr; 7739 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 7740 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 7741 if (!C2 || C2->getType()->isPointerTy()) return nullptr; 7742 C = ConstantExpr::getMul(C, C2); 7743 } 7744 return C; 7745 } 7746 break; 7747 } 7748 case scUDivExpr: { 7749 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 7750 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 7751 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 7752 if (LHS->getType() == RHS->getType()) 7753 return ConstantExpr::getUDiv(LHS, RHS); 7754 break; 7755 } 7756 case scSMaxExpr: 7757 case scUMaxExpr: 7758 break; // TODO: smax, umax. 7759 } 7760 return nullptr; 7761 } 7762 7763 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 7764 if (isa<SCEVConstant>(V)) return V; 7765 7766 // If this instruction is evolved from a constant-evolving PHI, compute the 7767 // exit value from the loop without using SCEVs. 7768 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 7769 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 7770 const Loop *LI = this->LI[I->getParent()]; 7771 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 7772 if (PHINode *PN = dyn_cast<PHINode>(I)) 7773 if (PN->getParent() == LI->getHeader()) { 7774 // Okay, there is no closed form solution for the PHI node. Check 7775 // to see if the loop that contains it has a known backedge-taken 7776 // count. If so, we may be able to force computation of the exit 7777 // value. 7778 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 7779 if (const SCEVConstant *BTCC = 7780 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 7781 7782 // This trivial case can show up in some degenerate cases where 7783 // the incoming IR has not yet been fully simplified. 
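            // (E.g., for a hypothetical header PHI
            //    %iv = phi i32 [ %init, %preheader ], [ %iv.next, %latch ]
            // in a loop whose backedge is never taken, the exit value is
            // simply %init; no evolution needs to be computed.)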
            if (BTCC->getValue()->isZero()) {
              Value *InitValue = nullptr;
              bool MultipleInitValues = false;
              for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
                if (!LI->contains(PN->getIncomingBlock(i))) {
                  if (!InitValue)
                    InitValue = PN->getIncomingValue(i);
                  else if (InitValue != PN->getIncomingValue(i)) {
                    MultipleInitValues = true;
                    break;
                  }
                }
              }
              // Only fold once every incoming value has been inspected;
              // returning from inside the loop above could miss a second,
              // conflicting init value.
              if (!MultipleInitValues && InitValue)
                return getSCEV(InitValue);
            }
            // Okay, we know how many times the containing loop executes. If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV =
                getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
            if (RV) return getSCEV(RV);
          }
        }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV. Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result. This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with scev techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                                Operands[0], Operands[1],
                                                DL, &TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable. Build a new instance of the folded commutative expression.
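        // (Operands [0, i) were unchanged by evaluation at this scope, so
        // they can be reused as-is; only the remaining operands need to be
        // re-evaluated at scope L.)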
7872 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 7873 Comm->op_begin()+i); 7874 NewOps.push_back(OpAtScope); 7875 7876 for (++i; i != e; ++i) { 7877 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 7878 NewOps.push_back(OpAtScope); 7879 } 7880 if (isa<SCEVAddExpr>(Comm)) 7881 return getAddExpr(NewOps); 7882 if (isa<SCEVMulExpr>(Comm)) 7883 return getMulExpr(NewOps); 7884 if (isa<SCEVSMaxExpr>(Comm)) 7885 return getSMaxExpr(NewOps); 7886 if (isa<SCEVUMaxExpr>(Comm)) 7887 return getUMaxExpr(NewOps); 7888 llvm_unreachable("Unknown commutative SCEV type!"); 7889 } 7890 } 7891 // If we got here, all operands are loop invariant. 7892 return Comm; 7893 } 7894 7895 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 7896 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 7897 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 7898 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 7899 return Div; // must be loop invariant 7900 return getUDivExpr(LHS, RHS); 7901 } 7902 7903 // If this is a loop recurrence for a loop that does not contain L, then we 7904 // are dealing with the final value computed by the loop. 7905 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 7906 // First, attempt to evaluate each operand. 7907 // Avoid performing the look-up in the common case where the specified 7908 // expression has no loop-variant portions. 7909 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 7910 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 7911 if (OpAtScope == AddRec->getOperand(i)) 7912 continue; 7913 7914 // Okay, at least one of these operands is loop variant but might be 7915 // foldable. Build a new instance of the folded commutative expression. 7916 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 7917 AddRec->op_begin()+i); 7918 NewOps.push_back(OpAtScope); 7919 for (++i; i != e; ++i) 7920 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 7921 7922 const SCEV *FoldedRec = 7923 getAddRecExpr(NewOps, AddRec->getLoop(), 7924 AddRec->getNoWrapFlags(SCEV::FlagNW)); 7925 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 7926 // The addrec may be folded to a nonrecurrence, for example, if the 7927 // induction variable is multiplied by zero after constant folding. Go 7928 // ahead and return the folded value. 7929 if (!AddRec) 7930 return FoldedRec; 7931 break; 7932 } 7933 7934 // If the scope is outside the addrec's loop, evaluate it by using the 7935 // loop exit value of the addrec. 7936 if (!AddRec->getLoop()->contains(L)) { 7937 // To evaluate this recurrence, we need to know how many times the AddRec 7938 // loop iterates. Compute this now. 7939 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 7940 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 7941 7942 // Then, evaluate the AddRec. 
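      // (For instance, the exit value of a hypothetical affine chrec {5,+,3}
      // whose loop takes its backedge 10 times is 5 + 3*10 = 35.)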
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2); // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //    I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  //    (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// Find the roots of the quadratic equation for the given quadratic chrec
/// {L,+,M,+,N}. This returns either the two roots (which might be the same)
/// or None if the roots could not be computed (e.g. because a coefficient is
/// not constant or the discriminant is negative).
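/// (For reference: a chrec {L,+,M,+,N} evaluated at iteration X equals
/// L + M*X + N*X*(X-1)/2, i.e. the quadratic (N/2)*X^2 + (M - N/2)*X + L;
/// the conversion below computes exactly these A, B and C coefficients.)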
static Optional<std::pair<const SCEVConstant *, const SCEVConstant *>>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC)
    return None;

  uint32_t BitWidth = LC->getAPInt().getBitWidth();
  const APInt &L = LC->getAPInt();
  const APInt &M = MC->getAPInt();
  const APInt &N = NC->getAPInt();
  APInt Two(BitWidth, 2);

  // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.

  // The A coefficient is N/2.
  APInt A = N.sdiv(Two);

  // The B coefficient is M-N/2.
  APInt B = M;
  B -= A; // A is the same as N/2.

  // The C coefficient is L.
  const APInt &C = L;

  // Compute the B^2-4AC term.
  APInt SqrtTerm = B;
  SqrtTerm *= B;
  SqrtTerm -= 4 * (A * C);

  if (SqrtTerm.isNegative()) {
    // The loop is provably infinite.
    return None;
  }

  // Compute sqrt(B^2-4AC). This is guaranteed to be the nearest
  // integer value or else APInt::sqrt() will assert.
  APInt SqrtVal = SqrtTerm.sqrt();

  // Compute the two solutions for the quadratic formula.
  // The divisions must be performed as signed divisions.
  APInt NegB = -std::move(B);
  APInt TwoA = std::move(A);
  TwoA <<= 1;
  if (TwoA.isNullValue())
    return None;

  LLVMContext &Context = SE.getContext();

  ConstantInt *Solution1 =
      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
  ConstantInt *Solution2 =
      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));

  return std::make_pair(cast<SCEVConstant>(SE.getConstant(Solution1)),
                        cast<SCEVConstant>(SE.getConstant(Solution2)));
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with an "x != y" exit test. The exit
  // condition is now expressed as a single expression, V = x-y. So the exit
  // test is effectively V != 0. We know and take advantage of the fact that
  // this expression is only used in a comparison-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant:
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
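  // (Quadratic chrecs arise from second-order recurrences: e.g., a
  // hypothetical loop body doing "i += j; ++j", with both starting at zero,
  // gives i == {0,+,0,+,1}, whose value at iteration X is X*(X-1)/2.)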
8118 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 8119 if (auto Roots = SolveQuadraticEquation(AddRec, *this)) { 8120 const SCEVConstant *R1 = Roots->first; 8121 const SCEVConstant *R2 = Roots->second; 8122 // Pick the smallest positive root value. 8123 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( 8124 CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { 8125 if (!CB->getZExtValue()) 8126 std::swap(R1, R2); // R1 is the minimum root now. 8127 8128 // We can only use this value if the chrec ends up with an exact zero 8129 // value at this index. When solving for "X*X != 5", for example, we 8130 // should not accept a root of 2. 8131 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); 8132 if (Val->isZero()) 8133 // We found a quadratic root! 8134 return ExitLimit(R1, R1, false, Predicates); 8135 } 8136 } 8137 return getCouldNotCompute(); 8138 } 8139 8140 // Otherwise we can only handle this if it is affine. 8141 if (!AddRec->isAffine()) 8142 return getCouldNotCompute(); 8143 8144 // If this is an affine expression, the execution count of this branch is 8145 // the minimum unsigned root of the following equation: 8146 // 8147 // Start + Step*N = 0 (mod 2^BW) 8148 // 8149 // equivalent to: 8150 // 8151 // Step*N = -Start (mod 2^BW) 8152 // 8153 // where BW is the common bit width of Start and Step. 8154 8155 // Get the initial value for the loop. 8156 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 8157 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 8158 8159 // For now we handle only constant steps. 8160 // 8161 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 8162 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 8163 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 8164 // We have not yet seen any such cases. 8165 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 8166 if (!StepC || StepC->getValue()->isZero()) 8167 return getCouldNotCompute(); 8168 8169 // For positive steps (counting up until unsigned overflow): 8170 // N = -Start/Step (as unsigned) 8171 // For negative steps (counting down to zero): 8172 // N = Start/-Step 8173 // First compute the unsigned distance from zero in the direction of Step. 8174 bool CountDown = StepC->getAPInt().isNegative(); 8175 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 8176 8177 // Handle unitary steps, which cannot wraparound. 8178 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 8179 // N = Distance (as unsigned) 8180 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 8181 APInt MaxBECount = getUnsignedRangeMax(Distance); 8182 8183 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 8184 // we end up with a loop whose backedge-taken count is n - 1. Detect this 8185 // case, and see if we can improve the bound. 8186 // 8187 // Explicitly handling this here is necessary because getUnsignedRange 8188 // isn't context-sensitive; it doesn't know that we only care about the 8189 // range inside the loop. 8190 const SCEV *Zero = getZero(Distance->getType()); 8191 const SCEV *One = getOne(Distance->getType()); 8192 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 8193 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 8194 // If Distance + 1 doesn't overflow, we can compute the maximum distance 8195 // as "unsigned_max(Distance + 1) - 1". 
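      // (For example, if Distance is n - 1 and n has unsigned range [0, 100),
      // the entry guard implies n != 0, so the backedge-taken count is at
      // most 99 - 1 = 98, whereas unsigned_max(n - 1) on its own would be
      // UINT_MAX because of the wrapping n == 0 case.)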
8196 ConstantRange CR = getUnsignedRange(DistancePlusOne); 8197 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 8198 } 8199 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 8200 } 8201 8202 // If the condition controls loop exit (the loop exits only if the expression 8203 // is true) and the addition is no-wrap we can use unsigned divide to 8204 // compute the backedge count. In this case, the step may not divide the 8205 // distance, but we don't care because if the condition is "missed" the loop 8206 // will have undefined behavior due to wrapping. 8207 if (ControlsExit && AddRec->hasNoSelfWrap() && 8208 loopHasNoAbnormalExits(AddRec->getLoop())) { 8209 const SCEV *Exact = 8210 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 8211 const SCEV *Max = 8212 Exact == getCouldNotCompute() 8213 ? Exact 8214 : getConstant(getUnsignedRangeMax(Exact)); 8215 return ExitLimit(Exact, Max, false, Predicates); 8216 } 8217 8218 // Solve the general equation. 8219 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 8220 getNegativeSCEV(Start), *this); 8221 const SCEV *M = E == getCouldNotCompute() 8222 ? E 8223 : getConstant(getUnsignedRangeMax(E)); 8224 return ExitLimit(E, M, false, Predicates); 8225 } 8226 8227 ScalarEvolution::ExitLimit 8228 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 8229 // Loops that look like: while (X == 0) are very strange indeed. We don't 8230 // handle them yet except for the trivial case. This could be expanded in the 8231 // future as needed. 8232 8233 // If the value is a constant, check to see if it is known to be non-zero 8234 // already. If so, the backedge will execute zero times. 8235 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8236 if (!C->getValue()->isZero()) 8237 return getZero(C->getType()); 8238 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8239 } 8240 8241 // We could implement others, but I really doubt anyone writes loops like 8242 // this, and if they did, they would already be constant folded. 8243 return getCouldNotCompute(); 8244 } 8245 8246 std::pair<BasicBlock *, BasicBlock *> 8247 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 8248 // If the block has a unique predecessor, then there is no path from the 8249 // predecessor to the block that does not go through the direct edge 8250 // from the predecessor to the block. 8251 if (BasicBlock *Pred = BB->getSinglePredecessor()) 8252 return {Pred, BB}; 8253 8254 // A loop's header is defined to be a block that dominates the loop. 8255 // If the header has a unique predecessor outside the loop, it must be 8256 // a block that has exactly one successor that can reach the loop. 8257 if (Loop *L = LI.getLoopFor(BB)) 8258 return {L->getLoopPredecessor(), L->getHeader()}; 8259 8260 return {nullptr, nullptr}; 8261 } 8262 8263 /// SCEV structural equivalence is usually sufficient for testing whether two 8264 /// expressions are equal, however for the purposes of looking for a condition 8265 /// guarding a loop, it can be useful to be a little more general, since a 8266 /// front-end may have replicated the controlling expression. 8267 static bool HasSameValue(const SCEV *A, const SCEV *B) { 8268 // Quick check to see if they are the same SCEV. 8269 if (A == B) return true; 8270 8271 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 8272 // Not all instructions that are "identical" compute the same value. 
    // For instance, two distinct alloca instructions allocating the same type
    // are identical and do not read memory, but compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;

  // If we hit the max recursion limit bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        goto trivially_false;
      else
        goto trivially_true;
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        goto trivially_true;
      else if (ExactCR.isEmptySet())
        goto trivially_false;

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
8361 if (!RA) 8362 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 8363 if (const SCEVMulExpr *ME = 8364 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 8365 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 8366 ME->getOperand(0)->isAllOnesValue()) { 8367 RHS = AE->getOperand(1); 8368 LHS = ME->getOperand(1); 8369 Changed = true; 8370 } 8371 break; 8372 8373 8374 // The "Should have been caught earlier!" messages refer to the fact 8375 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 8376 // should have fired on the corresponding cases, and canonicalized the 8377 // check to trivially_true or trivially_false. 8378 8379 case ICmpInst::ICMP_UGE: 8380 assert(!RA.isMinValue() && "Should have been caught earlier!"); 8381 Pred = ICmpInst::ICMP_UGT; 8382 RHS = getConstant(RA - 1); 8383 Changed = true; 8384 break; 8385 case ICmpInst::ICMP_ULE: 8386 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 8387 Pred = ICmpInst::ICMP_ULT; 8388 RHS = getConstant(RA + 1); 8389 Changed = true; 8390 break; 8391 case ICmpInst::ICMP_SGE: 8392 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 8393 Pred = ICmpInst::ICMP_SGT; 8394 RHS = getConstant(RA - 1); 8395 Changed = true; 8396 break; 8397 case ICmpInst::ICMP_SLE: 8398 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 8399 Pred = ICmpInst::ICMP_SLT; 8400 RHS = getConstant(RA + 1); 8401 Changed = true; 8402 break; 8403 } 8404 } 8405 } 8406 8407 // Check for obvious equality. 8408 if (HasSameValue(LHS, RHS)) { 8409 if (ICmpInst::isTrueWhenEqual(Pred)) 8410 goto trivially_true; 8411 if (ICmpInst::isFalseWhenEqual(Pred)) 8412 goto trivially_false; 8413 } 8414 8415 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 8416 // adding or subtracting 1 from one of the operands. 
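  // (For example, "x s<= y" becomes "x s< y + 1" when y is known not to be
  // the maximum signed value, and "x u>= y" becomes "x u> y - 1" when y is
  // known to be nonzero.)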
8417 switch (Pred) { 8418 case ICmpInst::ICMP_SLE: 8419 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 8420 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8421 SCEV::FlagNSW); 8422 Pred = ICmpInst::ICMP_SLT; 8423 Changed = true; 8424 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 8425 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 8426 SCEV::FlagNSW); 8427 Pred = ICmpInst::ICMP_SLT; 8428 Changed = true; 8429 } 8430 break; 8431 case ICmpInst::ICMP_SGE: 8432 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 8433 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 8434 SCEV::FlagNSW); 8435 Pred = ICmpInst::ICMP_SGT; 8436 Changed = true; 8437 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 8438 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8439 SCEV::FlagNSW); 8440 Pred = ICmpInst::ICMP_SGT; 8441 Changed = true; 8442 } 8443 break; 8444 case ICmpInst::ICMP_ULE: 8445 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 8446 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8447 SCEV::FlagNUW); 8448 Pred = ICmpInst::ICMP_ULT; 8449 Changed = true; 8450 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 8451 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 8452 Pred = ICmpInst::ICMP_ULT; 8453 Changed = true; 8454 } 8455 break; 8456 case ICmpInst::ICMP_UGE: 8457 if (!getUnsignedRangeMin(RHS).isMinValue()) { 8458 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 8459 Pred = ICmpInst::ICMP_UGT; 8460 Changed = true; 8461 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 8462 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8463 SCEV::FlagNUW); 8464 Pred = ICmpInst::ICMP_UGT; 8465 Changed = true; 8466 } 8467 break; 8468 default: 8469 break; 8470 } 8471 8472 // TODO: More simplifications are possible here. 8473 8474 // Recursively simplify until we either hit a recursion limit or nothing 8475 // changes. 8476 if (Changed) 8477 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 8478 8479 return Changed; 8480 8481 trivially_true: 8482 // Return 0 == 0. 8483 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8484 Pred = ICmpInst::ICMP_EQ; 8485 return true; 8486 8487 trivially_false: 8488 // Return 0 != 0. 8489 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8490 Pred = ICmpInst::ICMP_NE; 8491 return true; 8492 } 8493 8494 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 8495 return getSignedRangeMax(S).isNegative(); 8496 } 8497 8498 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 8499 return getSignedRangeMin(S).isStrictlyPositive(); 8500 } 8501 8502 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 8503 return !getSignedRangeMin(S).isNegative(); 8504 } 8505 8506 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 8507 return !getSignedRangeMax(S).isStrictlyPositive(); 8508 } 8509 8510 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 8511 return isKnownNegative(S) || isKnownPositive(S); 8512 } 8513 8514 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 8515 const SCEV *LHS, const SCEV *RHS) { 8516 // Canonicalize the inputs first. 8517 (void)SimplifyICmpOperands(Pred, LHS, RHS); 8518 8519 // If LHS or RHS is an addrec, check to see if the condition is true in 8520 // every iteration of the loop. 8521 // If LHS and RHS are both addrec, both conditions must be true in 8522 // every iteration of the loop. 
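  // (The two checks per side below form a simple induction:
  // isLoopEntryGuardedByCond establishes the predicate for the start value,
  // and isLoopBackedgeGuardedByCond shows that it is maintained for the
  // post-incremented value on every backedge.)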
8523 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 8524 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 8525 bool LeftGuarded = false; 8526 bool RightGuarded = false; 8527 if (LAR) { 8528 const Loop *L = LAR->getLoop(); 8529 if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) && 8530 isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) { 8531 if (!RAR) return true; 8532 LeftGuarded = true; 8533 } 8534 } 8535 if (RAR) { 8536 const Loop *L = RAR->getLoop(); 8537 if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) && 8538 isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) { 8539 if (!LAR) return true; 8540 RightGuarded = true; 8541 } 8542 } 8543 if (LeftGuarded && RightGuarded) 8544 return true; 8545 8546 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 8547 return true; 8548 8549 // Otherwise see what can be done with known constant ranges. 8550 return isKnownPredicateViaConstantRanges(Pred, LHS, RHS); 8551 } 8552 8553 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 8554 ICmpInst::Predicate Pred, 8555 bool &Increasing) { 8556 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 8557 8558 #ifndef NDEBUG 8559 // Verify an invariant: inverting the predicate should turn a monotonically 8560 // increasing change to a monotonically decreasing one, and vice versa. 8561 bool IncreasingSwapped; 8562 bool ResultSwapped = isMonotonicPredicateImpl( 8563 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 8564 8565 assert(Result == ResultSwapped && "should be able to analyze both!"); 8566 if (ResultSwapped) 8567 assert(Increasing == !IncreasingSwapped && 8568 "monotonicity should flip as we flip the predicate"); 8569 #endif 8570 8571 return Result; 8572 } 8573 8574 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 8575 ICmpInst::Predicate Pred, 8576 bool &Increasing) { 8577 8578 // A zero step value for LHS means the induction variable is essentially a 8579 // loop invariant value. We don't really depend on the predicate actually 8580 // flipping from false to true (for increasing predicates, and the other way 8581 // around for decreasing predicates), all we care about is that *if* the 8582 // predicate changes then it only changes from false to true. 8583 // 8584 // A zero step value in itself is not very useful, but there may be places 8585 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 8586 // as general as possible. 
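  // (For instance, a NUW chrec such as {0,+,1} never decreases in the
  // unsigned sense, so a predicate like "IV u> N" can only change from false
  // to true as the loop advances, i.e. it is monotonically increasing.)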
8587 8588 switch (Pred) { 8589 default: 8590 return false; // Conservative answer 8591 8592 case ICmpInst::ICMP_UGT: 8593 case ICmpInst::ICMP_UGE: 8594 case ICmpInst::ICMP_ULT: 8595 case ICmpInst::ICMP_ULE: 8596 if (!LHS->hasNoUnsignedWrap()) 8597 return false; 8598 8599 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 8600 return true; 8601 8602 case ICmpInst::ICMP_SGT: 8603 case ICmpInst::ICMP_SGE: 8604 case ICmpInst::ICMP_SLT: 8605 case ICmpInst::ICMP_SLE: { 8606 if (!LHS->hasNoSignedWrap()) 8607 return false; 8608 8609 const SCEV *Step = LHS->getStepRecurrence(*this); 8610 8611 if (isKnownNonNegative(Step)) { 8612 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 8613 return true; 8614 } 8615 8616 if (isKnownNonPositive(Step)) { 8617 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 8618 return true; 8619 } 8620 8621 return false; 8622 } 8623 8624 } 8625 8626 llvm_unreachable("switch has default clause!"); 8627 } 8628 8629 bool ScalarEvolution::isLoopInvariantPredicate( 8630 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 8631 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 8632 const SCEV *&InvariantRHS) { 8633 8634 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 8635 if (!isLoopInvariant(RHS, L)) { 8636 if (!isLoopInvariant(LHS, L)) 8637 return false; 8638 8639 std::swap(LHS, RHS); 8640 Pred = ICmpInst::getSwappedPredicate(Pred); 8641 } 8642 8643 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 8644 if (!ArLHS || ArLHS->getLoop() != L) 8645 return false; 8646 8647 bool Increasing; 8648 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 8649 return false; 8650 8651 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 8652 // true as the loop iterates, and the backedge is control dependent on 8653 // "ArLHS `Pred` RHS" == true then we can reason as follows: 8654 // 8655 // * if the predicate was false in the first iteration then the predicate 8656 // is never evaluated again, since the loop exits without taking the 8657 // backedge. 8658 // * if the predicate was true in the first iteration then it will 8659 // continue to be true for all future iterations since it is 8660 // monotonically increasing. 8661 // 8662 // For both the above possibilities, we can replace the loop varying 8663 // predicate with its value on the first iteration of the loop (which is 8664 // loop invariant). 8665 // 8666 // A similar reasoning applies for a monotonically decreasing predicate, by 8667 // replacing true with false and false with true in the above two bullets. 8668 8669 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 8670 8671 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 8672 return false; 8673 8674 InvariantPred = Pred; 8675 InvariantLHS = ArLHS->getStart(); 8676 InvariantRHS = RHS; 8677 return true; 8678 } 8679 8680 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 8681 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 8682 if (HasSameValue(LHS, RHS)) 8683 return ICmpInst::isTrueWhenEqual(Pred); 8684 8685 // This code is split out from isKnownPredicate because it is called from 8686 // within isLoopEntryGuardedByCond. 
8687 8688 auto CheckRanges = 8689 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 8690 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 8691 .contains(RangeLHS); 8692 }; 8693 8694 // The check at the top of the function catches the case where the values are 8695 // known to be equal. 8696 if (Pred == CmpInst::ICMP_EQ) 8697 return false; 8698 8699 if (Pred == CmpInst::ICMP_NE) 8700 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 8701 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 8702 isKnownNonZero(getMinusSCEV(LHS, RHS)); 8703 8704 if (CmpInst::isSigned(Pred)) 8705 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 8706 8707 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 8708 } 8709 8710 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 8711 const SCEV *LHS, 8712 const SCEV *RHS) { 8713 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 8714 // Return Y via OutY. 8715 auto MatchBinaryAddToConst = 8716 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 8717 SCEV::NoWrapFlags ExpectedFlags) { 8718 const SCEV *NonConstOp, *ConstOp; 8719 SCEV::NoWrapFlags FlagsPresent; 8720 8721 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 8722 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 8723 return false; 8724 8725 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 8726 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 8727 }; 8728 8729 APInt C; 8730 8731 switch (Pred) { 8732 default: 8733 break; 8734 8735 case ICmpInst::ICMP_SGE: 8736 std::swap(LHS, RHS); 8737 LLVM_FALLTHROUGH; 8738 case ICmpInst::ICMP_SLE: 8739 // X s<= (X + C)<nsw> if C >= 0 8740 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 8741 return true; 8742 8743 // (X + C)<nsw> s<= X if C <= 0 8744 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 8745 !C.isStrictlyPositive()) 8746 return true; 8747 break; 8748 8749 case ICmpInst::ICMP_SGT: 8750 std::swap(LHS, RHS); 8751 LLVM_FALLTHROUGH; 8752 case ICmpInst::ICMP_SLT: 8753 // X s< (X + C)<nsw> if C > 0 8754 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 8755 C.isStrictlyPositive()) 8756 return true; 8757 8758 // (X + C)<nsw> s< X if C < 0 8759 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 8760 return true; 8761 break; 8762 } 8763 8764 return false; 8765 } 8766 8767 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 8768 const SCEV *LHS, 8769 const SCEV *RHS) { 8770 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 8771 return false; 8772 8773 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 8774 // the stack can result in exponential time complexity. 8775 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 8776 8777 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 8778 // 8779 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 8780 // isKnownPredicate. isKnownPredicate is more powerful, but also more 8781 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 8782 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 8783 // use isKnownPredicate later if needed. 
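  // (Worked example on i8: if RHS == 100, which is s>= 0, then LHS u< 100
  // holds exactly for LHS in [0, 100), the same set as
  // LHS s>= 0 && LHS s< 100.)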
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times. This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree. These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
8870 if (!DT.isReachableFromEntry(L->getHeader())) 8871 return false; 8872 8873 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 8874 return true; 8875 8876 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 8877 DTN != HeaderDTN; DTN = DTN->getIDom()) { 8878 assert(DTN && "should reach the loop header before reaching the root!"); 8879 8880 BasicBlock *BB = DTN->getBlock(); 8881 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 8882 return true; 8883 8884 BasicBlock *PBB = BB->getSinglePredecessor(); 8885 if (!PBB) 8886 continue; 8887 8888 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 8889 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 8890 continue; 8891 8892 Value *Condition = ContinuePredicate->getCondition(); 8893 8894 // If we have an edge `E` within the loop body that dominates the only 8895 // latch, the condition guarding `E` also guards the backedge. This 8896 // reasoning works only for loops with a single latch. 8897 8898 BasicBlockEdge DominatingEdge(PBB, BB); 8899 if (DominatingEdge.isSingleEdge()) { 8900 // We're constructively (and conservatively) enumerating edges within the 8901 // loop body that dominate the latch. The dominator tree better agree 8902 // with us on this: 8903 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 8904 8905 if (isImpliedCond(Pred, LHS, RHS, Condition, 8906 BB != ContinuePredicate->getSuccessor(0))) 8907 return true; 8908 } 8909 } 8910 8911 return false; 8912 } 8913 8914 bool 8915 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 8916 ICmpInst::Predicate Pred, 8917 const SCEV *LHS, const SCEV *RHS) { 8918 // Interpret a null as meaning no loop, where there is obviously no guard 8919 // (interprocedural conditions notwithstanding). 8920 if (!L) return false; 8921 8922 if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS)) 8923 return true; 8924 8925 // Starting at the loop predecessor, climb up the predecessor chain, as long 8926 // as there are predecessors that can be found that have unique successors 8927 // leading to the original header. 8928 for (std::pair<BasicBlock *, BasicBlock *> 8929 Pair(L->getLoopPredecessor(), L->getHeader()); 8930 Pair.first; 8931 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 8932 8933 if (isImpliedViaGuard(Pair.first, Pred, LHS, RHS)) 8934 return true; 8935 8936 BranchInst *LoopEntryPredicate = 8937 dyn_cast<BranchInst>(Pair.first->getTerminator()); 8938 if (!LoopEntryPredicate || 8939 LoopEntryPredicate->isUnconditional()) 8940 continue; 8941 8942 if (isImpliedCond(Pred, LHS, RHS, 8943 LoopEntryPredicate->getCondition(), 8944 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 8945 return true; 8946 } 8947 8948 // Check conditions due to any @llvm.assume intrinsics. 8949 for (auto &AssumeVH : AC.assumptions()) { 8950 if (!AssumeVH) 8951 continue; 8952 auto *CI = cast<CallInst>(AssumeVH); 8953 if (!DT.dominates(CI, L->getHeader())) 8954 continue; 8955 8956 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 8957 return true; 8958 } 8959 8960 return false; 8961 } 8962 8963 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 8964 const SCEV *LHS, const SCEV *RHS, 8965 Value *FoundCondValue, 8966 bool Inverse) { 8967 if (!PendingLoopPredicates.insert(FoundCondValue).second) 8968 return false; 8969 8970 auto ClearOnExit = 8971 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 8972 8973 // Recursively handle And and Or conditions. 
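  // (If the dominating condition is "A && B" and we are not inverting, both
  // conjuncts are known true, so either one may imply the goal on its own;
  // dually, an inverted "A || B" gives us both inverted operands.)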
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Now that we have found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both operands
  // are non-negative.
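  // (E.g., on i8, "5 u< 100" and "5 s< 100" agree because neither operand
  // has its sign bit set.)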
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t). The
    // range we consider has to correspond to the same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin. If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V,
                                  getConstant(SharperMin)))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min). We know from the guarding condition that !(V
        // == Min). This gives us
        //
        //       V `Pred` Min || V == Min && !(V == Min)
        //    => V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.

        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
          return true;
        LLVM_FALLTHROUGH;

      default:
        // No change
        break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).
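  //
  // For example, if More is (%x + 10) and Less is %x, the difference is 10;
  // and if both are addrecs over the same loop with equal steps, it suffices
  // to compare their start values.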

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return None;

    // We look at affine expressions only; not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return None;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return None;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  const SCEV *L, *R;
  SCEV::NoWrapFlags Flags;
  if (splitBinaryAdd(Less, L, R, Flags))
    if (const auto *LC = dyn_cast<SCEVConstant>(L))
      if (R == More)
        return -(LC->getAPInt());

  if (splitBinaryAdd(More, L, R, Flags))
    if (const auto *LC = dyn_cast<SCEVConstant>(L))
      if (R == Less)
        return LC->getAPInt();

  return None;
}

bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both inequalities to be about add recurrences on the same loop. This way
  // we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  //                                                                  ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //   FoundLHS s< FoundRHS s< INT_MIN - C
  //     <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  //     <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  //     <=> (FoundLHS + INT_MIN + C + INT_MIN) s<
  //                        (FoundRHS + INT_MIN + C + INT_MIN)    [ using (3) ]
  //     <=> FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".
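  //
  // Below we rewrite LHS as (FoundLHS + C) and RHS as (FoundRHS + C) for one
  // shared constant C; proving the consequent then reduces to establishing
  // the precondition of fact (1) or (2) above about FoundRHS.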

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}

/// If Expr computes ~A, return A, otherwise return nullptr.
static const SCEV *MatchNotExpr(const SCEV *Expr) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())
    return nullptr;

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
      !AddRHS->getOperand(0)->isAllOnesValue())
    return nullptr;

  return AddRHS->getOperand(1);
}

/// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
template<typename MaxExprType>
static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
                              const SCEV *Candidate) {
  const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
  if (!MaxExpr) return false;

  return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
}

/// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
template<typename MaxExprType>
static bool IsMinConsistingOf(ScalarEvolution &SE,
                              const SCEV *MaybeMinExpr,
                              const SCEV *Candidate) {
  const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
  if (!MaybeMaxExpr)
    return false;

  return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
                         SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;
  // We only want to work with ICMP_SGT comparison so far.
  // TODO: Extend to ICMP_UGT?
  if (Pred == ICmpInst::ICMP_SLT) {
    Pred = ICmpInst::ICMP_SGT;
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }
  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or by using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaSimpleReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
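    // (The nsw flag checked below guarantees that the sum of LL and LR does
    // not wrap in the signed sense, which the two rules that follow rely on.)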
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop; that
      // request would cache as SCEVCouldNotCompute to avoid the infinite
      // recursion. To avoid this, we only want to create SCEVs that are
      // constants in this section, so we bail if Denominator is not a
      // constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches FoundLHS and the denominator is
      // positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other is not. We cannot extend
        // them properly to a wider type, so let us just reject this case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3, and
      // dividing it by Denominator < 4 yields at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2.
      // If we divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getNegativeSCEV(getOne(WTy));
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  return false;
}

bool
ScalarEvolution::isKnownViaSimpleReasoning(ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile-time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
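  //
  // For example (a hypothetical instance of the reasoning below): with
  // Pred = ult, ConstFoundRHS = 8 and Addend = 4, FoundLHS lies in [0, 8),
  // so LHS = FoundLHS + 4 lies in [4, 12); if ConstRHS = 12, every such LHS
  // satisfies LHS u< 12 and the implication holds.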
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  ConstantRange SatisfyingLHSRange =
      ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);

  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return SatisfyingLHSRange.contains(LHSRange);
}

bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}

bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}

const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
                                            bool Equality) {
  const SCEV *One = getOne(Step->getType());
  Delta = Equality ? getAddExpr(Delta, Step)
                   : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}

const SCEV *ScalarEvolution::computeMaxBECount(const SCEV *Start,
                                               const SCEV *Stride,
                                               const SCEV *End,
                                               unsigned BitWidth,
                                               bool IsSigned) {
  assert(!isKnownNonPositive(Stride) &&
         "Stride is expected to be strictly positive!");
  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  const SCEV *MaxBECount;
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount;

  bool PositiveStride = isKnownPositive(Stride);
  if (PositiveStride)
    StrideForMaxBECount =
        IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
  else
    // Using a stride of 1 is safe when computing max backedge taken count for
    // a loop with unknown stride.
    StrideForMaxBECount = APInt(BitWidth, 1, IsSigned);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum
  // backedge taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove the
    // correctness of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is single exit with no side effects.
    //
    //
    // Precondition a) implies that if the stride is negative, this is a
    // single-trip loop. The backedge taken count formula reduces to zero in
    // this case.
    //
    // Precondition b) implies that the unknown stride cannot be zero;
    // otherwise we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement
    // operation itself is wrapping. The computed backedge taken count may be
    // wrong in such cases. This is prevented by checking that the stride is
    // not known to be either positive or non-positive. For example, no wrap
    // flags are propagated to the post-increment IV of this loop with a trip
    // count of 2 -
    //
    // unsigned char i;
    // for(i=127; i<128; i+=129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing us to optimize in the presence
    // of undefined behavior, as in the case of the C language.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop
  // and cannot calculate the ExactBECount needed by ExitLimit. However, we can
  // calculate the MaxBECount, given the start, stride and max value for the
  // end bound of the loop (RHS), and the fact that IV does not overflow (which
  // is checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECount(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
  // is the LHS value of the less-than comparison the first time it is
  // evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so the
  // result is as above, and if not max(End,Start) is Start so we get a
  // backedge count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECount(Start, Stride, RHS,
                                   getTypeSizeInBits(LHS->getType()),
                                   IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant.
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values.
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing us to optimize in the presence of
  // undefined behavior, as in the case of the C language.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
    End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = getCouldNotCompute();
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else
    MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
                                getConstant(MinStride), false);

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero. If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range. If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value. Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value. If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range. This is a sanity check.
    assert(Range.contains(
               EvaluateConstantChrecAtConstant(this,
                   ConstantInt::get(SE.getContext(), ExitVal - 1),
                   SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it. To do this, we must frame our
    // problem in terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), FlagAnyWrap);

    // Next, solve the constructed addrec.
    if (auto Roots =
            SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE)) {
      const SCEVConstant *R1 = Roots->first;
      const SCEVConstant *R2 = Roots->second;
      // Pick the smallest positive root value.
      if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
              ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2);  // R1 is the minimum root now.

        // Make sure the root is not off by one. The returned iteration should
        // not be in the range, but the previous one should be. When solving
        // for "X*X < 5", for example, we should not return a root of 2.
        ConstantInt *R1Val =
            EvaluateConstantChrecAtConstant(this, R1->getValue(), SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
              ConstantInt::get(SE.getContext(), R1->getAPInt() + 1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value. Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
            ConstantInt::get(SE.getContext(), R1->getAPInt() - 1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    else if (const auto *SC = dyn_cast<SCEVConstant>(S))
      return isa<UndefValue>(SC->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRecExpr, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array
// size parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExprs.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
/// two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
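///
/// For example, given an access whose SCEV contains the addrec
/// {%a,+,(8 * %n)}<%loop>, the stride collector records the step (8 * %n),
/// the term collector then keeps that multiplication, and %n can later be
/// recognized as a candidate array-size parameter. (The SCEV shown is
/// illustrative, not taken from a particular test.)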
void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
                                             SmallVectorImpl<const SCEV *> &Terms) {
  SmallVector<const SCEV *, 4> Strides;
  SCEVCollectStrides StrideCollector(*this, Strides);
  visitAll(Expr, StrideCollector);

  DEBUG({
    dbgs() << "Strides:\n";
    for (const SCEV *S : Strides)
      dbgs() << *S << "\n";
  });

  for (const SCEV *S : Strides) {
    SCEVCollectTerms TermCollector(Terms);
    visitAll(S, TermCollector);
  }

  DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
  visitAll(Expr, MulCollector);
}

static bool findArrayDimensionsRec(ScalarEvolution &SE,
                                   SmallVectorImpl<const SCEV *> &Terms,
                                   SmallVectorImpl<const SCEV *> &Sizes) {
  int Last = Terms.size() - 1;
  const SCEV *Step = Terms[Last];

  // End of recursion.
  if (Last == 0) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
      SmallVector<const SCEV *, 2> Qs;
      for (const SCEV *Op : M->operands())
        if (!isa<SCEVConstant>(Op))
          Qs.push_back(Op);

      Step = SE.getMulExpr(Qs);
    }

    Sizes.push_back(Step);
    return true;
  }

  for (const SCEV *&Term : Terms) {
    // Normalize the terms before the next call to findArrayDimensionsRec.
    const SCEV *Q, *R;
    SCEVDivision::divide(SE, Term, Step, &Q, &R);

    // Bail out when GCD does not evenly divide one of the terms.
    if (!R->isZero())
      return false;

    Term = Q;
  }

  // Remove all SCEVConstants.
  Terms.erase(
      remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }),
      Terms.end());

  if (Terms.size() > 0)
    if (!findArrayDimensionsRec(SE, Terms, Sizes))
      return false;

  Sizes.push_back(Step);
  return true;
}

// Returns true when one of the SCEVs of Terms contains a SCEVUnknown
// parameter.
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  for (const SCEV *T : Terms)
    if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>))
      return true;
  return false;
}

// Return the number of product terms in S.
static inline int numberOfTerms(const SCEV *S) {
  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
    return Expr->getNumOperands();
  return 1;
}

static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  if (isa<SCEVConstant>(T))
    return nullptr;

  if (isa<SCEVUnknown>(T))
    return T;

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
    SmallVector<const SCEV *, 2> Factors;
    for (const SCEV *Op : M->operands())
      if (!isa<SCEVConstant>(Op))
        Factors.push_back(Op);

    return SE.getMulExpr(Factors);
  }

  return T;
}

/// Return the size of an element read or written by Inst.
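/// For example, for "store i32 %v, i32* %p" this returns the SCEV for 4
/// (the store writes four bytes), and for an instruction that is neither a
/// load nor a store it returns null.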
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.size() < 1 || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non-parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size. If a term is not divisible
  // by the element size, proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  DEBUG({
    dbgs() << "Terms after sorting:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}

void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {

      // Bail out if the remainder is too complex.
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization that
/// is the offset start of the array. The SCEV->delinearize algorithm computes
/// the multiples of SCEV coefficients: that is a pattern matching of sub
/// expressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides are happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of that dimension: when the
/// array has been statically allocated, one could compute the size of that
/// dimension by dividing the overall size of the array by the size of the
/// known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases are checking the output of a function pass:
/// DelinearizationPass that walks through all loads and stores of a function
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
  computeAccessFunctions(Expr, Subscripts, Sizes);

  if (Subscripts.empty())
    return;

  DEBUG({
    dbgs() << "succeeded to delinearize " << *Expr << "\n";
    dbgs() << "ArrayDecl[UnknownSize]";
    for (const SCEV *S : Sizes)
      dbgs() << "[" << *S << "]";

    dbgs() << "\nArrayRef";
    for (const SCEV *S : Subscripts)
      dbgs() << "[" << *S << "]";
    dbgs() << "\n";
  });
}

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
  SmallPtrSet<User *, 8> Visited;
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U).second)
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->eraseValueFromMap(U);
    Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction in
  // relevant basic blocks, and not just terminators. Doing this is a waste of
  // time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution wants
  // to _add_ guards to the module when there weren't any before, and wants
  // ScalarEvolution to optimize based on those guards. For now we prefer to be
  // efficient in lieu of being smart in that rather obscure case.

  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}

ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      ExitLimits(std::move(Arg.ExitLimits)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}

ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
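  // Both the exact and the predicated maps may carry such per-exit data,
  // which is why each BackedgeTakenInfo below is cleared explicitly.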
  for (auto &BTCI : BackedgeTakenCounts)
    BTCI.second.clear();
  for (auto &BTCI : PredicatedBackedgeTakenCounts)
    BTCI.second.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SCEVUnionPredicate Pred;
  auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
  if (!isa<SCEVCouldNotCompute>(PBT)) {
    OS << "Predicated backedge-taken count is " << *PBT << "\n";
    OS << " Predicates:\n";
    Pred.print(OS, 4);
  } else {
    OS << "Unpredictable predicated backedge-taken count. ";
  }
  OS << "\n";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}

static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
  switch (LD) {
  case ScalarEvolution::LoopVariant:
    return "Variant";
  case ScalarEvolution::LoopInvariant:
    return "Invariant";
  case ScalarEvolution::LoopComputable:
    return "Computable";
  }
  llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
}

void ScalarEvolution::print(raw_ostream &OS) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
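  //
  // A sketch of one classified line of output (values illustrative only):
  //   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  //   --> {0,+,1}<%loop> U: [0,100) S: [0,100)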
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Instruction &I : instructions(F))
    if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
      OS << I << '\n';
      OS << " --> ";
      const SCEV *SV = SE.getSCEV(&I);
      SV->print(OS);
      if (!isa<SCEVCouldNotCompute>(SV)) {
        OS << " U: ";
        SE.getUnsignedRange(SV).print(OS);
        OS << " S: ";
        SE.getSignedRange(SV).print(OS);
      }

      const Loop *L = LI.getLoopFor(I.getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << " --> ";
        AtUse->print(OS);
        if (!isa<SCEVCouldNotCompute>(AtUse)) {
          OS << " U: ";
          SE.getUnsignedRange(AtUse).print(OS);
          OS << " S: ";
          SE.getSignedRange(AtUse).print(OS);
        }
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }

        bool First = true;
        for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
        }

        for (auto *InnerL : depth_first(L)) {
          if (InnerL == L)
            continue;
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
        }

        OS << " }";
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}

ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  auto &Values2 = LoopDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // This recurrence is variant w.r.t. L if L contains AR's loop.
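    // (The addrec then evolves inside the body of L, so its value changes
    // within a single iteration of L.)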
    if (L->contains(AR->getLoop()))
      return LoopVariant;

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    bool HasVarying = false;
    for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of a "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
           ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S, bool EraseExitLimit) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);

  // TODO: There is a suspicion that we only need to do this when there is a
  // SCEVUnknown somewhere inside S. Need to check this.
  if (EraseExitLimit)
    for (auto I = ExitLimits.begin(), E = ExitLimits.end(); I != E;)
      // Post-increment before erasing: erase invalidates the iterator to the
      // erased element, so advance past it first.
      if (I->second.hasOperand(S))
        ExitLimits.erase(I++);
      else
        ++I;
}

void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  struct FindUsedLoops {
    SmallPtrSet<const Loop *, 8> LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F;
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);

  for (auto *L : F.LoopsUsed)
    LoopUsers[L].push_back(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV incorrectly
      // thinks we increased the trip count of the loop by 1.
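      // (E.g. old: "undef" vs. new: "(1 + undef)" -- the constant-delta check
      // below would misfire on such a pair, so we skip verification instead.)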
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
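  // Use addRequiredTransitive rather than addRequired: the ScalarEvolution
  // object holds references to these analyses, so they must stay alive for
  // as long as this pass's result does.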
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                        SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                        SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
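      // For example (a sketch): given zext({A,+,S}<%L>) where the addrec
      // lacks <nuw>, record a <nusw> wrap predicate and rewrite the
      // expression to {zext(A),+,sext(S)}<%L>; sign-extending the step is
      // exactly what <nusw> licenses.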
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
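  // (\p Preds then holds exactly the runtime checks under which it is sound
  // to treat \p S as the returned add recurrence.)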
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}

/// Union predicates don't get cached, so create a dummy set ID for them.
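/// (A null FoldingSetNodeIDRef is harmless here because union predicates are
/// never inserted into the UniquePreds folding set.)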
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                "associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // If we found an entry but it is stale, start the rewrite from the stale
  // value rather than from scratch, according to the current predicates.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
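  // (Generation is a plain unsigned counter, so after a wrap old cache
  // entries could masquerade as current ones; rewrite them all eagerly.)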
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}