//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
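//
// As a concrete illustration of the representation (the textual forms are
// produced by SCEV::print below; the loop and block names are hypothetical),
// given a loop whose header block is named %loop:
//
//   for (int i = 0; i != n; ++i)
//     A[4*i + 7] = 0;
//
// the induction variable i is the affine recurrence {0,+,1}<%loop> (it starts
// at 0 and is incremented by 1 on every backedge), and the index expression
// 4*i + 7 folds to the recurrence {7,+,4}<%loop>.
//
//===----------------------------------------------------------------------===//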

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant-derived "
                                     "loop"),
                            cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetic"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
    ClassifyExpressions("scalar-evolution-classify-expressions",
                        cl::Hidden, cl::init(true),
                        cl::desc("When printing analysis, include information "
                                 "on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif
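
// The printed form of a SCEV mirrors the expression structure. For
// illustration (the value and block names here are hypothetical): a constant
// prints as plain 42, a cast as (zext i8 %x to i32), an n-ary expression as
// (%a + %b)<nuw>, a udiv as (%a /u %b), and an affine recurrence over a loop
// headed by %loop as {0,+,1}<nuw><%loop>.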
void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVIntegralCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
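
// The following three predicates recognize the constant-expression idioms
// conventionally used in IR to denote sizeof, alignof, and offsetof. For
// example, sizeof(Ty) is customarily expressed as the offset of the element
// one past a null Ty pointer, i.e. (illustrative syntax, result width i64):
//
//   ptrtoint (Ty* getelementptr (Ty, Ty* null, i32 1) to i64)
//
// which is exactly the pattern isSizeOf matches below.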
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recs that are used
    // by one SCEV, so we can safely sort recs by loop header dominance. We
    // require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVIntegralCastExpr *LC = cast<SCEVIntegralCastExpr>(LHS);
    const SCEVIntegralCastExpr *RC = cast<SCEVIntegralCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to land in memory.
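///
/// For example (hypothetical operand list), grouping {%b, 2, %a, %b} by
/// complexity yields (say) {2, %a, %b, %b}: the constant sorts first and the
/// two occurrences of %b become adjacent, which lets callers such as
/// getAddExpr spot the duplicate and fold it to 2 * %b.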
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because
  // we do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (i.e. one whose expression
/// subtree contains at least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }
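
  // For example, for K = 5 the loop above visits i = 3, 4, 5: i = 3
  // contributes no factors of two, i = 4 contributes two, and i = 5 none,
  // so T = 1 + 0 + 2 + 0 = 3 (T starts at 1 to account for the factor of two
  // in 2!) and OddFactorial = 3 * 1 * 5 = 15.  Indeed, 5! = 120 = 15 * 2^3.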

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
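///
/// For example, since BC(It, 0) = 1, BC(It, 1) = It, and
/// BC(It, 2) = It*(It - 1)/2, the affine recurrence {A,+,B} evaluates to
/// A + B*It, and the quadratic recurrence {A,+,B,+,C} evaluates to
/// A + B*It + C*It*(It - 1)/2.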
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked at the beginning that ID was not in the cache, the
    // recursive calls above may have inserted it in the meantime. So if we
    // find it now, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
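//
// For example, for an i8 recurrence whose Step is known to be exactly 1, the
// limit is SignedMin - 1 == 127 (mod 2^8) with Pred == ICMP_SLT: any value
// strictly below 127 can be incremented by 1 without signed wrap.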
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
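//
// For instance (illustrative names), if AR = {1 + %n,+,1} with Step = 1, then
// PreStart = %n; if {%n,+,1} is already known to be <nuw> and the loop's
// backedge is known to be taken at least once, the zero extension of AR's
// start can be computed as (1 + zext(%n)) rather than zext(1 + %n).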
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
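//
// For example (hypothetical 8-bit constants, shown in binary): if
// C == 0b00111010 and every other operand has at least three trailing zeros,
// then D == 0b00000010 (the low three bits of C).  The remainder
// C - D == 0b00111000 keeps three trailing zeros, so adding D back cannot
// carry into the upper bits and therefore cannot wrap.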
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
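    // For example (hypothetical values), if %x is an i64 known to lie in
    // [0, 100), then trunc %x to i8 drops only zero bits, and
    // (zext i8 (trunc i64 %x to i8) to i16) folds to (trunc i64 %x to i16).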
1437 const SCEV *X = ST->getOperand(); 1438 ConstantRange CR = getUnsignedRange(X); 1439 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1440 unsigned NewBits = getTypeSizeInBits(Ty); 1441 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1442 CR.zextOrTrunc(NewBits))) 1443 return getTruncateOrZeroExtend(X, Ty, Depth); 1444 } 1445 1446 // If the input value is a chrec scev, and we can prove that the value 1447 // did not overflow the old, smaller, value, we can zero extend all of the 1448 // operands (often constants). This allows analysis of something like 1449 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } 1450 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1451 if (AR->isAffine()) { 1452 const SCEV *Start = AR->getStart(); 1453 const SCEV *Step = AR->getStepRecurrence(*this); 1454 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1455 const Loop *L = AR->getLoop(); 1456 1457 if (!AR->hasNoUnsignedWrap()) { 1458 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1459 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1460 } 1461 1462 // If we have special knowledge that this addrec won't overflow, 1463 // we don't need to do any further analysis. 1464 if (AR->hasNoUnsignedWrap()) 1465 return getAddRecExpr( 1466 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1467 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1468 1469 // Check whether the backedge-taken count is SCEVCouldNotCompute. 1470 // Note that this serves two purposes: It filters out loops that are 1471 // simply not analyzable, and it covers the case where this code is 1472 // being called from within backedge-taken count analysis, such that 1473 // attempting to ask for the backedge-taken count would likely result 1474 // in infinite recursion. In the later case, the analysis code will 1475 // cope with a conservative value, and it will take care to purge 1476 // that value once it has finished. 1477 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 1478 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1479 // Manually compute the final value for AR, checking for 1480 // overflow. 1481 1482 // Check whether the backedge-taken count can be losslessly casted to 1483 // the addrec's type. The count is always unsigned. 1484 const SCEV *CastedMaxBECount = 1485 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); 1486 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( 1487 CastedMaxBECount, MaxBECount->getType(), Depth); 1488 if (MaxBECount == RecastedMaxBECount) { 1489 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1490 // Check whether Start+Step*MaxBECount has no unsigned overflow. 1491 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, 1492 SCEV::FlagAnyWrap, Depth + 1); 1493 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, 1494 SCEV::FlagAnyWrap, 1495 Depth + 1), 1496 WideTy, Depth + 1); 1497 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); 1498 const SCEV *WideMaxBECount = 1499 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1500 const SCEV *OperandExtendedAdd = 1501 getAddExpr(WideStart, 1502 getMulExpr(WideMaxBECount, 1503 getZeroExtendExpr(Step, WideTy, Depth + 1), 1504 SCEV::FlagAnyWrap, Depth + 1), 1505 SCEV::FlagAnyWrap, Depth + 1); 1506 if (ZAdd == OperandExtendedAdd) { 1507 // Cache knowledge of AR NUW, which is propagated to this AddRec. 
1508 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1509 // Return the expression with the addrec on the outside. 1510 return getAddRecExpr( 1511 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1512 Depth + 1), 1513 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1514 AR->getNoWrapFlags()); 1515 } 1516 // Similar to above, only this time treat the step value as signed. 1517 // This covers loops that count down. 1518 OperandExtendedAdd = 1519 getAddExpr(WideStart, 1520 getMulExpr(WideMaxBECount, 1521 getSignExtendExpr(Step, WideTy, Depth + 1), 1522 SCEV::FlagAnyWrap, Depth + 1), 1523 SCEV::FlagAnyWrap, Depth + 1); 1524 if (ZAdd == OperandExtendedAdd) { 1525 // Cache knowledge of AR NW, which is propagated to this AddRec. 1526 // Negative step causes unsigned wrap, but it still can't self-wrap. 1527 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1528 // Return the expression with the addrec on the outside. 1529 return getAddRecExpr( 1530 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1531 Depth + 1), 1532 getSignExtendExpr(Step, Ty, Depth + 1), L, 1533 AR->getNoWrapFlags()); 1534 } 1535 } 1536 } 1537 1538 // Normally, in the cases we can prove no-overflow via a 1539 // backedge guarding condition, we can also compute a backedge 1540 // taken count for the loop. The exceptions are assumptions and 1541 // guards present in the loop -- SCEV is not great at exploiting 1542 // these to compute max backedge taken counts, but can still use 1543 // these to prove lack of overflow. Use this fact to avoid 1544 // doing extra work that may not pay off. 1545 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1546 !AC.assumptions().empty()) { 1547 // If the backedge is guarded by a comparison with the pre-inc 1548 // value the addrec is safe. Also, if the entry is guarded by 1549 // a comparison with the start value and the backedge is 1550 // guarded by a comparison with the post-inc value, the addrec 1551 // is safe. 1552 if (isKnownPositive(Step)) { 1553 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1554 getUnsignedRangeMax(Step)); 1555 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1556 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 1557 // Cache knowledge of AR NUW, which is propagated to this 1558 // AddRec. 1559 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1560 // Return the expression with the addrec on the outside. 1561 return getAddRecExpr( 1562 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1563 Depth + 1), 1564 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1565 AR->getNoWrapFlags()); 1566 } 1567 } else if (isKnownNegative(Step)) { 1568 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1569 getSignedRangeMin(Step)); 1570 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1571 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1572 // Cache knowledge of AR NW, which is propagated to this 1573 // AddRec. Negative step causes unsigned wrap, but it 1574 // still can't self-wrap. 1575 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1576 // Return the expression with the addrec on the outside. 
1577             return getAddRecExpr(
1578                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1579                                                          Depth + 1),
1580                 getSignExtendExpr(Step, Ty, Depth + 1), L,
1581                 AR->getNoWrapFlags());
1582           }
1583         }
1584       }
1585
1586       // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1587       // if D + (C - D + Step * n) could be proven to not unsigned wrap
1588       // where D maximizes the number of trailing zeros of (C - D + Step * n)
1589       if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1590         const APInt &C = SC->getAPInt();
1591         const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1592         if (D != 0) {
1593           const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1594           const SCEV *SResidual =
1595               getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1596           const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1597           return getAddExpr(SZExtD, SZExtR,
1598                             (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1599                             Depth + 1);
1600         }
1601       }
1602
1603       if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1604         const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1605         return getAddRecExpr(
1606             getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1607             getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1608       }
1609     }
1610
1611   // zext(A % B) --> zext(A) % zext(B)
1612   {
1613     const SCEV *LHS;
1614     const SCEV *RHS;
1615     if (matchURem(Op, LHS, RHS))
1616       return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1617                          getZeroExtendExpr(RHS, Ty, Depth + 1));
1618   }
1619
1620   // zext(A / B) --> zext(A) / zext(B).
1621   if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1622     return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1623                        getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1624
1625   if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1626     // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1627     if (SA->hasNoUnsignedWrap()) {
1628       // If the addition does not unsign overflow then we can, by definition,
1629       // commute the zero extension with the addition operation.
1630       SmallVector<const SCEV *, 4> Ops;
1631       for (const auto *Op : SA->operands())
1632         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1633       return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1634     }
1635
1636     // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1637     // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1638     // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1639     //
1640     // Often address arithmetic contains expressions like
1641     // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1642     // This transformation is useful while proving that such expressions are
1643     // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
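    // For instance (a hand-worked example): in (zext (5 + (4 * X))), the
    // non-constant part (4 * X) has two trailing zero bits, so D keeps the
    // low two bits of C = 5, giving D = 1 and the split
    // (zext(1) + zext(4 + (4 * X))), whose residual keeps those zero bits.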
1644 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1645 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1646 if (D != 0) { 1647 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1648 const SCEV *SResidual = 1649 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1650 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1651 return getAddExpr(SZExtD, SZExtR, 1652 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1653 Depth + 1); 1654 } 1655 } 1656 } 1657 1658 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1659 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1660 if (SM->hasNoUnsignedWrap()) { 1661 // If the multiply does not unsign overflow then we can, by definition, 1662 // commute the zero extension with the multiply operation. 1663 SmallVector<const SCEV *, 4> Ops; 1664 for (const auto *Op : SM->operands()) 1665 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1666 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1667 } 1668 1669 // zext(2^K * (trunc X to iN)) to iM -> 1670 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1671 // 1672 // Proof: 1673 // 1674 // zext(2^K * (trunc X to iN)) to iM 1675 // = zext((trunc X to iN) << K) to iM 1676 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1677 // (because shl removes the top K bits) 1678 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1679 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1680 // 1681 if (SM->getNumOperands() == 2) 1682 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1683 if (MulLHS->getAPInt().isPowerOf2()) 1684 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1685 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1686 MulLHS->getAPInt().logBase2(); 1687 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1688 return getMulExpr( 1689 getZeroExtendExpr(MulLHS, Ty), 1690 getZeroExtendExpr( 1691 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1692 SCEV::FlagNUW, Depth + 1); 1693 } 1694 } 1695 1696 // The cast wasn't folded; create an explicit cast node. 1697 // Recompute the insert position, as it may have been invalidated. 1698 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1699 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1700 Op, Ty); 1701 UniqueSCEVs.InsertNode(S, IP); 1702 addToLoopUseLists(S); 1703 return S; 1704 } 1705 1706 const SCEV * 1707 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1708 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1709 "This is not an extending conversion!"); 1710 assert(isSCEVable(Ty) && 1711 "This is not a conversion to a SCEVable type!"); 1712 Ty = getEffectiveSCEVType(Ty); 1713 1714 // Fold if the operand is constant. 1715 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1716 return getConstant( 1717 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1718 1719 // sext(sext(x)) --> sext(x) 1720 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1721 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1722 1723 // sext(zext(x)) --> zext(x) 1724 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1725 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1726 1727 // Before doing any expensive analysis, check to see if we've already 1728 // computed a SCEV for this Op and Ty. 
1729 FoldingSetNodeID ID; 1730 ID.AddInteger(scSignExtend); 1731 ID.AddPointer(Op); 1732 ID.AddPointer(Ty); 1733 void *IP = nullptr; 1734 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1735 // Limit recursion depth. 1736 if (Depth > MaxCastDepth) { 1737 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1738 Op, Ty); 1739 UniqueSCEVs.InsertNode(S, IP); 1740 addToLoopUseLists(S); 1741 return S; 1742 } 1743 1744 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1745 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1746 // It's possible the bits taken off by the truncate were all sign bits. If 1747 // so, we should be able to simplify this further. 1748 const SCEV *X = ST->getOperand(); 1749 ConstantRange CR = getSignedRange(X); 1750 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1751 unsigned NewBits = getTypeSizeInBits(Ty); 1752 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1753 CR.sextOrTrunc(NewBits))) 1754 return getTruncateOrSignExtend(X, Ty, Depth); 1755 } 1756 1757 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1758 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1759 if (SA->hasNoSignedWrap()) { 1760 // If the addition does not sign overflow then we can, by definition, 1761 // commute the sign extension with the addition operation. 1762 SmallVector<const SCEV *, 4> Ops; 1763 for (const auto *Op : SA->operands()) 1764 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1765 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1766 } 1767 1768 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1769 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1770 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1771 // 1772 // For instance, this will bring two seemingly different expressions: 1773 // 1 + sext(5 + 20 * %x + 24 * %y) and 1774 // sext(6 + 20 * %x + 24 * %y) 1775 // to the same form: 1776 // 2 + sext(4 + 20 * %x + 24 * %y) 1777 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1778 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1779 if (D != 0) { 1780 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1781 const SCEV *SResidual = 1782 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1783 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1784 return getAddExpr(SSExtD, SSExtR, 1785 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1786 Depth + 1); 1787 } 1788 } 1789 } 1790 // If the input value is a chrec scev, and we can prove that the value 1791 // did not overflow the old, smaller, value, we can sign extend all of the 1792 // operands (often constants). This allows analysis of something like 1793 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1794 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1795 if (AR->isAffine()) { 1796 const SCEV *Start = AR->getStart(); 1797 const SCEV *Step = AR->getStepRecurrence(*this); 1798 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1799 const Loop *L = AR->getLoop(); 1800 1801 if (!AR->hasNoSignedWrap()) { 1802 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1803 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1804 } 1805 1806 // If we have special knowledge that this addrec won't overflow, 1807 // we don't need to do any further analysis. 
1808       if (AR->hasNoSignedWrap())
1809         return getAddRecExpr(
1810             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1811             getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
1812
1813       // Check whether the backedge-taken count is SCEVCouldNotCompute.
1814       // Note that this serves two purposes: It filters out loops that are
1815       // simply not analyzable, and it covers the case where this code is
1816       // being called from within backedge-taken count analysis, such that
1817       // attempting to ask for the backedge-taken count would likely result
1818       // in infinite recursion. In the latter case, the analysis code will
1819       // cope with a conservative value, and it will take care to purge
1820       // that value once it has finished.
1821       const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1822       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1823         // Manually compute the final value for AR, checking for
1824         // overflow.
1825
1826         // Check whether the backedge-taken count can be losslessly cast to
1827         // the addrec's type. The count is always unsigned.
1828         const SCEV *CastedMaxBECount =
1829             getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1830         const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1831             CastedMaxBECount, MaxBECount->getType(), Depth);
1832         if (MaxBECount == RecastedMaxBECount) {
1833           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1834           // Check whether Start+Step*MaxBECount has no signed overflow.
1835           const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
1836                                         SCEV::FlagAnyWrap, Depth + 1);
1837           const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
1838                                                           SCEV::FlagAnyWrap,
1839                                                           Depth + 1),
1840                                                WideTy, Depth + 1);
1841           const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
1842           const SCEV *WideMaxBECount =
1843               getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1844           const SCEV *OperandExtendedAdd =
1845               getAddExpr(WideStart,
1846                          getMulExpr(WideMaxBECount,
1847                                     getSignExtendExpr(Step, WideTy, Depth + 1),
1848                                     SCEV::FlagAnyWrap, Depth + 1),
1849                          SCEV::FlagAnyWrap, Depth + 1);
1850           if (SAdd == OperandExtendedAdd) {
1851             // Cache knowledge of AR NSW, which is propagated to this AddRec.
1852             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1853             // Return the expression with the addrec on the outside.
1854             return getAddRecExpr(
1855                 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
1856                                                          Depth + 1),
1857                 getSignExtendExpr(Step, Ty, Depth + 1), L,
1858                 AR->getNoWrapFlags());
1859           }
1860           // Similar to above, only this time treat the step value as unsigned.
1861           // This covers loops that count up with an unsigned step.
1862           OperandExtendedAdd =
1863               getAddExpr(WideStart,
1864                          getMulExpr(WideMaxBECount,
1865                                     getZeroExtendExpr(Step, WideTy, Depth + 1),
1866                                     SCEV::FlagAnyWrap, Depth + 1),
1867                          SCEV::FlagAnyWrap, Depth + 1);
1868           if (SAdd == OperandExtendedAdd) {
1869             // If AR wraps around then
1870             //
1871             //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
1872             //    => SAdd != OperandExtendedAdd
1873             //
1874             // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
1875             // (SAdd == OperandExtendedAdd => AR is NW)
1876
1877             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1878
1879             // Return the expression with the addrec on the outside.
1880 return getAddRecExpr( 1881 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1882 Depth + 1), 1883 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1884 AR->getNoWrapFlags()); 1885 } 1886 } 1887 } 1888 1889 // Normally, in the cases we can prove no-overflow via a 1890 // backedge guarding condition, we can also compute a backedge 1891 // taken count for the loop. The exceptions are assumptions and 1892 // guards present in the loop -- SCEV is not great at exploiting 1893 // these to compute max backedge taken counts, but can still use 1894 // these to prove lack of overflow. Use this fact to avoid 1895 // doing extra work that may not pay off. 1896 1897 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1898 !AC.assumptions().empty()) { 1899 // If the backedge is guarded by a comparison with the pre-inc 1900 // value the addrec is safe. Also, if the entry is guarded by 1901 // a comparison with the start value and the backedge is 1902 // guarded by a comparison with the post-inc value, the addrec 1903 // is safe. 1904 ICmpInst::Predicate Pred; 1905 const SCEV *OverflowLimit = 1906 getSignedOverflowLimitForStep(Step, &Pred, this); 1907 if (OverflowLimit && 1908 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 1909 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 1910 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 1911 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1912 return getAddRecExpr( 1913 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1914 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1915 } 1916 } 1917 1918 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw> 1919 // if D + (C - D + Step * n) could be proven to not signed wrap 1920 // where D maximizes the number of trailing zeros of (C - D + Step * n) 1921 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 1922 const APInt &C = SC->getAPInt(); 1923 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 1924 if (D != 0) { 1925 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1926 const SCEV *SResidual = 1927 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 1928 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1929 return getAddExpr(SSExtD, SSExtR, 1930 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1931 Depth + 1); 1932 } 1933 } 1934 1935 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 1936 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1937 return getAddRecExpr( 1938 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1939 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1940 } 1941 } 1942 1943 // If the input value is provably positive and we could not simplify 1944 // away the sext build a zext instead. 1945 if (isKnownNonNegative(Op)) 1946 return getZeroExtendExpr(Op, Ty, Depth + 1); 1947 1948 // The cast wasn't folded; create an explicit cast node. 1949 // Recompute the insert position, as it may have been invalidated. 1950 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1951 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1952 Op, Ty); 1953 UniqueSCEVs.InsertNode(S, IP); 1954 addToLoopUseLists(S); 1955 return S; 1956 } 1957 1958 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 1959 /// unspecified bits out to the given type. 
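/// Because the extra bits are unspecified, this is free to return whichever
/// cast folds best: negative constants are sign-extended so they remain
/// constants, obviously-signed expressions (e.g. smax) prefer sext, and
/// everything else falls back to zext.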
1960 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 1961 Type *Ty) { 1962 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1963 "This is not an extending conversion!"); 1964 assert(isSCEVable(Ty) && 1965 "This is not a conversion to a SCEVable type!"); 1966 Ty = getEffectiveSCEVType(Ty); 1967 1968 // Sign-extend negative constants. 1969 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1970 if (SC->getAPInt().isNegative()) 1971 return getSignExtendExpr(Op, Ty); 1972 1973 // Peel off a truncate cast. 1974 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 1975 const SCEV *NewOp = T->getOperand(); 1976 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 1977 return getAnyExtendExpr(NewOp, Ty); 1978 return getTruncateOrNoop(NewOp, Ty); 1979 } 1980 1981 // Next try a zext cast. If the cast is folded, use it. 1982 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 1983 if (!isa<SCEVZeroExtendExpr>(ZExt)) 1984 return ZExt; 1985 1986 // Next try a sext cast. If the cast is folded, use it. 1987 const SCEV *SExt = getSignExtendExpr(Op, Ty); 1988 if (!isa<SCEVSignExtendExpr>(SExt)) 1989 return SExt; 1990 1991 // Force the cast to be folded into the operands of an addrec. 1992 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 1993 SmallVector<const SCEV *, 4> Ops; 1994 for (const SCEV *Op : AR->operands()) 1995 Ops.push_back(getAnyExtendExpr(Op, Ty)); 1996 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 1997 } 1998 1999 // If the expression is obviously signed, use the sext cast value. 2000 if (isa<SCEVSMaxExpr>(Op)) 2001 return SExt; 2002 2003 // Absent any other information, use the zext cast value. 2004 return ZExt; 2005 } 2006 2007 /// Process the given Ops list, which is a list of operands to be added under 2008 /// the given scale, update the given map. This is a helper function for 2009 /// getAddRecExpr. As an example of what it does, given a sequence of operands 2010 /// that would form an add expression like this: 2011 /// 2012 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 2013 /// 2014 /// where A and B are constants, update the map with these values: 2015 /// 2016 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2017 /// 2018 /// and add 13 + A*B*29 to AccumulatedConstant. 2019 /// This will allow getAddRecExpr to produce this: 2020 /// 2021 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2022 /// 2023 /// This form often exposes folding opportunities that are hidden in 2024 /// the original operand list. 2025 /// 2026 /// Return true iff it appears that any interesting folding opportunities 2027 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 2028 /// the common case where no interesting opportunities are present, and 2029 /// is also used as a check to avoid infinite recursion. 2030 static bool 2031 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2032 SmallVectorImpl<const SCEV *> &NewOps, 2033 APInt &AccumulatedConstant, 2034 const SCEV *const *Ops, size_t NumOperands, 2035 const APInt &Scale, 2036 ScalarEvolution &SE) { 2037 bool Interesting = false; 2038 2039 // Iterate over the add operands. They are sorted, with constants first. 2040 unsigned i = 0; 2041 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2042 ++i; 2043 // Pull a buried constant out to the outside. 
2044 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2045 Interesting = true; 2046 AccumulatedConstant += Scale * C->getAPInt(); 2047 } 2048 2049 // Next comes everything else. We're especially interested in multiplies 2050 // here, but they're in the middle, so just visit the rest with one loop. 2051 for (; i != NumOperands; ++i) { 2052 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2053 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2054 APInt NewScale = 2055 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2056 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2057 // A multiplication of a constant with another add; recurse. 2058 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2059 Interesting |= 2060 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2061 Add->op_begin(), Add->getNumOperands(), 2062 NewScale, SE); 2063 } else { 2064 // A multiplication of a constant with some other value. Update 2065 // the map. 2066 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2067 const SCEV *Key = SE.getMulExpr(MulOps); 2068 auto Pair = M.insert({Key, NewScale}); 2069 if (Pair.second) { 2070 NewOps.push_back(Pair.first->first); 2071 } else { 2072 Pair.first->second += NewScale; 2073 // The map already had an entry for this value, which may indicate 2074 // a folding opportunity. 2075 Interesting = true; 2076 } 2077 } 2078 } else { 2079 // An ordinary operand. Update the map. 2080 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2081 M.insert({Ops[i], Scale}); 2082 if (Pair.second) { 2083 NewOps.push_back(Pair.first->first); 2084 } else { 2085 Pair.first->second += Scale; 2086 // The map already had an entry for this value, which may indicate 2087 // a folding opportunity. 2088 Interesting = true; 2089 } 2090 } 2091 } 2092 2093 return Interesting; 2094 } 2095 2096 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2097 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2098 // can't-overflow flags for the operation if possible. 2099 static SCEV::NoWrapFlags 2100 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2101 const ArrayRef<const SCEV *> Ops, 2102 SCEV::NoWrapFlags Flags) { 2103 using namespace std::placeholders; 2104 2105 using OBO = OverflowingBinaryOperator; 2106 2107 bool CanAnalyze = 2108 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2109 (void)CanAnalyze; 2110 assert(CanAnalyze && "don't call from other places!"); 2111 2112 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2113 SCEV::NoWrapFlags SignOrUnsignWrap = 2114 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2115 2116 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 
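  // (When every operand is non-negative, the signed and unsigned
  // interpretations agree on [0, SIGNED_MAX], so a sum that provably cannot
  // wrap in the signed sense cannot wrap in the unsigned sense either.)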
2117 auto IsKnownNonNegative = [&](const SCEV *S) { 2118 return SE->isKnownNonNegative(S); 2119 }; 2120 2121 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2122 Flags = 2123 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2124 2125 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2126 2127 if (SignOrUnsignWrap != SignOrUnsignMask && 2128 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2129 isa<SCEVConstant>(Ops[0])) { 2130 2131 auto Opcode = [&] { 2132 switch (Type) { 2133 case scAddExpr: 2134 return Instruction::Add; 2135 case scMulExpr: 2136 return Instruction::Mul; 2137 default: 2138 llvm_unreachable("Unexpected SCEV op."); 2139 } 2140 }(); 2141 2142 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2143 2144 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2145 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2146 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2147 Opcode, C, OBO::NoSignedWrap); 2148 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2149 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2150 } 2151 2152 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 2153 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2154 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2155 Opcode, C, OBO::NoUnsignedWrap); 2156 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2157 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2158 } 2159 } 2160 2161 return Flags; 2162 } 2163 2164 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2165 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2166 } 2167 2168 /// Get a canonical add expression, or something simpler if possible. 2169 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2170 SCEV::NoWrapFlags Flags, 2171 unsigned Depth) { 2172 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2173 "only nuw or nsw allowed"); 2174 assert(!Ops.empty() && "Cannot get empty add!"); 2175 if (Ops.size() == 1) return Ops[0]; 2176 #ifndef NDEBUG 2177 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2178 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2179 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2180 "SCEVAddExpr operand types don't match!"); 2181 #endif 2182 2183 // Sort by complexity, this groups all similar expression types together. 2184 GroupByComplexity(Ops, &LI, DT); 2185 2186 // If there are any constants, fold them together. 2187 unsigned Idx = 0; 2188 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2189 ++Idx; 2190 assert(Idx < Ops.size()); 2191 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2192 // We found two constants, fold them together! 2193 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2194 if (Ops.size() == 2) return Ops[0]; 2195 Ops.erase(Ops.begin()+1); // Erase the folded element 2196 LHSC = cast<SCEVConstant>(Ops[0]); 2197 } 2198 2199 // If we are left with a constant zero being added, strip it off. 2200 if (LHSC->getValue()->isZero()) { 2201 Ops.erase(Ops.begin()); 2202 --Idx; 2203 } 2204 2205 if (Ops.size() == 1) return Ops[0]; 2206 } 2207 2208 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags); 2209 2210 // Limit recursion calls depth. 
2211   if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2212     return getOrCreateAddExpr(Ops, Flags);
2213
2214   if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
2215     static_cast<SCEVAddExpr *>(S)->setNoWrapFlags(Flags);
2216     return S;
2217   }
2218
2219   // Okay, check to see if the same value occurs in the operand list more than
2220   // once. If so, merge them together into a multiply expression. Since we
2221   // sorted the list, these values are required to be adjacent.
2222   Type *Ty = Ops[0]->getType();
2223   bool FoundMatch = false;
2224   for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2225     if (Ops[i] == Ops[i+1]) {      // X + Y + Y --> X + Y*2
2226       // Scan ahead to count how many equal operands there are.
2227       unsigned Count = 2;
2228       while (i+Count != e && Ops[i+Count] == Ops[i])
2229         ++Count;
2230       // Merge the values into a multiply.
2231       const SCEV *Scale = getConstant(Ty, Count);
2232       const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2233       if (Ops.size() == Count)
2234         return Mul;
2235       Ops[i] = Mul;
2236       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2237       --i; e -= Count - 1;
2238       FoundMatch = true;
2239     }
2240   if (FoundMatch)
2241     return getAddExpr(Ops, Flags, Depth + 1);
2242
2243   // Check for truncates. If all the operands are truncated from the same
2244   // type, see if factoring out the truncate would permit the result to be
2245   // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2246   // if the contents of the resulting outer trunc fold to something simple.
2247   auto FindTruncSrcType = [&]() -> Type * {
2248     // We're ultimately looking to fold an addrec of truncs and muls of only
2249     // constants and truncs, so if we find any other types of SCEV
2250     // as operands of the addrec then we bail and return nullptr here.
2251     // Otherwise, we return the type of the operand of a trunc that we find.
2252     if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2253       return T->getOperand()->getType();
2254     if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2255       const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2256       if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2257         return T->getOperand()->getType();
2258     }
2259     return nullptr;
2260   };
2261   if (auto *SrcType = FindTruncSrcType()) {
2262     SmallVector<const SCEV *, 8> LargeOps;
2263     bool Ok = true;
2264     // Check all the operands to see if they can be represented in the
2265     // source type of the truncate.
2266     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2267       if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2268         if (T->getOperand()->getType() != SrcType) {
2269           Ok = false;
2270           break;
2271         }
2272         LargeOps.push_back(T->getOperand());
2273       } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2274         LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2275       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2276         SmallVector<const SCEV *, 8> LargeMulOps;
2277         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2278           if (const SCEVTruncateExpr *T =
2279                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2280             if (T->getOperand()->getType() != SrcType) {
2281               Ok = false;
2282               break;
2283             }
2284             LargeMulOps.push_back(T->getOperand());
2285           } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2286             LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2287           } else {
2288             Ok = false;
2289             break;
2290           }
2291         }
2292         if (Ok)
2293           LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2294       } else {
2295         Ok = false;
2296         break;
2297       }
2298     }
2299     if (Ok) {
2300       // Evaluate the expression in the larger type.
2301       const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2302       // If it folds to something simple, use it. Otherwise, don't.
2303       if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2304         return getTruncateExpr(Fold, Ty);
2305     }
2306   }
2307
2308   // Skip past any other cast SCEVs.
2309   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2310     ++Idx;
2311
2312   // If there are add operands they would be next.
2313   if (Idx < Ops.size()) {
2314     bool DeletedAdd = false;
2315     while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2316       if (Ops.size() > AddOpsInlineThreshold ||
2317           Add->getNumOperands() > AddOpsInlineThreshold)
2318         break;
2319       // If we have an add, expand the add operands onto the end of the operands
2320       // list.
2321       Ops.erase(Ops.begin()+Idx);
2322       Ops.append(Add->op_begin(), Add->op_end());
2323       DeletedAdd = true;
2324     }
2325
2326     // If we deleted at least one add, we added operands to the end of the list,
2327     // and they are not necessarily sorted. Recurse to resort and resimplify
2328     // any operands we just acquired.
2329     if (DeletedAdd)
2330       return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2331   }
2332
2333   // Skip over the add expression until we get to a multiply.
2334   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2335     ++Idx;
2336
2337   // Check to see if there are any folding opportunities present with
2338   // operands multiplied by constant values.
2339   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2340     uint64_t BitWidth = getTypeSizeInBits(Ty);
2341     DenseMap<const SCEV *, APInt> M;
2342     SmallVector<const SCEV *, 8> NewOps;
2343     APInt AccumulatedConstant(BitWidth, 0);
2344     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2345                                      Ops.data(), Ops.size(),
2346                                      APInt(BitWidth, 1), *this)) {
2347       struct APIntCompare {
2348         bool operator()(const APInt &LHS, const APInt &RHS) const {
2349           return LHS.ult(RHS);
2350         }
2351       };
2352
2353       // Some interesting folding opportunity is present, so it's worthwhile to
2354       // re-generate the operands list. Group the operands by constant scale,
2355       // to avoid multiplying by the same constant scale multiple times.
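      // For instance, 2*x + 3*y + 2*z regroups to 2*(x + z) + 3*y, so the
      // scale 2 is applied once rather than once per operand.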
2356 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2357 for (const SCEV *NewOp : NewOps) 2358 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2359 // Re-generate the operands list. 2360 Ops.clear(); 2361 if (AccumulatedConstant != 0) 2362 Ops.push_back(getConstant(AccumulatedConstant)); 2363 for (auto &MulOp : MulOpLists) 2364 if (MulOp.first != 0) 2365 Ops.push_back(getMulExpr( 2366 getConstant(MulOp.first), 2367 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2368 SCEV::FlagAnyWrap, Depth + 1)); 2369 if (Ops.empty()) 2370 return getZero(Ty); 2371 if (Ops.size() == 1) 2372 return Ops[0]; 2373 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2374 } 2375 } 2376 2377 // If we are adding something to a multiply expression, make sure the 2378 // something is not already an operand of the multiply. If so, merge it into 2379 // the multiply. 2380 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2381 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2382 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2383 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2384 if (isa<SCEVConstant>(MulOpSCEV)) 2385 continue; 2386 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2387 if (MulOpSCEV == Ops[AddOp]) { 2388 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2389 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2390 if (Mul->getNumOperands() != 2) { 2391 // If the multiply has more than two operands, we must get the 2392 // Y*Z term. 2393 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2394 Mul->op_begin()+MulOp); 2395 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2396 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2397 } 2398 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2399 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2400 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2401 SCEV::FlagAnyWrap, Depth + 1); 2402 if (Ops.size() == 2) return OuterMul; 2403 if (AddOp < Idx) { 2404 Ops.erase(Ops.begin()+AddOp); 2405 Ops.erase(Ops.begin()+Idx-1); 2406 } else { 2407 Ops.erase(Ops.begin()+Idx); 2408 Ops.erase(Ops.begin()+AddOp-1); 2409 } 2410 Ops.push_back(OuterMul); 2411 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2412 } 2413 2414 // Check this multiply against other multiplies being added together. 2415 for (unsigned OtherMulIdx = Idx+1; 2416 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2417 ++OtherMulIdx) { 2418 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2419 // If MulOp occurs in OtherMul, we can fold the two multiplies 2420 // together. 
2421         for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2422              OMulOp != e; ++OMulOp)
2423           if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2424             // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2425             const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2426             if (Mul->getNumOperands() != 2) {
2427               SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2428                                                   Mul->op_begin()+MulOp);
2429               MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2430               InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2431             }
2432             const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2433             if (OtherMul->getNumOperands() != 2) {
2434               SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
2435                                                   OtherMul->op_begin()+OMulOp);
2436               MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2437               InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2438             }
2439             SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
2440             const SCEV *InnerMulSum =
2441                 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2442             const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
2443                                               SCEV::FlagAnyWrap, Depth + 1);
2444             if (Ops.size() == 2) return OuterMul;
2445             Ops.erase(Ops.begin()+Idx);
2446             Ops.erase(Ops.begin()+OtherMulIdx-1);
2447             Ops.push_back(OuterMul);
2448             return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2449           }
2450       }
2451     }
2452   }
2453
2454   // If there are any add recurrences in the operands list, see if any other
2455   // added values are loop invariant. If so, we can fold them into the
2456   // recurrence.
2457   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2458     ++Idx;
2459
2460   // Scan over all recurrences, trying to fold loop invariants into them.
2461   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2462     // Scan all of the other operands to this add and add them to the vector if
2463     // they are loop invariant w.r.t. the recurrence.
2464     SmallVector<const SCEV *, 8> LIOps;
2465     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2466     const Loop *AddRecLoop = AddRec->getLoop();
2467     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2468       if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2469         LIOps.push_back(Ops[i]);
2470         Ops.erase(Ops.begin()+i);
2471         --i; --e;
2472       }
2473
2474     // If we found some loop invariants, fold them into the recurrence.
2475     if (!LIOps.empty()) {
2476       // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2477       LIOps.push_back(AddRec->getStart());
2478
2479       SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2480                                              AddRec->op_end());
2481       // This follows from the fact that the no-wrap flags on the outer add
2482       // expression are applicable on the 0th iteration, when the add recurrence
2483       // will be equal to its start value.
2484       AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);
2485
2486       // Build the new addrec. Propagate the NUW and NSW flags if both the
2487       // outer add and the inner addrec are guaranteed to have no overflow.
2488       // Always propagate NW.
2489       Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2490       const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2491
2492       // If all of the other operands were loop invariant, we are done.
2493       if (Ops.size() == 1) return NewRec;
2494
2495       // Otherwise, add the folded AddRec to the non-invariant parts.
2496 for (unsigned i = 0;; ++i) 2497 if (Ops[i] == AddRec) { 2498 Ops[i] = NewRec; 2499 break; 2500 } 2501 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2502 } 2503 2504 // Okay, if there weren't any loop invariants to be folded, check to see if 2505 // there are multiple AddRec's with the same loop induction variable being 2506 // added together. If so, we can fold them. 2507 for (unsigned OtherIdx = Idx+1; 2508 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2509 ++OtherIdx) { 2510 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2511 // so that the 1st found AddRecExpr is dominated by all others. 2512 assert(DT.dominates( 2513 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2514 AddRec->getLoop()->getHeader()) && 2515 "AddRecExprs are not sorted in reverse dominance order?"); 2516 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2517 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2518 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2519 AddRec->op_end()); 2520 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2521 ++OtherIdx) { 2522 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2523 if (OtherAddRec->getLoop() == AddRecLoop) { 2524 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2525 i != e; ++i) { 2526 if (i >= AddRecOps.size()) { 2527 AddRecOps.append(OtherAddRec->op_begin()+i, 2528 OtherAddRec->op_end()); 2529 break; 2530 } 2531 SmallVector<const SCEV *, 2> TwoOps = { 2532 AddRecOps[i], OtherAddRec->getOperand(i)}; 2533 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2534 } 2535 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2536 } 2537 } 2538 // Step size has changed, so we cannot guarantee no self-wraparound. 2539 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2540 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2541 } 2542 } 2543 2544 // Otherwise couldn't fold anything into this recurrence. Move onto the 2545 // next one. 2546 } 2547 2548 // Okay, it looks like we really DO need an add expr. Check to see if we 2549 // already have one, otherwise create a new one. 
2550   return getOrCreateAddExpr(Ops, Flags);
2551 }
2552
2553 const SCEV *
2554 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2555                                     SCEV::NoWrapFlags Flags) {
2556   FoldingSetNodeID ID;
2557   ID.AddInteger(scAddExpr);
2558   for (const SCEV *Op : Ops)
2559     ID.AddPointer(Op);
2560   void *IP = nullptr;
2561   SCEVAddExpr *S =
2562       static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2563   if (!S) {
2564     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2565     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2566     S = new (SCEVAllocator)
2567         SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2568     UniqueSCEVs.InsertNode(S, IP);
2569     addToLoopUseLists(S);
2570   }
2571   S->setNoWrapFlags(Flags);
2572   return S;
2573 }
2574
2575 const SCEV *
2576 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
2577                                        const Loop *L, SCEV::NoWrapFlags Flags) {
2578   FoldingSetNodeID ID;
2579   ID.AddInteger(scAddRecExpr);
2580   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2581     ID.AddPointer(Ops[i]);
2582   ID.AddPointer(L);
2583   void *IP = nullptr;
2584   SCEVAddRecExpr *S =
2585       static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2586   if (!S) {
2587     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2588     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2589     S = new (SCEVAllocator)
2590         SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
2591     UniqueSCEVs.InsertNode(S, IP);
2592     addToLoopUseLists(S);
2593   }
2594   S->setNoWrapFlags(Flags);
2595   return S;
2596 }
2597
2598 const SCEV *
2599 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
2600                                     SCEV::NoWrapFlags Flags) {
2601   FoldingSetNodeID ID;
2602   ID.AddInteger(scMulExpr);
2603   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2604     ID.AddPointer(Ops[i]);
2605   void *IP = nullptr;
2606   SCEVMulExpr *S =
2607       static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2608   if (!S) {
2609     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2610     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2611     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2612                                         O, Ops.size());
2613     UniqueSCEVs.InsertNode(S, IP);
2614     addToLoopUseLists(S);
2615   }
2616   S->setNoWrapFlags(Flags);
2617   return S;
2618 }
2619
2620 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2621   uint64_t k = i*j;
2622   if (j > 1 && k / j != i) Overflow = true;
2623   return k;
2624 }
2625
2626 /// Compute the result of "n choose k", the binomial coefficient. If an
2627 /// intermediate computation overflows, Overflow will be set and the return will
2628 /// be garbage. Overflow is not cleared on absence of overflow.
2629 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2630   // We use the multiplicative formula:
2631   //   n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2632   // At each iteration, we take the n-th term of the numerator and divide by
2633   // the (k-n)th term of the denominator. This division will always produce an
2634   // integral result, and helps reduce the chance of overflow in the
2635   // intermediate computations. However, we can still overflow even when the
2636   // final result would fit.
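  // For instance, Choose(5, 2) computes r = (1*5)/1 = 5, then r = (5*4)/2
  // = 10; each intermediate r is itself a binomial coefficient, which is
  // why the running division always stays integral.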
2637 2638 if (n == 0 || n == k) return 1; 2639 if (k > n) return 0; 2640 2641 if (k > n/2) 2642 k = n-k; 2643 2644 uint64_t r = 1; 2645 for (uint64_t i = 1; i <= k; ++i) { 2646 r = umul_ov(r, n-(i-1), Overflow); 2647 r /= i; 2648 } 2649 return r; 2650 } 2651 2652 /// Determine if any of the operands in this SCEV are a constant or if 2653 /// any of the add or multiply expressions in this SCEV contain a constant. 2654 static bool containsConstantInAddMulChain(const SCEV *StartExpr) { 2655 struct FindConstantInAddMulChain { 2656 bool FoundConstant = false; 2657 2658 bool follow(const SCEV *S) { 2659 FoundConstant |= isa<SCEVConstant>(S); 2660 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); 2661 } 2662 2663 bool isDone() const { 2664 return FoundConstant; 2665 } 2666 }; 2667 2668 FindConstantInAddMulChain F; 2669 SCEVTraversal<FindConstantInAddMulChain> ST(F); 2670 ST.visitAll(StartExpr); 2671 return F.FoundConstant; 2672 } 2673 2674 /// Get a canonical multiply expression, or something simpler if possible. 2675 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2676 SCEV::NoWrapFlags Flags, 2677 unsigned Depth) { 2678 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2679 "only nuw or nsw allowed"); 2680 assert(!Ops.empty() && "Cannot get empty mul!"); 2681 if (Ops.size() == 1) return Ops[0]; 2682 #ifndef NDEBUG 2683 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2684 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2685 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2686 "SCEVMulExpr operand types don't match!"); 2687 #endif 2688 2689 // Sort by complexity, this groups all similar expression types together. 2690 GroupByComplexity(Ops, &LI, DT); 2691 2692 // If there are any constants, fold them together. 2693 unsigned Idx = 0; 2694 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2695 ++Idx; 2696 assert(Idx < Ops.size()); 2697 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2698 // We found two constants, fold them together! 2699 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt()); 2700 if (Ops.size() == 2) return Ops[0]; 2701 Ops.erase(Ops.begin()+1); // Erase the folded element 2702 LHSC = cast<SCEVConstant>(Ops[0]); 2703 } 2704 2705 // If we have a multiply of zero, it will always be zero. 2706 if (LHSC->getValue()->isZero()) 2707 return LHSC; 2708 2709 // If we are left with a constant one being multiplied, strip it off. 2710 if (LHSC->getValue()->isOne()) { 2711 Ops.erase(Ops.begin()); 2712 --Idx; 2713 } 2714 2715 if (Ops.size() == 1) 2716 return Ops[0]; 2717 } 2718 2719 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2720 2721 // Limit recursion calls depth. 2722 if (Depth > MaxArithDepth || hasHugeExpression(Ops)) 2723 return getOrCreateMulExpr(Ops, Flags); 2724 2725 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) { 2726 static_cast<SCEVMulExpr *>(S)->setNoWrapFlags(Flags); 2727 return S; 2728 } 2729 2730 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2731 if (Ops.size() == 2) { 2732 // C1*(C2+V) -> C1*C2 + C1*V 2733 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2734 // If any of Add's ops are Adds or Muls with a constant, apply this 2735 // transformation as well. 2736 // 2737 // TODO: There are some cases where this transformation is not 2738 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of 2739 // this transformation should be narrowed down. 
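        // For instance, 2*(3 + x) becomes 6 + 2*x, exposing the constant 6
        // to further folding with sibling operands of an enclosing add.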
2740 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) 2741 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2742 SCEV::FlagAnyWrap, Depth + 1), 2743 getMulExpr(LHSC, Add->getOperand(1), 2744 SCEV::FlagAnyWrap, Depth + 1), 2745 SCEV::FlagAnyWrap, Depth + 1); 2746 2747 if (Ops[0]->isAllOnesValue()) { 2748 // If we have a mul by -1 of an add, try distributing the -1 among the 2749 // add operands. 2750 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2751 SmallVector<const SCEV *, 4> NewOps; 2752 bool AnyFolded = false; 2753 for (const SCEV *AddOp : Add->operands()) { 2754 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2755 Depth + 1); 2756 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2757 NewOps.push_back(Mul); 2758 } 2759 if (AnyFolded) 2760 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 2761 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2762 // Negation preserves a recurrence's no self-wrap property. 2763 SmallVector<const SCEV *, 4> Operands; 2764 for (const SCEV *AddRecOp : AddRec->operands()) 2765 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, 2766 Depth + 1)); 2767 2768 return getAddRecExpr(Operands, AddRec->getLoop(), 2769 AddRec->getNoWrapFlags(SCEV::FlagNW)); 2770 } 2771 } 2772 } 2773 } 2774 2775 // Skip over the add expression until we get to a multiply. 2776 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2777 ++Idx; 2778 2779 // If there are mul operands inline them all into this expression. 2780 if (Idx < Ops.size()) { 2781 bool DeletedMul = false; 2782 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2783 if (Ops.size() > MulOpsInlineThreshold) 2784 break; 2785 // If we have an mul, expand the mul operands onto the end of the 2786 // operands list. 2787 Ops.erase(Ops.begin()+Idx); 2788 Ops.append(Mul->op_begin(), Mul->op_end()); 2789 DeletedMul = true; 2790 } 2791 2792 // If we deleted at least one mul, we added operands to the end of the 2793 // list, and they are not necessarily sorted. Recurse to resort and 2794 // resimplify any operands we just acquired. 2795 if (DeletedMul) 2796 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2797 } 2798 2799 // If there are any add recurrences in the operands list, see if any other 2800 // added values are loop invariant. If so, we can fold them into the 2801 // recurrence. 2802 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2803 ++Idx; 2804 2805 // Scan over all recurrences, trying to fold loop invariants into them. 2806 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2807 // Scan all of the other operands to this mul and add them to the vector 2808 // if they are loop invariant w.r.t. the recurrence. 2809 SmallVector<const SCEV *, 8> LIOps; 2810 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2811 const Loop *AddRecLoop = AddRec->getLoop(); 2812 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2813 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2814 LIOps.push_back(Ops[i]); 2815 Ops.erase(Ops.begin()+i); 2816 --i; --e; 2817 } 2818 2819 // If we found some loop invariants, fold them into the recurrence. 
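    // For instance, a loop-invariant factor 3 folds as
    // 3 * {2,+,4} --> {6,+,12}.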
2820 if (!LIOps.empty()) { 2821 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 2822 SmallVector<const SCEV *, 4> NewOps; 2823 NewOps.reserve(AddRec->getNumOperands()); 2824 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 2825 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 2826 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 2827 SCEV::FlagAnyWrap, Depth + 1)); 2828 2829 // Build the new addrec. Propagate the NUW and NSW flags if both the 2830 // outer mul and the inner addrec are guaranteed to have no overflow. 2831 // 2832 // No self-wrap cannot be guaranteed after changing the step size, but 2833 // will be inferred if either NUW or NSW is true. 2834 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); 2835 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); 2836 2837 // If all of the other operands were loop invariant, we are done. 2838 if (Ops.size() == 1) return NewRec; 2839 2840 // Otherwise, multiply the folded AddRec by the non-invariant parts. 2841 for (unsigned i = 0;; ++i) 2842 if (Ops[i] == AddRec) { 2843 Ops[i] = NewRec; 2844 break; 2845 } 2846 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2847 } 2848 2849 // Okay, if there weren't any loop invariants to be folded, check to see 2850 // if there are multiple AddRec's with the same loop induction variable 2851 // being multiplied together. If so, we can fold them. 2852 2853 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 2854 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 2855 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 2856 // ]]],+,...up to x=2n}. 2857 // Note that the arguments to choose() are always integers with values 2858 // known at compile time, never SCEV objects. 2859 // 2860 // The implementation avoids pointless extra computations when the two 2861 // addrec's are of different length (mathematically, it's equivalent to 2862 // an infinite stream of zeros on the right). 2863 bool OpsModified = false; 2864 for (unsigned OtherIdx = Idx+1; 2865 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2866 ++OtherIdx) { 2867 const SCEVAddRecExpr *OtherAddRec = 2868 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2869 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 2870 continue; 2871 2872 // Limit max number of arguments to avoid creation of unreasonably big 2873 // SCEVAddRecs with very complex operands. 
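      // (A hand-checked instance of the formula above:
      // {1,+,2}<L> * {3,+,4}<L> = {3,+,18,+,16}<L>; at iteration 1 both
      // sides give (1+2)*(3+4) = 21 = 3 + 18, and at iteration 2 both give
      // 5*11 = 55 = 3 + 2*18 + 16.)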
2874 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > 2875 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec})) 2876 continue; 2877 2878 bool Overflow = false; 2879 Type *Ty = AddRec->getType(); 2880 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; 2881 SmallVector<const SCEV*, 7> AddRecOps; 2882 for (int x = 0, xe = AddRec->getNumOperands() + 2883 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { 2884 SmallVector <const SCEV *, 7> SumOps; 2885 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { 2886 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); 2887 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), 2888 ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); 2889 z < ze && !Overflow; ++z) { 2890 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); 2891 uint64_t Coeff; 2892 if (LargerThan64Bits) 2893 Coeff = umul_ov(Coeff1, Coeff2, Overflow); 2894 else 2895 Coeff = Coeff1*Coeff2; 2896 const SCEV *CoeffTerm = getConstant(Ty, Coeff); 2897 const SCEV *Term1 = AddRec->getOperand(y-z); 2898 const SCEV *Term2 = OtherAddRec->getOperand(z); 2899 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2, 2900 SCEV::FlagAnyWrap, Depth + 1)); 2901 } 2902 } 2903 if (SumOps.empty()) 2904 SumOps.push_back(getZero(Ty)); 2905 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1)); 2906 } 2907 if (!Overflow) { 2908 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop, 2909 SCEV::FlagAnyWrap); 2910 if (Ops.size() == 2) return NewAddRec; 2911 Ops[Idx] = NewAddRec; 2912 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2913 OpsModified = true; 2914 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); 2915 if (!AddRec) 2916 break; 2917 } 2918 } 2919 if (OpsModified) 2920 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2921 2922 // Otherwise couldn't fold anything into this recurrence. Move onto the 2923 // next one. 2924 } 2925 2926 // Okay, it looks like we really DO need an mul expr. Check to see if we 2927 // already have one, otherwise create a new one. 2928 return getOrCreateMulExpr(Ops, Flags); 2929 } 2930 2931 /// Represents an unsigned remainder expression based on unsigned division. 2932 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS, 2933 const SCEV *RHS) { 2934 assert(getEffectiveSCEVType(LHS->getType()) == 2935 getEffectiveSCEVType(RHS->getType()) && 2936 "SCEVURemExpr operand types don't match!"); 2937 2938 // Short-circuit easy cases 2939 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 2940 // If constant is one, the result is trivial 2941 if (RHSC->getValue()->isOne()) 2942 return getZero(LHS->getType()); // X urem 1 --> 0 2943 2944 // If constant is a power of two, fold into a zext(trunc(LHS)). 2945 if (RHSC->getAPInt().isPowerOf2()) { 2946 Type *FullTy = LHS->getType(); 2947 Type *TruncTy = 2948 IntegerType::get(getContext(), RHSC->getAPInt().logBase2()); 2949 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy); 2950 } 2951 } 2952 2953 // Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y) 2954 const SCEV *UDiv = getUDivExpr(LHS, RHS); 2955 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW); 2956 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW); 2957 } 2958 2959 /// Get a canonical unsigned division expression, or something simpler if 2960 /// possible. 
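/// For instance, {0,+,4} udiv 2 folds to {0,+,2} once the step is known to
/// be divisible by the divisor and the recurrence provably does not wrap.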
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to the general identity:
  //   %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS;                            // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of the
      // dividend.
      // TODO: Generalize this to non-constants by using known-bits
      // information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
        IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence:
          //   {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N==0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0) {
              const SCEV *NewLHS =
                  getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                AR->getLoop(), SCEV::FlagNW);
              if (LHS != NewLHS) {
                LHS = NewLHS;

                // Reset the ID to include the new LHS, and check if it is
                // already cached.
                ID.clear();
                ID.AddInteger(scUDivExpr);
                ID.AddPointer(LHS);
                ID.AddPointer(RHS);
                IP = nullptr;
                if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
                  return S;
              }
            }
          }
        }
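      // For example, the first fold above rewrites {0,+,8}<L> /u 4 as
      // {0,+,2}<L> once the zero-extend comparison shows that the division
      // distributes over the recurrence without wrapping.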
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : M->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
                                                      M->op_end());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }

      // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
      if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
        if (auto *DivisorConstant =
                dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
          bool Overflow = false;
          APInt NewRHS =
              DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
          if (Overflow) {
            return getConstant(RHSC->getType(), 0, false);
          }
          return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
        }
      }

      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : A->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
  // changes). Make sure we get a new one.
  IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
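///
/// For example, ((6 * %x)<nuw> /u 2) folds to (3 * %x), and
/// ((6 * %x)<nuw> /u 4) is first reduced via gcd(6, 4) == 2 to
/// ((3 * %x) /u 2). A non-exact udiv may be discarding low bits, so neither
/// rewrite would be sound there.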
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now we
  // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  // end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands;
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly; it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
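///
/// For example, a step that is itself a recurrence on the same loop is
/// flattened above, so {S,+,{A,+,B}<L>}<L> becomes {S,+,A,+,B}<L>, and a
/// trailing zero step is dropped below, so {X,+,0}<L> folds to plain X.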
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
          maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
            maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }
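  // For example, given {{A,+,B}<L2>,+,C}<L1> with L2 nested inside L1, the
  // canonicalization above produces {{A,+,C}<L1>,+,B}<L2>, so the recurrence
  // for the deeper loop ends up outermost in the expression tree.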
  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddRecExpr(Operands, L, Flags);
}

const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  // instruction to its SCEV, because the Instruction may be guarded by control
  // flow and the no-overflow bits may not be valid for the expression in any
  // context. This can be fixed similarly to how these flags are handled for
  // adds.
  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                             : SCEV::FlagAnyWrap;

  const SCEV *TotalOffset = getZero(IntIdxTy);
  Type *CurTy = GEP->getType();
  bool FirstIter = true;
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      if (FirstIter) {
        assert(isa<PointerType>(CurTy) &&
               "The first index of a GEP indexes a pointer");
        CurTy = GEP->getSourceElementType();
        FirstIter = false;
      } else {
        CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
      }
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseExpr, TotalOffset, Wrap);
}

std::tuple<SCEV *, FoldingSetNodeID, void *>
ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
                                         ArrayRef<const SCEV *> Ops) {
  FoldingSetNodeID ID;
  void *IP = nullptr;
  ID.AddInteger(SCEVType);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  return std::tuple<SCEV *, FoldingSetNodeID, void *>(
      UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP);
}
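/// Return a SCEV for abs(Op) as smax(Op, -Op). When \p IsNSW is set, the
/// negation is built with the nsw flag, encoding the caller's knowledge that
/// Op is not the minimum signed value.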
const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
  SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
  return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
}

const SCEV *ScalarEvolution::getSignumExpr(const SCEV *Op) {
  Type *Ty = Op->getType();
  return getSMinExpr(getSMaxExpr(Op, getMinusOne(Ty)), getOne(Ty));
}

const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
                                           SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "Operand types don't match!");
#endif

  bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
  bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // Check if we have created the same expression before.
  if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) {
    return S;
  }

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
      if (Kind == scSMaxExpr)
        return APIntOps::smax(LHS, RHS);
      else if (Kind == scSMinExpr)
        return APIntOps::smin(LHS, RHS);
      else if (Kind == scUMaxExpr)
        return APIntOps::umax(LHS, RHS);
      else if (Kind == scUMinExpr)
        return APIntOps::umin(LHS, RHS);
      llvm_unreachable("Unknown SCEV min/max opcode");
    };

    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
    bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);

    if (IsMax ? IsMinV : IsMaxV) {
      // If we are left with a constant minimum(/maximum)-int, strip it off.
      Ops.erase(Ops.begin());
      --Idx;
    } else if (IsMax ? IsMaxV : IsMinV) {
      // If we have a max(/min) with a constant maximum(/minimum)-int,
      // it will always be the extremum.
      return LHSC;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first operation of the same kind.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
    ++Idx;

  // Check to see if one of the operands is of the same kind. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedAny = false;
    while (Ops[Idx]->getSCEVType() == Kind) {
      const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMME->op_begin(), SMME->op_end());
      DeletedAny = true;
    }

    if (DeletedAny)
      return getMinMaxExpr(Kind, Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
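  //
  // For example, smax(%a, %b, %b) simplifies to smax(%a, %b) here, and
  // umin(%a, %b) simplifies to %a whenever %a <=u %b can be proven.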
  llvm::CmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  llvm::CmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
  llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
    if (Ops[i] == Ops[i + 1] ||
        isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
      //  X op Y op Y  -->  X op Y
      //  X op Y       -->  X, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
      --i;
      --e;
    } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
                                               Ops[i + 1])) {
      //  X op Y  -->  Y, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
      --i;
      --e;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced (u|s)(min|max) down to nothing!");

  // Okay, it looks like we really DO need an expr. Check to see if we
  // already have one, otherwise create a new one.
  const SCEV *ExistingSCEV;
  FoldingSetNodeID ID;
  void *IP;
  std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
  if (ExistingSCEV)
    return ExistingSCEV;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
      SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());

  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMinExpr, Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinExpr(Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMinExpr, Ops);
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
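  //
  // For example, sizeof(i64) is returned directly as a constant 8, while a
  // scalable vector such as <vscale x 4 x i32> has no compile-time size and
  // keeps the target-independent ptrtoint(gep null, 1) form below.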
  if (isa<ScalableVectorType>(AllocTy)) {
    Constant *NullPtr = Constant::getNullValue(AllocTy->getPointerTo());
    Constant *One = ConstantInt::get(IntTy, 1);
    Constant *GEP = ConstantExpr::getGetElementPtr(AllocTy, NullPtr, One);
    return getSCEV(ConstantExpr::getPtrToInt(GEP, IntTy));
  }
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer index sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIndexType(Ty);
}
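// Note: under a data layout with 64-bit pointer indices, for example, an i8*
// value has getEffectiveSCEVType() == i64 and getTypeSizeInBits() == 64, so
// index arithmetic on it is modeled as i64 arithmetic.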
Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec =
      SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately; eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S].
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped].
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}

/// Check whether the value has nuw/nsw/exact set but its SCEV does not.
/// TODO: Ideally we would check for poison recursively, but this is
/// better than nothing.
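///
/// For example, if %a = add nsw i32 %x, 1 maps to the flagless SCEV
/// (%x + 1), reusing %a as an expansion of (%x + 1) in another context
/// could reintroduce poison, so the caller skips caching that mapping.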
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check that V->S was actually inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
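      // For example, if S == (4 + %base), then Stripped == %base and
      // Offset == 4, so the expander may rebuild %base from V as (V - 4).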
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}

/// Return a SCEV corresponding to -V = -1*V.
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V, getMinusOne(Ty), Flags);
}

/// If Expr computes ~A, return A; otherwise return nullptr.
static const SCEV *MatchNotExpr(const SCEV *Expr) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())
    return nullptr;

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
      !AddRHS->getOperand(0)->isAllOnesValue())
    return nullptr;

  return AddRHS->getOperand(1);
}

/// Return a SCEV corresponding to ~V = -1-V.
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y).
  if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
    auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
      SmallVector<const SCEV *, 2> MatchedOperands;
      for (const SCEV *Operand : MME->operands()) {
        const SCEV *Matched = MatchNotExpr(Operand);
        if (!Matched)
          return (const SCEV *)nullptr;
        MatchedOperands.push_back(Matched);
      }
      return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
                           MatchedOperands);
    };
    if (const SCEV *Replaced = MatchMinMaxNegation(MME))
      return Replaced;
  }

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMinusSCEV(getMinusOne(Ty), V);
}

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
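    // (Concretely, for i8: M == -128, and (-1)*(-128) wraps back to -128
    // even though -1 - (-128) == 127 is representable.)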
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}
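/// Return V truncated to Ty if Ty is narrower, or V unchanged if the sizes
/// match; widening is a programming error caught by the assert below. For
/// example, getTruncateOrNoop(i64 %x, i32) yields (trunc i64 %x to i32).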
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinFromMismatchedTypes(Ops);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
    SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be provided!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();
  assert(MaxType && "Failed to find maximum type!");

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps);
}

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  while (true) {
    if (const SCEVIntegralCastExpr *Cast = dyn_cast<SCEVIntegralCastExpr>(V)) {
      V = Cast->getOperand();
    } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
      const SCEV *PtrOp = nullptr;
      for (const SCEV *NAryOp : NAry->operands()) {
        if (NAryOp->getType()->isPointerTy()) {
          // Cannot find the base of an expression with multiple pointer ops.
          if (PtrOp)
            return V;
          PtrOp = NAryOp;
        }
      }
      if (!PtrOp) // All operands were non-pointer.
        return V;
      V = PtrOp;
    } else // Not something we can look further into.
      return V;
  }
}

/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression if its Loop is L. If the Loop is not L, then the AddRec itself
/// is used when IgnoreOtherLoops is true; otherwise the rewrite cannot be
/// done. If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot
/// be done either.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only rewrite AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
/// post-increment expression if its Loop is L; for other loops, the AddRec
/// itself is used. If the SCEV contains a loop-variant SCEVUnknown, the
/// rewrite cannot be done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only rewrite AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// This class evaluates the compare condition by matching it against the
/// branch condition of the loop latch. If there is a match, we assume a true
/// value for the condition while building SCEV nodes.
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target the same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop back condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if loop back is on positive branch condition.
  bool IsPositiveBECond;
};
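// For example, with a latch of the form
//   br i1 %c, label %header, label %exit
// a select such as %s = select i1 %c, %a, %b reached through the backedge
// value folds to %a in visitUnknown above, since %c must be true whenever
// the backedge is taken.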
Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If the value matches the backedge condition of the loop latch, return a
  // constant evolution node reflecting whether the backedge branch is taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;
  bool IsExact = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
    if (auto *PEO = dyn_cast<PossiblyExactOperator>(Op))
      IsExact = PEO->isExact();
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false, bool IsExact = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW),
        IsExact(IsExact) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
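///
/// Some mappings are not one-to-one: for example, (lshr %x, 3) is returned
/// as a UDiv by 8, (xor %x, signmask) as an Add of the signmask, and the
/// extractvalue of a *.with.overflow intrinsic as the underlying binary
/// operation, carrying wrap flags when the overflow check is known to guard
/// all uses of the result.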
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // CI are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  // Recognise intrinsic loop.decrement.reg, and as this has exactly the same
  // semantics as a Sub, return a binary sub expression.
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
      return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));

  return None;
}

/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way.
/// This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
///   Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
///   Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}

static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}

// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which corresponds to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
// The analysis will find that the value coming around the backedge has
// the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
// Upon concluding that this matches the desired pattern, the function
// will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
// The returned pair means that SymbolicPHI can be rewritten into NewAddRec
// under the predicates {P1,P2,P3}.
// This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
//
// TODOs:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
  SmallVector<const SCEVPredicate *, 3> Predicates;

  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
  // return an AddRec expression under some predicate.

  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  assert(L && "Expecting an integer loop header phi");

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
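  // For example (illustrative), in:
  //   %x = phi i64 [ %a, %entry ], [ %a, %preheader2 ], [ %x.next, %latch ]
  // the unique start value is %a and the unique backedge value is %x.next,
  // even though the phi has more than one incoming edge from outside the
  // loop.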
4524 Value *BEValueV = nullptr, *StartValueV = nullptr; 4525 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4526 Value *V = PN->getIncomingValue(i); 4527 if (L->contains(PN->getIncomingBlock(i))) { 4528 if (!BEValueV) { 4529 BEValueV = V; 4530 } else if (BEValueV != V) { 4531 BEValueV = nullptr; 4532 break; 4533 } 4534 } else if (!StartValueV) { 4535 StartValueV = V; 4536 } else if (StartValueV != V) { 4537 StartValueV = nullptr; 4538 break; 4539 } 4540 } 4541 if (!BEValueV || !StartValueV) 4542 return None; 4543 4544 const SCEV *BEValue = getSCEV(BEValueV); 4545 4546 // If the value coming around the backedge is an add with the symbolic 4547 // value we just inserted, possibly with casts that we can ignore under 4548 // an appropriate runtime guard, then we found a simple induction variable! 4549 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4550 if (!Add) 4551 return None; 4552 4553 // If there is a single occurrence of the symbolic value, possibly 4554 // casted, replace it with a recurrence. 4555 unsigned FoundIndex = Add->getNumOperands(); 4556 Type *TruncTy = nullptr; 4557 bool Signed; 4558 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4559 if ((TruncTy = 4560 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4561 if (FoundIndex == e) { 4562 FoundIndex = i; 4563 break; 4564 } 4565 4566 if (FoundIndex == Add->getNumOperands()) 4567 return None; 4568 4569 // Create an add with everything but the specified operand. 4570 SmallVector<const SCEV *, 8> Ops; 4571 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4572 if (i != FoundIndex) 4573 Ops.push_back(Add->getOperand(i)); 4574 const SCEV *Accum = getAddExpr(Ops); 4575 4576 // The runtime checks will not be valid if the step amount is 4577 // varying inside the loop. 4578 if (!isLoopInvariant(Accum, L)) 4579 return None; 4580 4581 // *** Part2: Create the predicates 4582 4583 // Analysis was successful: we have a phi-with-cast pattern for which we 4584 // can return an AddRec expression under the following predicates: 4585 // 4586 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4587 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4588 // P2: An Equal predicate that guarantees that 4589 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4590 // P3: An Equal predicate that guarantees that 4591 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4592 // 4593 // As we next prove, the above predicates guarantee that: 4594 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4595 // 4596 // 4597 // More formally, we want to prove that: 4598 // Expr(i+1) = Start + (i+1) * Accum 4599 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4600 // 4601 // Given that: 4602 // 1) Expr(0) = Start 4603 // 2) Expr(1) = Start + Accum 4604 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4605 // 3) Induction hypothesis (step i): 4606 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4607 // 4608 // Proof: 4609 // Expr(i+1) = 4610 // = Start + (i+1)*Accum 4611 // = (Start + i*Accum) + Accum 4612 // = Expr(i) + Accum 4613 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4614 // :: from step i 4615 // 4616 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4617 // 4618 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4619 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4620 // + Accum :: from P3 4621 // 4622 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4623 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4624 // 4625 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4626 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4627 // 4628 // By induction, the same applies to all iterations 1<=i<n: 4629 // 4630 4631 // Create a truncated addrec for which we will add a no overflow check (P1). 4632 const SCEV *StartVal = getSCEV(StartValueV); 4633 const SCEV *PHISCEV = 4634 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4635 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4636 4637 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4638 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4639 // will be constant. 4640 // 4641 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4642 // add P1. 4643 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4644 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4645 Signed ? SCEVWrapPredicate::IncrementNSSW 4646 : SCEVWrapPredicate::IncrementNUSW; 4647 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4648 Predicates.push_back(AddRecPred); 4649 } 4650 4651 // Create the Equal Predicates P2,P3: 4652 4653 // It is possible that the predicates P2 and/or P3 are computable at 4654 // compile time due to StartVal and/or Accum being constants. 4655 // If either one is, then we can check that now and escape if either P2 4656 // or P3 is false. 4657 4658 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4659 // for each of StartVal and Accum 4660 auto getExtendedExpr = [&](const SCEV *Expr, 4661 bool CreateSignExtend) -> const SCEV * { 4662 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4663 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4664 const SCEV *ExtendedExpr = 4665 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time.
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW).
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
4729 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); 4730 if (I != PredicatedSCEVRewrites.end()) { 4731 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = 4732 I->second; 4733 // Analysis was done before and failed to create an AddRec: 4734 if (Rewrite.first == SymbolicPHI) 4735 return None; 4736 // Analysis was done before and succeeded to create an AddRec under 4737 // a predicate: 4738 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); 4739 assert(!(Rewrite.second).empty() && "Expected to find Predicates"); 4740 return Rewrite; 4741 } 4742 4743 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4744 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); 4745 4746 // Record in the cache that the analysis failed 4747 if (!Rewrite) { 4748 SmallVector<const SCEVPredicate *, 3> Predicates; 4749 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; 4750 return None; 4751 } 4752 4753 return Rewrite; 4754 } 4755 4756 // FIXME: This utility is currently required because the Rewriter currently 4757 // does not rewrite this expression: 4758 // {0, +, (sext ix (trunc iy to ix) to iy)} 4759 // into {0, +, %step}, 4760 // even when the following Equal predicate exists: 4761 // "%step == (sext ix (trunc iy to ix) to iy)". 4762 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( 4763 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { 4764 if (AR1 == AR2) 4765 return true; 4766 4767 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { 4768 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) && 4769 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1))) 4770 return false; 4771 return true; 4772 }; 4773 4774 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || 4775 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) 4776 return false; 4777 return true; 4778 } 4779 4780 /// A helper function for createAddRecFromPHI to handle simple cases. 4781 /// 4782 /// This function tries to find an AddRec expression for the simplest (yet most 4783 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 4784 /// If it fails, createAddRecFromPHI will use a more general, but slow, 4785 /// technique for finding the AddRec expression. 4786 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 4787 Value *BEValueV, 4788 Value *StartValueV) { 4789 const Loop *L = LI.getLoopFor(PN->getParent()); 4790 assert(L && L->getHeader() == PN->getParent()); 4791 assert(BEValueV && StartValueV); 4792 4793 auto BO = MatchBinaryOp(BEValueV, DT); 4794 if (!BO) 4795 return nullptr; 4796 4797 if (BO->Opcode != Instruction::Add) 4798 return nullptr; 4799 4800 const SCEV *Accum = nullptr; 4801 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 4802 Accum = getSCEV(BO->RHS); 4803 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 4804 Accum = getSCEV(BO->LHS); 4805 4806 if (!Accum) 4807 return nullptr; 4808 4809 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4810 if (BO->IsNUW) 4811 Flags = setFlags(Flags, SCEV::FlagNUW); 4812 if (BO->IsNSW) 4813 Flags = setFlags(Flags, SCEV::FlagNSW); 4814 4815 const SCEV *StartVal = getSCEV(StartValueV); 4816 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4817 4818 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 4819 4820 // We can add Flags to the post-inc expression only if we 4821 // know that it is *undefined behavior* for BEValueV to 4822 // overflow. 
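  // For instance (illustrative): if the only backedge value is
  //   %iv.next = add nsw i32 %iv, 1
  // and isAddRecNeverPoison can show that a poisoned %iv.next would trigger
  // undefined behavior, then the post-increment recurrence {Start+1,+,1} may
  // safely carry the <nsw> flag as well.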
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
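      // For example (illustrative): a step that is itself the induction
      // variable of this loop, as in
      //   %phi.next = add i32 %phi, %j   ; with %j = {0,+,1}<%L>
      // is fine and yields the quadratic recurrence {Start,+,0,+,1}<%L>,
      // whereas a step that varies in some unrelated loop is rejected.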
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
      case scUMinExpr:
      case scSMinExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop BB is in, or some
        // outer loop.  This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable.  We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("Unknown SCEV kind!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern.  Return true on a successful
// match.
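// For example (illustrative IR):
//   guard:
//     br i1 %c, label %left, label %right
//   left:
//     br label %merge
//   right:
//     br label %merge
//   merge:
//     %phi = phi i32 [ %x, %left ], [ %y, %right ]
// is treated as "%phi = select i1 %c, i32 %x, i32 %y".  Note that %x and %y
// need not be defined in %left/%right; each use only has to be dominated by
// the corresponding edge.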
5065 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5066 Value *&C, Value *&LHS, Value *&RHS) { 5067 C = BI->getCondition(); 5068 5069 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5070 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5071 5072 if (!LeftEdge.isSingleEdge()) 5073 return false; 5074 5075 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5076 5077 Use &LeftUse = Merge->getOperandUse(0); 5078 Use &RightUse = Merge->getOperandUse(1); 5079 5080 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5081 LHS = LeftUse; 5082 RHS = RightUse; 5083 return true; 5084 } 5085 5086 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5087 LHS = RightUse; 5088 RHS = LeftUse; 5089 return true; 5090 } 5091 5092 return false; 5093 } 5094 5095 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5096 auto IsReachable = 5097 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5098 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5099 const Loop *L = LI.getLoopFor(PN->getParent()); 5100 5101 // We don't want to break LCSSA, even in a SCEV expression tree. 5102 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5103 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5104 return nullptr; 5105 5106 // Try to match 5107 // 5108 // br %cond, label %left, label %right 5109 // left: 5110 // br label %merge 5111 // right: 5112 // br label %merge 5113 // merge: 5114 // V = phi [ %x, %left ], [ %y, %right ] 5115 // 5116 // as "select %cond, %x, %y" 5117 5118 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5119 assert(IDom && "At least the entry block should dominate PN"); 5120 5121 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5122 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5123 5124 if (BI && BI->isConditional() && 5125 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5126 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5127 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5128 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5129 } 5130 5131 return nullptr; 5132 } 5133 5134 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5135 if (const SCEV *S = createAddRecFromPHI(PN)) 5136 return S; 5137 5138 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5139 return S; 5140 5141 // If the PHI has a single incoming value, follow that value, unless the 5142 // PHI's incoming blocks are in a different loop, in which case doing so 5143 // risks breaking LCSSA form. Instcombine would normally zap these, but 5144 // it doesn't have DominatorTree information, so it may miss cases. 5145 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5146 if (LI.replacementPreservesLCSSAForm(PN, V)) 5147 return getSCEV(V); 5148 5149 // If it's not a loop phi, we can't handle it yet. 5150 return getUnknown(PN); 5151 } 5152 5153 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5154 Value *Cond, 5155 Value *TrueVal, 5156 Value *FalseVal) { 5157 // Handle "constant" branch or select. This can occur for instance when a 5158 // loop pass transforms an inner loop and moves on to process the outer loop. 5159 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5160 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5161 5162 // Try to match some simple smax or umax patterns. 
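  // For example (illustrative): for
  //   %cmp = icmp sgt i32 %a, %b
  //   %sel = select i1 %cmp, i32 %a, i32 %b
  // we form smax(%a, %b); more generally, "a >s b ? a+x : b+x" is recognized
  // through the LDiff == RDiff checks below and becomes smax(a, b)+x.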
5163 auto *ICI = dyn_cast<ICmpInst>(Cond); 5164 if (!ICI) 5165 return getUnknown(I); 5166 5167 Value *LHS = ICI->getOperand(0); 5168 Value *RHS = ICI->getOperand(1); 5169 5170 switch (ICI->getPredicate()) { 5171 case ICmpInst::ICMP_SLT: 5172 case ICmpInst::ICMP_SLE: 5173 std::swap(LHS, RHS); 5174 LLVM_FALLTHROUGH; 5175 case ICmpInst::ICMP_SGT: 5176 case ICmpInst::ICMP_SGE: 5177 // a >s b ? a+x : b+x -> smax(a, b)+x 5178 // a >s b ? b+x : a+x -> smin(a, b)+x 5179 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5180 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5181 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5182 const SCEV *LA = getSCEV(TrueVal); 5183 const SCEV *RA = getSCEV(FalseVal); 5184 const SCEV *LDiff = getMinusSCEV(LA, LS); 5185 const SCEV *RDiff = getMinusSCEV(RA, RS); 5186 if (LDiff == RDiff) 5187 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5188 LDiff = getMinusSCEV(LA, RS); 5189 RDiff = getMinusSCEV(RA, LS); 5190 if (LDiff == RDiff) 5191 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5192 } 5193 break; 5194 case ICmpInst::ICMP_ULT: 5195 case ICmpInst::ICMP_ULE: 5196 std::swap(LHS, RHS); 5197 LLVM_FALLTHROUGH; 5198 case ICmpInst::ICMP_UGT: 5199 case ICmpInst::ICMP_UGE: 5200 // a >u b ? a+x : b+x -> umax(a, b)+x 5201 // a >u b ? b+x : a+x -> umin(a, b)+x 5202 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5203 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5204 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5205 const SCEV *LA = getSCEV(TrueVal); 5206 const SCEV *RA = getSCEV(FalseVal); 5207 const SCEV *LDiff = getMinusSCEV(LA, LS); 5208 const SCEV *RDiff = getMinusSCEV(RA, RS); 5209 if (LDiff == RDiff) 5210 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5211 LDiff = getMinusSCEV(LA, RS); 5212 RDiff = getMinusSCEV(RA, LS); 5213 if (LDiff == RDiff) 5214 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5215 } 5216 break; 5217 case ICmpInst::ICMP_NE: 5218 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5219 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5220 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5221 const SCEV *One = getOne(I->getType()); 5222 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5223 const SCEV *LA = getSCEV(TrueVal); 5224 const SCEV *RA = getSCEV(FalseVal); 5225 const SCEV *LDiff = getMinusSCEV(LA, LS); 5226 const SCEV *RDiff = getMinusSCEV(RA, One); 5227 if (LDiff == RDiff) 5228 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5229 } 5230 break; 5231 case ICmpInst::ICMP_EQ: 5232 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5233 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5234 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5235 const SCEV *One = getOne(I->getType()); 5236 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5237 const SCEV *LA = getSCEV(TrueVal); 5238 const SCEV *RA = getSCEV(FalseVal); 5239 const SCEV *LDiff = getMinusSCEV(LA, One); 5240 const SCEV *RDiff = getMinusSCEV(RA, LS); 5241 if (LDiff == RDiff) 5242 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5243 } 5244 break; 5245 default: 5246 break; 5247 } 5248 5249 return getUnknown(I); 5250 } 5251 5252 /// Expand GEP instructions into add and multiply operations. This allows them 5253 /// to be analyzed by regular SCEV code. 5254 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5255 // Don't attempt to analyze GEPs over unsized objects. 
5256 if (!GEP->getSourceElementType()->isSized()) 5257 return getUnknown(GEP); 5258 5259 SmallVector<const SCEV *, 4> IndexExprs; 5260 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5261 IndexExprs.push_back(getSCEV(*Index)); 5262 return getGEPExpr(GEP, IndexExprs); 5263 } 5264 5265 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5266 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5267 return C->getAPInt().countTrailingZeros(); 5268 5269 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5270 return std::min(GetMinTrailingZeros(T->getOperand()), 5271 (uint32_t)getTypeSizeInBits(T->getType())); 5272 5273 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5274 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5275 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5276 ? getTypeSizeInBits(E->getType()) 5277 : OpRes; 5278 } 5279 5280 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5281 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5282 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5283 ? getTypeSizeInBits(E->getType()) 5284 : OpRes; 5285 } 5286 5287 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5288 // The result is the min of all operands results. 5289 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5290 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5291 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5292 return MinOpRes; 5293 } 5294 5295 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5296 // The result is the sum of all operands results. 5297 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5298 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5299 for (unsigned i = 1, e = M->getNumOperands(); 5300 SumOpRes != BitWidth && i != e; ++i) 5301 SumOpRes = 5302 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5303 return SumOpRes; 5304 } 5305 5306 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5307 // The result is the min of all operands results. 5308 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5309 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5310 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5311 return MinOpRes; 5312 } 5313 5314 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5315 // The result is the min of all operands results. 5316 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5317 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5318 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5319 return MinOpRes; 5320 } 5321 5322 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5323 // The result is the min of all operands results. 5324 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5325 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5326 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5327 return MinOpRes; 5328 } 5329 5330 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5331 // For a SCEVUnknown, ask ValueTracking. 
5332 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5333 return Known.countMinTrailingZeros(); 5334 } 5335 5336 // SCEVUDivExpr 5337 return 0; 5338 } 5339 5340 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5341 auto I = MinTrailingZerosCache.find(S); 5342 if (I != MinTrailingZerosCache.end()) 5343 return I->second; 5344 5345 uint32_t Result = GetMinTrailingZerosImpl(S); 5346 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5347 assert(InsertPair.second && "Should insert a new key"); 5348 return InsertPair.first->second; 5349 } 5350 5351 /// Helper method to assign a range to V from metadata present in the IR. 5352 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5353 if (Instruction *I = dyn_cast<Instruction>(V)) 5354 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5355 return getConstantRangeFromMetadata(*MD); 5356 5357 return None; 5358 } 5359 5360 /// Determine the range for a particular SCEV. If SignHint is 5361 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5362 /// with a "cleaner" unsigned (resp. signed) representation. 5363 const ConstantRange & 5364 ScalarEvolution::getRangeRef(const SCEV *S, 5365 ScalarEvolution::RangeSignHint SignHint) { 5366 DenseMap<const SCEV *, ConstantRange> &Cache = 5367 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5368 : SignedRanges; 5369 ConstantRange::PreferredRangeType RangeType = 5370 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 5371 ? ConstantRange::Unsigned : ConstantRange::Signed; 5372 5373 // See if we've computed this range already. 5374 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5375 if (I != Cache.end()) 5376 return I->second; 5377 5378 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5379 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5380 5381 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5382 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5383 using OBO = OverflowingBinaryOperator; 5384 5385 // If the value has known zeros, the maximum value will have those known zeros 5386 // as well. 
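  // For example (illustrative): with BitWidth == 8 and TZ == 2, the unsigned
  // case below yields [0, 0b11111100 + 1), i.e. the largest attainable value
  // still has its two low bits clear.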
5387 uint32_t TZ = GetMinTrailingZeros(S); 5388 if (TZ != 0) { 5389 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5390 ConservativeResult = 5391 ConstantRange(APInt::getMinValue(BitWidth), 5392 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5393 else 5394 ConservativeResult = ConstantRange( 5395 APInt::getSignedMinValue(BitWidth), 5396 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5397 } 5398 5399 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5400 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5401 unsigned WrapType = OBO::AnyWrap; 5402 if (Add->hasNoSignedWrap()) 5403 WrapType |= OBO::NoSignedWrap; 5404 if (Add->hasNoUnsignedWrap()) 5405 WrapType |= OBO::NoUnsignedWrap; 5406 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5407 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 5408 WrapType, RangeType); 5409 return setRange(Add, SignHint, 5410 ConservativeResult.intersectWith(X, RangeType)); 5411 } 5412 5413 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5414 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5415 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5416 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5417 return setRange(Mul, SignHint, 5418 ConservativeResult.intersectWith(X, RangeType)); 5419 } 5420 5421 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5422 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5423 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5424 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5425 return setRange(SMax, SignHint, 5426 ConservativeResult.intersectWith(X, RangeType)); 5427 } 5428 5429 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5430 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5431 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5432 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5433 return setRange(UMax, SignHint, 5434 ConservativeResult.intersectWith(X, RangeType)); 5435 } 5436 5437 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) { 5438 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint); 5439 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i) 5440 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint)); 5441 return setRange(SMin, SignHint, 5442 ConservativeResult.intersectWith(X, RangeType)); 5443 } 5444 5445 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) { 5446 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint); 5447 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i) 5448 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint)); 5449 return setRange(UMin, SignHint, 5450 ConservativeResult.intersectWith(X, RangeType)); 5451 } 5452 5453 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5454 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5455 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5456 return setRange(UDiv, SignHint, 5457 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 5458 } 5459 5460 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5461 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5462 return setRange(ZExt, SignHint, 5463 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), 5464 RangeType)); 5465 } 5466 5467 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5468 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5469 return setRange(SExt, 
SignHint, 5470 ConservativeResult.intersectWith(X.signExtend(BitWidth), 5471 RangeType)); 5472 } 5473 5474 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5475 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5476 return setRange(Trunc, SignHint, 5477 ConservativeResult.intersectWith(X.truncate(BitWidth), 5478 RangeType)); 5479 } 5480 5481 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5482 // If there's no unsigned wrap, the value will never be less than its 5483 // initial value. 5484 if (AddRec->hasNoUnsignedWrap()) { 5485 APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart()); 5486 if (!UnsignedMinValue.isNullValue()) 5487 ConservativeResult = ConservativeResult.intersectWith( 5488 ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType); 5489 } 5490 5491 // If there's no signed wrap, and all the operands except initial value have 5492 // the same sign or zero, the value won't ever be: 5493 // 1: smaller than initial value if operands are non negative, 5494 // 2: bigger than initial value if operands are non positive. 5495 // For both cases, value can not cross signed min/max boundary. 5496 if (AddRec->hasNoSignedWrap()) { 5497 bool AllNonNeg = true; 5498 bool AllNonPos = true; 5499 for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) { 5500 if (!isKnownNonNegative(AddRec->getOperand(i))) 5501 AllNonNeg = false; 5502 if (!isKnownNonPositive(AddRec->getOperand(i))) 5503 AllNonPos = false; 5504 } 5505 if (AllNonNeg) 5506 ConservativeResult = ConservativeResult.intersectWith( 5507 ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()), 5508 APInt::getSignedMinValue(BitWidth)), 5509 RangeType); 5510 else if (AllNonPos) 5511 ConservativeResult = ConservativeResult.intersectWith( 5512 ConstantRange::getNonEmpty( 5513 APInt::getSignedMinValue(BitWidth), 5514 getSignedRangeMax(AddRec->getStart()) + 1), 5515 RangeType); 5516 } 5517 5518 // TODO: non-affine addrec 5519 if (AddRec->isAffine()) { 5520 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop()); 5521 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5522 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5523 auto RangeFromAffine = getRangeForAffineAR( 5524 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5525 BitWidth); 5526 ConservativeResult = 5527 ConservativeResult.intersectWith(RangeFromAffine, RangeType); 5528 5529 auto RangeFromFactoring = getRangeViaFactoring( 5530 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5531 BitWidth); 5532 ConservativeResult = 5533 ConservativeResult.intersectWith(RangeFromFactoring, RangeType); 5534 } 5535 5536 // Now try symbolic BE count and more powerful methods. 5537 if (UseExpensiveRangeSharpening) { 5538 const SCEV *SymbolicMaxBECount = 5539 getSymbolicMaxBackedgeTakenCount(AddRec->getLoop()); 5540 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) && 5541 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5542 AddRec->hasNoSelfWrap()) { 5543 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR( 5544 AddRec, SymbolicMaxBECount, BitWidth, SignHint); 5545 ConservativeResult = 5546 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType); 5547 } 5548 } 5549 } 5550 5551 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5552 } 5553 5554 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5555 // Check if the IR explicitly contains !range metadata. 
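    // For example (illustrative IR):
    //   %v = load i8, i8* %p, !range !0
    //   ...
    //   !0 = !{i8 0, i8 42}
    // constrains %v to the half-open range [0, 42).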
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
                                                            RangeType);

    // Split here to avoid paying the compile-time cost of calling both
    // computeKnownBits and ComputeNumSignBits.  This restriction can be
    // lifted if needed.
    const DataLayout &DL = getDataLayout();
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
      // For a SCEVUnknown, ask ValueTracking.
      KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (Known.getBitWidth() != BitWidth)
        Known = Known.zextOrTrunc(BitWidth);
      // If Known does not result in full-set, intersect with it.
      if (Known.getMinValue() != Known.getMaxValue() + 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
            RangeType);
    } else {
      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
             "generalize as needed!");
      unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      // If the pointer size is larger than the index size type, this can cause
      // NS to be larger than BitWidth. So compensate for this.
      if (U->getType()->isPointerTy()) {
        unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
        int ptrIdxDiff = ptrSize - BitWidth;
        if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
          NS -= ptrIdxDiff;
      }

      if (NS > 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
            RangeType);
    }

    // The range of a Phi is a subset of the union of the ranges of its inputs.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not run over cycled Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void) Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take. Initially, the expression has a
// value from StartRange and then is changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold true due to the well-defined wrap-around behavior
    // of APInt.
    Step = Step.abs();

  // Check if Offset is more than full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. Checks above guarantee no
  // overflow here.
  APInt Offset = Step * MaxBECount;

  // Minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise.  Maximum value of the final range will match the
  // maximal value of StartRange if the expression is decreasing and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap around). This means that the expression can
  // take any value in this bitwidth, and we have to return full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer nw from some exit for which
  // we do not know max exit count (or any other side reasoning).
  // TODO: Turn into assert at some point.
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take Start and End values and
  // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
  // the iteration. They either lie inside the range [Min(Start, End),
  // Max(Start, End)] or outside it:
  //
  // Case 1:   RangeMin    ...    Start V1 ... VN End    ...    RangeMax;
  // Case 2:   RangeMin Vk ... V1 Start    ...    End Vn ... Vk + 1 RangeMax;
  //
  // The no-self-wrap flag guarantees that the intermediate values cannot be
  // BOTH outside and inside the range [Min(Start, End), Max(Start, End)].
  // Using that knowledge, let's try to prove that we are dealing with Case 1.
  // It is so if Start <= End and step is positive, or Start >= End and step
  // is negative.
  const SCEV *Start = AddRec->getStart();
  ConstantRange StartRange = getRangeRef(Start, SignHint);
  ConstantRange EndRange = getRangeRef(End, SignHint);
  ConstantRange RangeBetween = StartRange.unionWith(EndRange);
  // If they already cover full iteration space, we will know nothing useful
  // even if we prove what we want to prove.
  if (RangeBetween.isFullSet())
    return RangeBetween;
  // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
  bool IsWrappedSet = IsSigned ?
RangeBetween.isSignWrappedSet() 5770 : RangeBetween.isWrappedSet(); 5771 if (IsWrappedSet) 5772 return ConstantRange::getFull(BitWidth); 5773 5774 if (isKnownPositive(Step) && 5775 isKnownPredicateViaConstantRanges(LEPred, Start, End)) 5776 return RangeBetween; 5777 else if (isKnownNegative(Step) && 5778 isKnownPredicateViaConstantRanges(GEPred, Start, End)) 5779 return RangeBetween; 5780 return ConstantRange::getFull(BitWidth); 5781 } 5782 5783 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5784 const SCEV *Step, 5785 const SCEV *MaxBECount, 5786 unsigned BitWidth) { 5787 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5788 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5789 5790 struct SelectPattern { 5791 Value *Condition = nullptr; 5792 APInt TrueValue; 5793 APInt FalseValue; 5794 5795 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5796 const SCEV *S) { 5797 Optional<unsigned> CastOp; 5798 APInt Offset(BitWidth, 0); 5799 5800 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5801 "Should be!"); 5802 5803 // Peel off a constant offset: 5804 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5805 // In the future we could consider being smarter here and handle 5806 // {Start+Step,+,Step} too. 5807 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5808 return; 5809 5810 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5811 S = SA->getOperand(1); 5812 } 5813 5814 // Peel off a cast operation 5815 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 5816 CastOp = SCast->getSCEVType(); 5817 S = SCast->getOperand(); 5818 } 5819 5820 using namespace llvm::PatternMatch; 5821 5822 auto *SU = dyn_cast<SCEVUnknown>(S); 5823 const APInt *TrueVal, *FalseVal; 5824 if (!SU || 5825 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5826 m_APInt(FalseVal)))) { 5827 Condition = nullptr; 5828 return; 5829 } 5830 5831 TrueValue = *TrueVal; 5832 FalseValue = *FalseVal; 5833 5834 // Re-apply the cast we peeled off earlier 5835 if (CastOp.hasValue()) 5836 switch (*CastOp) { 5837 default: 5838 llvm_unreachable("Unknown SCEV cast type!"); 5839 5840 case scTruncate: 5841 TrueValue = TrueValue.trunc(BitWidth); 5842 FalseValue = FalseValue.trunc(BitWidth); 5843 break; 5844 case scZeroExtend: 5845 TrueValue = TrueValue.zext(BitWidth); 5846 FalseValue = FalseValue.zext(BitWidth); 5847 break; 5848 case scSignExtend: 5849 TrueValue = TrueValue.sext(BitWidth); 5850 FalseValue = FalseValue.sext(BitWidth); 5851 break; 5852 } 5853 5854 // Re-apply the constant offset we peeled off earlier 5855 TrueValue += Offset; 5856 FalseValue += Offset; 5857 } 5858 5859 bool isRecognized() { return Condition != nullptr; } 5860 }; 5861 5862 SelectPattern StartPattern(*this, BitWidth, Start); 5863 if (!StartPattern.isRecognized()) 5864 return ConstantRange::getFull(BitWidth); 5865 5866 SelectPattern StepPattern(*this, BitWidth, Step); 5867 if (!StepPattern.isRecognized()) 5868 return ConstantRange::getFull(BitWidth); 5869 5870 if (StartPattern.Condition != StepPattern.Condition) { 5871 // We don't handle this case today; but we could, by considering four 5872 // possibilities below instead of two. I'm not sure if there are cases where 5873 // that will help over what getRange already does, though. 5874 return ConstantRange::getFull(BitWidth); 5875 } 5876 5877 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5878 // construct arbitrary general SCEV expressions here. 
This function is called 5879 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5880 // say) can end up caching a suboptimal value. 5881 5882 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5883 // C2352 and C2512 (otherwise it isn't needed). 5884 5885 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5886 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5887 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5888 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5889 5890 ConstantRange TrueRange = 5891 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5892 ConstantRange FalseRange = 5893 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5894 5895 return TrueRange.unionWith(FalseRange); 5896 } 5897 5898 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5899 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5900 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5901 5902 // Return early if there are no flags to propagate to the SCEV. 5903 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5904 if (BinOp->hasNoUnsignedWrap()) 5905 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5906 if (BinOp->hasNoSignedWrap()) 5907 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5908 if (Flags == SCEV::FlagAnyWrap) 5909 return SCEV::FlagAnyWrap; 5910 5911 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5912 } 5913 5914 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5915 // Here we check that I is in the header of the innermost loop containing I, 5916 // since we only deal with instructions in the loop header. The actual loop we 5917 // need to check later will come from an add recurrence, but getting that 5918 // requires computing the SCEV of the operands, which can be expensive. This 5919 // check we can do cheaply to rule out some cases early. 5920 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5921 if (InnermostContainingLoop == nullptr || 5922 InnermostContainingLoop->getHeader() != I->getParent()) 5923 return false; 5924 5925 // Only proceed if we can prove that I does not yield poison. 5926 if (!programUndefinedIfPoison(I)) 5927 return false; 5928 5929 // At this point we know that if I is executed, then it does not wrap 5930 // according to at least one of NSW or NUW. If I is not executed, then we do 5931 // not know if the calculation that I represents would wrap. Multiple 5932 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5933 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5934 // derived from other instructions that map to the same SCEV. We cannot make 5935 // that guarantee for cases where I is not executed. So we need to find the 5936 // loop that I is considered in relation to and prove that I is executed for 5937 // every iteration of that loop. That implies that the value that I 5938 // calculates does not wrap anywhere in the loop, so then we can apply the 5939 // flags to the SCEV. 5940 // 5941 // We check isLoopInvariant to disambiguate in case we are adding recurrences 5942 // from different loops, so that we know which loop to prove that I is 5943 // executed in. 5944 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 5945 // I could be an extractvalue from a call to an overflow intrinsic. 5946 // TODO: We can do better here in some cases. 
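    // For example (illustrative IR):
    //   %agg = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
    //   %sum = extractvalue { i32, i1 } %agg, 0
    // Here %agg has a non-SCEVable struct type, so we bail out below.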
5947 if (!isSCEVable(I->getOperand(OpIndex)->getType()))
5948 return false;
5949 const SCEV *Op = getSCEV(I->getOperand(OpIndex));
5950 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
5951 bool AllOtherOpsLoopInvariant = true;
5952 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
5953 ++OtherOpIndex) {
5954 if (OtherOpIndex != OpIndex) {
5955 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
5956 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
5957 AllOtherOpsLoopInvariant = false;
5958 break;
5959 }
5960 }
5961 }
5962 if (AllOtherOpsLoopInvariant &&
5963 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
5964 return true;
5965 }
5966 }
5967 return false;
5968 }
5969
5970 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
5971 // If we know that \c I can never be poison, period, then that's enough.
5972 if (isSCEVExprNeverPoison(I))
5973 return true;
5974
5975 // For an add recurrence specifically, we assume that infinite loops without
5976 // side effects are undefined behavior, and then reason as follows:
5977 //
5978 // If the add recurrence is poison in any iteration, it is poison on all
5979 // future iterations (since incrementing poison yields poison). If the result
5980 // of the add recurrence is fed into the loop latch condition and the loop
5981 // does not contain any throws or exiting blocks other than the latch, we now
5982 // have the ability to "choose" whether the backedge is taken or not (by
5983 // choosing a sufficiently evil value for the poison feeding into the branch)
5984 // for every iteration including and after the one in which \p I first became
5985 // poison. There are two possibilities (let's call the iteration in which \p
5986 // I first becomes poison K):
5987 //
5988 // 1. In the set of iterations including and after K, the loop body executes
5989 // no side effects. In this case executing the backedge an infinite number
5990 // of times will yield undefined behavior.
5991 //
5992 // 2. In the set of iterations including and after K, the loop body executes
5993 // at least one side effect. In this case, that specific instance of side
5994 // effect is control dependent on poison, which also yields undefined
5995 // behavior.
5996
5997 auto *ExitingBB = L->getExitingBlock();
5998 auto *LatchBB = L->getLoopLatch();
5999 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
6000 return false;
6001
6002 SmallPtrSet<const Instruction *, 16> Pushed;
6003 SmallVector<const Instruction *, 8> PoisonStack;
6004
6005 // We start by assuming \c I, the post-inc add recurrence, is poison. Only
6006 // things that are known to be poison under that assumption go on the
6007 // PoisonStack.
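//
// Illustrative sketch (assumed IR, not from the original source): in
//   loop:
//     %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nsw i64 %iv, 1
//     %cmp = icmp slt i64 %iv.next, %n
//     br i1 %cmp, label %loop, label %exit
// if %iv.next were poison in some iteration, %cmp and then the latch branch
// would be transitively poison from that iteration on, which is exactly the
// situation the walk below looks for.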
6008 Pushed.insert(I); 6009 PoisonStack.push_back(I); 6010 6011 bool LatchControlDependentOnPoison = false; 6012 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 6013 const Instruction *Poison = PoisonStack.pop_back_val(); 6014 6015 for (auto *PoisonUser : Poison->users()) { 6016 if (propagatesPoison(cast<Operator>(PoisonUser))) { 6017 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 6018 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 6019 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 6020 assert(BI->isConditional() && "Only possibility!"); 6021 if (BI->getParent() == LatchBB) { 6022 LatchControlDependentOnPoison = true; 6023 break; 6024 } 6025 } 6026 } 6027 } 6028 6029 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 6030 } 6031 6032 ScalarEvolution::LoopProperties 6033 ScalarEvolution::getLoopProperties(const Loop *L) { 6034 using LoopProperties = ScalarEvolution::LoopProperties; 6035 6036 auto Itr = LoopPropertiesCache.find(L); 6037 if (Itr == LoopPropertiesCache.end()) { 6038 auto HasSideEffects = [](Instruction *I) { 6039 if (auto *SI = dyn_cast<StoreInst>(I)) 6040 return !SI->isSimple(); 6041 6042 return I->mayHaveSideEffects(); 6043 }; 6044 6045 LoopProperties LP = {/* HasNoAbnormalExits */ true, 6046 /*HasNoSideEffects*/ true}; 6047 6048 for (auto *BB : L->getBlocks()) 6049 for (auto &I : *BB) { 6050 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 6051 LP.HasNoAbnormalExits = false; 6052 if (HasSideEffects(&I)) 6053 LP.HasNoSideEffects = false; 6054 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 6055 break; // We're already as pessimistic as we can get. 6056 } 6057 6058 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6059 assert(InsertPair.second && "We just checked!"); 6060 Itr = InsertPair.first; 6061 } 6062 6063 return Itr->second; 6064 } 6065 6066 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6067 if (!isSCEVable(V->getType())) 6068 return getUnknown(V); 6069 6070 if (Instruction *I = dyn_cast<Instruction>(V)) { 6071 // Don't attempt to analyze instructions in blocks that aren't 6072 // reachable. Such instructions don't matter, and they aren't required 6073 // to obey basic rules for definitions dominating uses which this 6074 // analysis depends on. 6075 if (!DT.isReachableFromEntry(I->getParent())) 6076 return getUnknown(UndefValue::get(V->getType())); 6077 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6078 return getConstant(CI); 6079 else if (isa<ConstantPointerNull>(V)) 6080 return getZero(V->getType()); 6081 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6082 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6083 else if (!isa<ConstantExpr>(V)) 6084 return getUnknown(V); 6085 6086 Operator *U = cast<Operator>(V); 6087 if (auto BO = MatchBinaryOp(U, DT)) { 6088 switch (BO->Opcode) { 6089 case Instruction::Add: { 6090 // The simple thing to do would be to just call getSCEV on both operands 6091 // and call getAddExpr with the result. However if we're looking at a 6092 // bunch of things all added together, this can be quite inefficient, 6093 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6094 // Instead, gather up all the operands and make a single getAddExpr call. 6095 // LLVM IR canonical form means we need only traverse the left operands. 
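//
// Sketch of the shape handled below (illustrative values): given
//   %t0 = add i32 %a, %b
//   %t1 = add i32 %t0, %c
// the loop pushes the SCEVs of %c, then %b, and finally %a onto AddOps, so
// a single getAddExpr call sees all three ultimate operands.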
6096 SmallVector<const SCEV *, 4> AddOps; 6097 do { 6098 if (BO->Op) { 6099 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6100 AddOps.push_back(OpSCEV); 6101 break; 6102 } 6103 6104 // If a NUW or NSW flag can be applied to the SCEV for this 6105 // addition, then compute the SCEV for this addition by itself 6106 // with a separate call to getAddExpr. We need to do that 6107 // instead of pushing the operands of the addition onto AddOps, 6108 // since the flags are only known to apply to this particular 6109 // addition - they may not apply to other additions that can be 6110 // formed with operands from AddOps. 6111 const SCEV *RHS = getSCEV(BO->RHS); 6112 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6113 if (Flags != SCEV::FlagAnyWrap) { 6114 const SCEV *LHS = getSCEV(BO->LHS); 6115 if (BO->Opcode == Instruction::Sub) 6116 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6117 else 6118 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6119 break; 6120 } 6121 } 6122 6123 if (BO->Opcode == Instruction::Sub) 6124 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6125 else 6126 AddOps.push_back(getSCEV(BO->RHS)); 6127 6128 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6129 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6130 NewBO->Opcode != Instruction::Sub)) { 6131 AddOps.push_back(getSCEV(BO->LHS)); 6132 break; 6133 } 6134 BO = NewBO; 6135 } while (true); 6136 6137 return getAddExpr(AddOps); 6138 } 6139 6140 case Instruction::Mul: { 6141 SmallVector<const SCEV *, 4> MulOps; 6142 do { 6143 if (BO->Op) { 6144 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6145 MulOps.push_back(OpSCEV); 6146 break; 6147 } 6148 6149 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6150 if (Flags != SCEV::FlagAnyWrap) { 6151 MulOps.push_back( 6152 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6153 break; 6154 } 6155 } 6156 6157 MulOps.push_back(getSCEV(BO->RHS)); 6158 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6159 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6160 MulOps.push_back(getSCEV(BO->LHS)); 6161 break; 6162 } 6163 BO = NewBO; 6164 } while (true); 6165 6166 return getMulExpr(MulOps); 6167 } 6168 case Instruction::UDiv: 6169 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6170 case Instruction::URem: 6171 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6172 case Instruction::Sub: { 6173 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6174 if (BO->Op) 6175 Flags = getNoWrapFlagsFromUB(BO->Op); 6176 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6177 } 6178 case Instruction::And: 6179 // For an expression like x&255 that merely masks off the high bits, 6180 // use zext(trunc(x)) as the SCEV expression. 6181 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6182 if (CI->isZero()) 6183 return getSCEV(BO->RHS); 6184 if (CI->isMinusOne()) 6185 return getSCEV(BO->LHS); 6186 const APInt &A = CI->getValue(); 6187 6188 // Instcombine's ShrinkDemandedConstant may strip bits out of 6189 // constants, obscuring what would otherwise be a low-bits mask. 6190 // Use computeKnownBits to compute what ShrinkDemandedConstant 6191 // knew about to reconstruct a low-bits mask value. 
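//
// Worked example (illustrative): for "x & 0xF0" on i8 we have LZ = 0,
// TZ = 4 and EffectiveMask = 0xF0, so the code below models the mask as
//   (zext i4 (trunc i8 (x /u 16) to i4) to i8) * 16
// i.e. shift out the low zero bits, truncate away the high zero bits, and
// multiply the zero-extended result back up.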
6192 unsigned LZ = A.countLeadingZeros(); 6193 unsigned TZ = A.countTrailingZeros(); 6194 unsigned BitWidth = A.getBitWidth(); 6195 KnownBits Known(BitWidth); 6196 computeKnownBits(BO->LHS, Known, getDataLayout(), 6197 0, &AC, nullptr, &DT); 6198 6199 APInt EffectiveMask = 6200 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6201 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6202 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6203 const SCEV *LHS = getSCEV(BO->LHS); 6204 const SCEV *ShiftedLHS = nullptr; 6205 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6206 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6207 // For an expression like (x * 8) & 8, simplify the multiply. 6208 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6209 unsigned GCD = std::min(MulZeros, TZ); 6210 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6211 SmallVector<const SCEV*, 4> MulOps; 6212 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6213 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6214 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6215 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6216 } 6217 } 6218 if (!ShiftedLHS) 6219 ShiftedLHS = getUDivExpr(LHS, MulCount); 6220 return getMulExpr( 6221 getZeroExtendExpr( 6222 getTruncateExpr(ShiftedLHS, 6223 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6224 BO->LHS->getType()), 6225 MulCount); 6226 } 6227 } 6228 break; 6229 6230 case Instruction::Or: 6231 // If the RHS of the Or is a constant, we may have something like: 6232 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6233 // optimizations will transparently handle this case. 6234 // 6235 // In order for this transformation to be safe, the LHS must be of the 6236 // form X*(2^n) and the Or constant must be less than 2^n. 6237 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6238 const SCEV *LHS = getSCEV(BO->LHS); 6239 const APInt &CIVal = CI->getValue(); 6240 if (GetMinTrailingZeros(LHS) >= 6241 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6242 // Build a plain add SCEV. 6243 return getAddExpr(LHS, getSCEV(CI), 6244 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 6245 } 6246 } 6247 break; 6248 6249 case Instruction::Xor: 6250 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6251 // If the RHS of xor is -1, then this is a not operation. 6252 if (CI->isMinusOne()) 6253 return getNotSCEV(getSCEV(BO->LHS)); 6254 6255 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6256 // This is a variant of the check for xor with -1, and it handles 6257 // the case where instcombine has trimmed non-demanded bits out 6258 // of an xor with -1. 6259 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6260 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6261 if (LBO->getOpcode() == Instruction::And && 6262 LCI->getValue() == CI->getValue()) 6263 if (const SCEVZeroExtendExpr *Z = 6264 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6265 Type *UTy = BO->LHS->getType(); 6266 const SCEV *Z0 = Z->getOperand(); 6267 Type *Z0Ty = Z0->getType(); 6268 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6269 6270 // If C is a low-bits mask, the zero extend is serving to 6271 // mask off the high bits. Complement the operand and 6272 // re-apply the zext. 
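//
// Concrete shape (illustrative, not from the original source):
//   %a = and i32 %x, 255   ; SCEV: (zext i8 (trunc i32 %x) to i32)
//   %r = xor i32 %a, 255
// Here 255 is a mask of the low 8 bits, so %r is modelled below as
//   (zext i8 (xor i8 (trunc i32 %x), -1) to i32)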
6273 if (CI->getValue().isMask(Z0TySize))
6274 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
6275
6276 // If C is a single bit, it may be in the sign-bit position
6277 // before the zero-extend. In this case, represent the xor
6278 // using an add, which is equivalent, and re-apply the zext.
6279 APInt Trunc = CI->getValue().trunc(Z0TySize);
6280 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
6281 Trunc.isSignMask())
6282 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
6283 UTy);
6284 }
6285 }
6286 break;
6287
6288 case Instruction::Shl:
6289 // Turn shift left of a constant amount into a multiply.
6290 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
6291 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
6292
6293 // If the shift count is not less than the bitwidth, the result of
6294 // the shift is undefined. Don't try to analyze it, because the
6295 // resolution chosen here may differ from the resolution chosen in
6296 // other parts of the compiler.
6297 if (SA->getValue().uge(BitWidth))
6298 break;
6299
6300 // We can safely preserve the nuw flag in all cases. It's also safe to
6301 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
6302 // requires special handling. It can be preserved as long as we're not
6303 // left shifting by bitwidth - 1.
6304 auto Flags = SCEV::FlagAnyWrap;
6305 if (BO->Op) {
6306 auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
6307 if ((MulFlags & SCEV::FlagNSW) &&
6308 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
6309 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
6310 if (MulFlags & SCEV::FlagNUW)
6311 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
6312 }
6313
6314 Constant *X = ConstantInt::get(
6315 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
6316 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
6317 }
6318 break;
6319
6320 case Instruction::AShr: {
6321 // AShr X, C, where C is a constant.
6322 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
6323 if (!CI)
6324 break;
6325
6326 Type *OuterTy = BO->LHS->getType();
6327 uint64_t BitWidth = getTypeSizeInBits(OuterTy);
6328 // If the shift count is not less than the bitwidth, the result of
6329 // the shift is undefined. Don't try to analyze it, because the
6330 // resolution chosen here may differ from the resolution chosen in
6331 // other parts of the compiler.
6332 if (CI->getValue().uge(BitWidth))
6333 break;
6334
6335 if (CI->isZero())
6336 return getSCEV(BO->LHS); // shift by zero --> noop
6337
6338 uint64_t AShrAmt = CI->getZExtValue();
6339 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
6340
6341 Operator *L = dyn_cast<Operator>(BO->LHS);
6342 if (L && L->getOpcode() == Instruction::Shl) {
6343 // X = Shl A, n
6344 // Y = AShr X, m
6345 // Both n and m are constant.
6346
6347 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
6348 if (L->getOperand(1) == BO->RHS)
6349 // For a two-shift sext-inreg, i.e. n = m,
6350 // use sext(trunc(x)) as the SCEV expression.
6351 return getSignExtendExpr(
6352 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
6353
6354 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
6355 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
6356 uint64_t ShlAmt = ShlAmtCI->getZExtValue();
6357 if (ShlAmt > AShrAmt) {
6358 // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
6359 // expression.
We already checked that ShlAmt < BitWidth, so
6360 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy because
6361 // ShlAmt - AShrAmt < BitWidth - AShrAmt, the width of TruncTy.
6362 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
6363 ShlAmt - AShrAmt);
6364 return getSignExtendExpr(
6365 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
6366 getConstant(Mul)), OuterTy);
6367 }
6368 }
6369 }
6370 if (BO->IsExact) {
6371 // Given exact arithmetic in-bounds right-shift by a constant,
6372 // we can lower it into: (abs(x) EXACT/u (1<<C)) * signum(x)
6373 const SCEV *X = getSCEV(BO->LHS);
6374 const SCEV *AbsX = getAbsExpr(X, /*IsNSW=*/false);
6375 APInt Mult = APInt::getOneBitSet(BitWidth, AShrAmt);
6376 const SCEV *Div = getUDivExactExpr(AbsX, getConstant(Mult));
6377 return getMulExpr(Div, getSignumExpr(X), SCEV::FlagNSW);
6378 }
6379 break;
6380 }
6381 }
6382 }
6383
6384 switch (U->getOpcode()) {
6385 case Instruction::Trunc:
6386 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
6387
6388 case Instruction::ZExt:
6389 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6390
6391 case Instruction::SExt:
6392 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
6393 // The NSW flag of a subtract does not always survive the conversion to
6394 // A + (-1)*B. By pushing sign extension onto its operands we are much
6395 // more likely to preserve NSW and allow later AddRec optimisations.
6396 //
6397 // NOTE: This is effectively duplicating this logic from getSignExtend:
6398 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
6399 // but by that point the NSW information has potentially been lost.
6400 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
6401 Type *Ty = U->getType();
6402 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
6403 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
6404 return getMinusSCEV(V1, V2, SCEV::FlagNSW);
6405 }
6406 }
6407 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6408
6409 case Instruction::BitCast:
6410 // BitCasts are no-op casts so we just eliminate the cast.
6411 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
6412 return getSCEV(U->getOperand(0));
6413 break;
6414
6415 case Instruction::SDiv:
6416 // If both operands are non-negative, this is just a udiv.
6417 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
6418 isKnownNonNegative(getSCEV(U->getOperand(1))))
6419 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
6420 break;
6421
6422 case Instruction::SRem:
6423 // If both operands are non-negative, this is just a urem.
6424 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
6425 isKnownNonNegative(getSCEV(U->getOperand(1))))
6426 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
6427 break;
6428
6429 // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
6430 // lead to pointer expressions which cannot safely be expanded to GEPs,
6431 // because ScalarEvolution doesn't respect the GEP aliasing rules when
6432 // simplifying integer expressions.
6433
6434 case Instruction::GetElementPtr:
6435 return createNodeForGEP(cast<GEPOperator>(U));
6436
6437 case Instruction::PHI:
6438 return createNodeForPHI(cast<PHINode>(U));
6439
6440 case Instruction::Select:
6441 // U can also be a select constant expr, which we let fall through.
Since
6442 // createNodeForSelect only works for a condition that is an `ICmpInst`, and
6443 // constant expressions cannot have instructions as operands, we'd have
6444 // returned getUnknown for a select constant expression anyway.
6445 if (isa<Instruction>(U))
6446 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
6447 U->getOperand(1), U->getOperand(2));
6448 break;
6449
6450 case Instruction::Call:
6451 case Instruction::Invoke:
6452 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
6453 return getSCEV(RV);
6454
6455 if (auto *II = dyn_cast<IntrinsicInst>(U)) {
6456 switch (II->getIntrinsicID()) {
6457 case Intrinsic::abs:
6458 return getAbsExpr(
6459 getSCEV(II->getArgOperand(0)),
6460 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
6461 case Intrinsic::umax:
6462 return getUMaxExpr(getSCEV(II->getArgOperand(0)),
6463 getSCEV(II->getArgOperand(1)));
6464 case Intrinsic::umin:
6465 return getUMinExpr(getSCEV(II->getArgOperand(0)),
6466 getSCEV(II->getArgOperand(1)));
6467 case Intrinsic::smax:
6468 return getSMaxExpr(getSCEV(II->getArgOperand(0)),
6469 getSCEV(II->getArgOperand(1)));
6470 case Intrinsic::smin:
6471 return getSMinExpr(getSCEV(II->getArgOperand(0)),
6472 getSCEV(II->getArgOperand(1)));
6473 case Intrinsic::usub_sat: {
6474 const SCEV *X = getSCEV(II->getArgOperand(0));
6475 const SCEV *Y = getSCEV(II->getArgOperand(1));
6476 const SCEV *ClampedY = getUMinExpr(X, Y);
6477 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
6478 }
6479 case Intrinsic::uadd_sat: {
6480 const SCEV *X = getSCEV(II->getArgOperand(0));
6481 const SCEV *Y = getSCEV(II->getArgOperand(1));
6482 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
6483 return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
6484 }
6485 default:
6486 break;
6487 }
6488 }
6489 break;
6490 }
6491
6492 return getUnknown(V);
6493 }
6494
6495 //===----------------------------------------------------------------------===//
6496 // Iteration Count Computation Code
6497 //
6498
6499 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
6500 if (!ExitCount)
6501 return 0;
6502
6503 ConstantInt *ExitConst = ExitCount->getValue();
6504
6505 // Guard against huge trip counts.
6506 if (ExitConst->getValue().getActiveBits() > 32)
6507 return 0;
6508
6509 // In case of integer overflow, this returns 0, which is correct.
6510 return ((unsigned)ExitConst->getZExtValue()) + 1;
6511 }
6512
6513 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
6514 if (BasicBlock *ExitingBB = L->getExitingBlock())
6515 return getSmallConstantTripCount(L, ExitingBB);
6516
6517 // No trip count information for multiple exits.
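// (Illustrative note: a loop whose body contains a conditional "break" as
// well as a latch test has two exiting blocks, so getExitingBlock() above
// returns null and we conservatively report no constant trip count.)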
6518 return 0;
6519 }
6520
6521 unsigned
6522 ScalarEvolution::getSmallConstantTripCount(const Loop *L,
6523 const BasicBlock *ExitingBlock) {
6524 assert(ExitingBlock && "Must pass a non-null exiting block!");
6525 assert(L->isLoopExiting(ExitingBlock) &&
6526 "Exiting block must actually branch out of the loop!");
6527 const SCEVConstant *ExitCount =
6528 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
6529 return getConstantTripCount(ExitCount);
6530 }
6531
6532 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
6533 const auto *MaxExitCount =
6534 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
6535 return getConstantTripCount(MaxExitCount);
6536 }
6537
6538 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
6539 if (BasicBlock *ExitingBB = L->getExitingBlock())
6540 return getSmallConstantTripMultiple(L, ExitingBB);
6541
6542 // No trip multiple information for multiple exits.
6543 return 0;
6544 }
6545
6546 /// Returns the largest constant divisor of the trip count of this loop as a
6547 /// normal unsigned value, if possible. This means that the actual trip count is
6548 /// always a multiple of the returned value (don't forget the trip count could
6549 /// very well be zero as well!).
6550 ///
6551 /// Returns 1 if the trip count is unknown or not guaranteed to be a
6552 /// multiple of a constant (which is also the case if the trip count is simply
6553 /// constant; use getSmallConstantTripCount for that case). It will also return
6554 /// 1 if the trip count is very large (>= 2^32).
6555 ///
6556 /// As explained in the comments for getSmallConstantTripCount, this assumes
6557 /// that control exits the loop via ExitingBlock.
6558 unsigned
6559 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
6560 const BasicBlock *ExitingBlock) {
6561 assert(ExitingBlock && "Must pass a non-null exiting block!");
6562 assert(L->isLoopExiting(ExitingBlock) &&
6563 "Exiting block must actually branch out of the loop!");
6564 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
6565 if (ExitCount == getCouldNotCompute())
6566 return 1;
6567
6568 // Get the trip count from the BE count by adding 1.
6569 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));
6570
6571 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
6572 if (!TC)
6573 // Attempt to factor more general cases. Returns the greatest power of
6574 // two divisor. If overflow happens, the trip count expression is still
6575 // divisible by the greatest power of 2 divisor returned.
6576 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));
6577
6578 ConstantInt *Result = TC->getValue();
6579
6580 // Guard against huge trip counts (this requires checking
6581 // for zero to handle the case where the trip count == -1 and the
6582 // addition wraps).
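// (Illustrative instance: if the backedge-taken count were i32 -1, adding 1
// wraps TCExpr around to 0, and the activeBits == 0 test below catches it.)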
6583 if (!Result || Result->getValue().getActiveBits() > 32 ||
6584 Result->getValue().getActiveBits() == 0)
6585 return 1;
6586
6587 return (unsigned)Result->getZExtValue();
6588 }
6589
6590 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
6591 const BasicBlock *ExitingBlock,
6592 ExitCountKind Kind) {
6593 switch (Kind) {
6594 case Exact:
6595 case SymbolicMaximum:
6596 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
6597 case ConstantMaximum:
6598 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
6599 }
6600 llvm_unreachable("Invalid ExitCountKind!");
6601 }
6602
6603 const SCEV *
6604 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
6605 SCEVUnionPredicate &Preds) {
6606 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
6607 }
6608
6609 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
6610 ExitCountKind Kind) {
6611 switch (Kind) {
6612 case Exact:
6613 return getBackedgeTakenInfo(L).getExact(L, this);
6614 case ConstantMaximum:
6615 return getBackedgeTakenInfo(L).getConstantMax(this);
6616 case SymbolicMaximum:
6617 return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
6618 }
6619 llvm_unreachable("Invalid ExitCountKind!");
6620 }
6621
6622 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
6623 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
6624 }
6625
6626 /// Push PHI nodes in the header of the given loop onto the given Worklist.
6627 static void
6628 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
6629 BasicBlock *Header = L->getHeader();
6630
6631 // Push all Loop-header PHIs onto the Worklist stack.
6632 for (PHINode &PN : Header->phis())
6633 Worklist.push_back(&PN);
6634 }
6635
6636 const ScalarEvolution::BackedgeTakenInfo &
6637 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
6638 auto &BTI = getBackedgeTakenInfo(L);
6639 if (BTI.hasFullInfo())
6640 return BTI;
6641
6642 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6643
6644 if (!Pair.second)
6645 return Pair.first->second;
6646
6647 BackedgeTakenInfo Result =
6648 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
6649
6650 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
6651 }
6652
6653 ScalarEvolution::BackedgeTakenInfo &
6654 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
6655 // Initially insert an invalid entry for this loop. If the insertion
6656 // succeeds, proceed to actually compute a backedge-taken count and
6657 // update the value. The temporary CouldNotCompute value tells SCEV
6658 // code elsewhere that it shouldn't attempt to request a new
6659 // backedge-taken count, which could result in infinite recursion.
6660 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
6661 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6662 if (!Pair.second)
6663 return Pair.first->second;
6664
6665 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
6666 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
6667 // must be cleared in this scope.
6668 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
6669
6670 // In builds without statistics, these counters are otherwise unused.
6671 (void)NumTripCountsComputed;
6672 (void)NumTripCountsNotComputed;
6673 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
6674 const SCEV *BEExact = Result.getExact(L, this);
6675 if (BEExact != getCouldNotCompute()) {
6676 assert(isLoopInvariant(BEExact, L) &&
6677 isLoopInvariant(Result.getConstantMax(this), L) &&
6678 "Computed backedge-taken count isn't loop invariant for loop!");
6679 ++NumTripCountsComputed;
6680 } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
6681 isa<PHINode>(L->getHeader()->begin())) {
6682 // Only count loops that have phi nodes as not being computable.
6683 ++NumTripCountsNotComputed;
6684 }
6685 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
6686
6687 // Now that we know more about the trip count for this loop, forget any
6688 // existing SCEV values for PHI nodes in this loop since they are only
6689 // conservative estimates made without the benefit of trip count
6690 // information. This is similar to the code in forgetLoop, except that
6691 // it handles SCEVUnknown PHI nodes specially.
6692 if (Result.hasAnyInfo()) {
6693 SmallVector<Instruction *, 16> Worklist;
6694 PushLoopPHIs(L, Worklist);
6695
6696 SmallPtrSet<Instruction *, 8> Discovered;
6697 while (!Worklist.empty()) {
6698 Instruction *I = Worklist.pop_back_val();
6699
6700 ValueExprMapType::iterator It =
6701 ValueExprMap.find_as(static_cast<Value *>(I));
6702 if (It != ValueExprMap.end()) {
6703 const SCEV *Old = It->second;
6704
6705 // SCEVUnknown for a PHI either means that it has an unrecognized
6706 // structure, or it's a PHI that's in the process of being computed
6707 // by createNodeForPHI. In the former case, additional loop trip
6708 // count information isn't going to change anything. In the latter
6709 // case, createNodeForPHI will perform the necessary updates on its
6710 // own when it gets to that point.
6711 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
6712 eraseValueFromMap(It->first);
6713 forgetMemoizedResults(Old);
6714 }
6715 if (PHINode *PN = dyn_cast<PHINode>(I))
6716 ConstantEvolutionLoopExitValue.erase(PN);
6717 }
6718
6719 // Since we don't need to invalidate anything for correctness and we're
6720 // only invalidating to make SCEV's results more precise, we get to stop
6721 // early to avoid invalidating too much. This is especially important in
6722 // cases like:
6723 //
6724 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
6725 // loop0:
6726 // %pn0 = phi
6727 // ...
6728 // loop1:
6729 // %pn1 = phi
6730 // ...
6731 //
6732 // where both loop0's and loop1's backedge-taken counts use the SCEV
6733 // expression for %v. If we don't have the early stop below, then in cases
6734 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
6735 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
6736 // count for loop1, effectively nullifying SCEV's trip count cache.
6737 for (auto *U : I->users())
6738 if (auto *I = dyn_cast<Instruction>(U)) {
6739 auto *LoopForUser = LI.getLoopFor(I->getParent());
6740 if (LoopForUser && L->contains(LoopForUser) &&
6741 Discovered.insert(I).second)
6742 Worklist.push_back(I);
6743 }
6744 }
6745 }
6746
6747 // Re-lookup the insert position, since the call to
6748 // computeBackedgeTakenCount above could result in a
6749 // recursive call to getBackedgeTakenInfo (on a different
6750 // loop), which would invalidate the iterator computed
6751 // earlier.
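// (Illustrative note: BackedgeTakenCounts is a DenseMap, and any insertion
// performed by a nested getBackedgeTakenInfo call may rehash the table and
// move entries, so Pair.first from above must not be reused.)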
6752 return BackedgeTakenCounts.find(L)->second = std::move(Result); 6753 } 6754 6755 void ScalarEvolution::forgetAllLoops() { 6756 // This method is intended to forget all info about loops. It should 6757 // invalidate caches as if the following happened: 6758 // - The trip counts of all loops have changed arbitrarily 6759 // - Every llvm::Value has been updated in place to produce a different 6760 // result. 6761 BackedgeTakenCounts.clear(); 6762 PredicatedBackedgeTakenCounts.clear(); 6763 LoopPropertiesCache.clear(); 6764 ConstantEvolutionLoopExitValue.clear(); 6765 ValueExprMap.clear(); 6766 ValuesAtScopes.clear(); 6767 LoopDispositions.clear(); 6768 BlockDispositions.clear(); 6769 UnsignedRanges.clear(); 6770 SignedRanges.clear(); 6771 ExprValueMap.clear(); 6772 HasRecMap.clear(); 6773 MinTrailingZerosCache.clear(); 6774 PredicatedSCEVRewrites.clear(); 6775 } 6776 6777 void ScalarEvolution::forgetLoop(const Loop *L) { 6778 // Drop any stored trip count value. 6779 auto RemoveLoopFromBackedgeMap = 6780 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 6781 auto BTCPos = Map.find(L); 6782 if (BTCPos != Map.end()) { 6783 BTCPos->second.clear(); 6784 Map.erase(BTCPos); 6785 } 6786 }; 6787 6788 SmallVector<const Loop *, 16> LoopWorklist(1, L); 6789 SmallVector<Instruction *, 32> Worklist; 6790 SmallPtrSet<Instruction *, 16> Visited; 6791 6792 // Iterate over all the loops and sub-loops to drop SCEV information. 6793 while (!LoopWorklist.empty()) { 6794 auto *CurrL = LoopWorklist.pop_back_val(); 6795 6796 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6797 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6798 6799 // Drop information about predicated SCEV rewrites for this loop. 6800 for (auto I = PredicatedSCEVRewrites.begin(); 6801 I != PredicatedSCEVRewrites.end();) { 6802 std::pair<const SCEV *, const Loop *> Entry = I->first; 6803 if (Entry.second == CurrL) 6804 PredicatedSCEVRewrites.erase(I++); 6805 else 6806 ++I; 6807 } 6808 6809 auto LoopUsersItr = LoopUsers.find(CurrL); 6810 if (LoopUsersItr != LoopUsers.end()) { 6811 for (auto *S : LoopUsersItr->second) 6812 forgetMemoizedResults(S); 6813 LoopUsers.erase(LoopUsersItr); 6814 } 6815 6816 // Drop information about expressions based on loop-header PHIs. 6817 PushLoopPHIs(CurrL, Worklist); 6818 6819 while (!Worklist.empty()) { 6820 Instruction *I = Worklist.pop_back_val(); 6821 if (!Visited.insert(I).second) 6822 continue; 6823 6824 ValueExprMapType::iterator It = 6825 ValueExprMap.find_as(static_cast<Value *>(I)); 6826 if (It != ValueExprMap.end()) { 6827 eraseValueFromMap(It->first); 6828 forgetMemoizedResults(It->second); 6829 if (PHINode *PN = dyn_cast<PHINode>(I)) 6830 ConstantEvolutionLoopExitValue.erase(PN); 6831 } 6832 6833 PushDefUseChildren(I, Worklist); 6834 } 6835 6836 LoopPropertiesCache.erase(CurrL); 6837 // Forget all contained loops too, to avoid dangling entries in the 6838 // ValuesAtScopes map. 6839 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6840 } 6841 } 6842 6843 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 6844 while (Loop *Parent = L->getParentLoop()) 6845 L = Parent; 6846 forgetLoop(L); 6847 } 6848 6849 void ScalarEvolution::forgetValue(Value *V) { 6850 Instruction *I = dyn_cast<Instruction>(V); 6851 if (!I) return; 6852 6853 // Drop information about expressions based on loop-header PHIs. 
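// (Illustrative: forgetting %a in "%b = add i32 %a, 1" must also drop any
// cached SCEV for %b, so the walk below visits the def-use closure of I.)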
6854 SmallVector<Instruction *, 16> Worklist; 6855 Worklist.push_back(I); 6856 6857 SmallPtrSet<Instruction *, 8> Visited; 6858 while (!Worklist.empty()) { 6859 I = Worklist.pop_back_val(); 6860 if (!Visited.insert(I).second) 6861 continue; 6862 6863 ValueExprMapType::iterator It = 6864 ValueExprMap.find_as(static_cast<Value *>(I)); 6865 if (It != ValueExprMap.end()) { 6866 eraseValueFromMap(It->first); 6867 forgetMemoizedResults(It->second); 6868 if (PHINode *PN = dyn_cast<PHINode>(I)) 6869 ConstantEvolutionLoopExitValue.erase(PN); 6870 } 6871 6872 PushDefUseChildren(I, Worklist); 6873 } 6874 } 6875 6876 void ScalarEvolution::forgetLoopDispositions(const Loop *L) { 6877 LoopDispositions.clear(); 6878 } 6879 6880 /// Get the exact loop backedge taken count considering all loop exits. A 6881 /// computable result can only be returned for loops with all exiting blocks 6882 /// dominating the latch. howFarToZero assumes that the limit of each loop test 6883 /// is never skipped. This is a valid assumption as long as the loop exits via 6884 /// that test. For precise results, it is the caller's responsibility to specify 6885 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 6886 const SCEV * 6887 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 6888 SCEVUnionPredicate *Preds) const { 6889 // If any exits were not computable, the loop is not computable. 6890 if (!isComplete() || ExitNotTaken.empty()) 6891 return SE->getCouldNotCompute(); 6892 6893 const BasicBlock *Latch = L->getLoopLatch(); 6894 // All exiting blocks we have collected must dominate the only backedge. 6895 if (!Latch) 6896 return SE->getCouldNotCompute(); 6897 6898 // All exiting blocks we have gathered dominate loop's latch, so exact trip 6899 // count is simply a minimum out of all these calculated exit counts. 6900 SmallVector<const SCEV *, 2> Ops; 6901 for (auto &ENT : ExitNotTaken) { 6902 const SCEV *BECount = ENT.ExactNotTaken; 6903 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 6904 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 6905 "We should only have known counts for exiting blocks that dominate " 6906 "latch!"); 6907 6908 Ops.push_back(BECount); 6909 6910 if (Preds && !ENT.hasAlwaysTruePredicate()) 6911 Preds->add(ENT.Predicate.get()); 6912 6913 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6914 "Predicate should be always true!"); 6915 } 6916 6917 return SE->getUMinFromMismatchedTypes(Ops); 6918 } 6919 6920 /// Get the exact not taken count for this loop exit. 6921 const SCEV * 6922 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock, 6923 ScalarEvolution *SE) const { 6924 for (auto &ENT : ExitNotTaken) 6925 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6926 return ENT.ExactNotTaken; 6927 6928 return SE->getCouldNotCompute(); 6929 } 6930 6931 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax( 6932 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { 6933 for (auto &ENT : ExitNotTaken) 6934 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6935 return ENT.MaxNotTaken; 6936 6937 return SE->getCouldNotCompute(); 6938 } 6939 6940 /// getConstantMax - Get the constant max backedge taken count for the loop. 
6941 const SCEV * 6942 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { 6943 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6944 return !ENT.hasAlwaysTruePredicate(); 6945 }; 6946 6947 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax()) 6948 return SE->getCouldNotCompute(); 6949 6950 assert((isa<SCEVCouldNotCompute>(getConstantMax()) || 6951 isa<SCEVConstant>(getConstantMax())) && 6952 "No point in having a non-constant max backedge taken count!"); 6953 return getConstantMax(); 6954 } 6955 6956 const SCEV * 6957 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, 6958 ScalarEvolution *SE) { 6959 if (!SymbolicMax) 6960 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); 6961 return SymbolicMax; 6962 } 6963 6964 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( 6965 ScalarEvolution *SE) const { 6966 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6967 return !ENT.hasAlwaysTruePredicate(); 6968 }; 6969 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6970 } 6971 6972 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6973 ScalarEvolution *SE) const { 6974 if (getConstantMax() && getConstantMax() != SE->getCouldNotCompute() && 6975 SE->hasOperand(getConstantMax(), S)) 6976 return true; 6977 6978 for (auto &ENT : ExitNotTaken) 6979 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6980 SE->hasOperand(ENT.ExactNotTaken, S)) 6981 return true; 6982 6983 return false; 6984 } 6985 6986 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6987 : ExactNotTaken(E), MaxNotTaken(E) { 6988 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6989 isa<SCEVConstant>(MaxNotTaken)) && 6990 "No point in having a non-constant max backedge taken count!"); 6991 } 6992 6993 ScalarEvolution::ExitLimit::ExitLimit( 6994 const SCEV *E, const SCEV *M, bool MaxOrZero, 6995 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6996 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6997 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6998 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6999 "Exact is not allowed to be less precise than Max"); 7000 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7001 isa<SCEVConstant>(MaxNotTaken)) && 7002 "No point in having a non-constant max backedge taken count!"); 7003 for (auto *PredSet : PredSetList) 7004 for (auto *P : *PredSet) 7005 addPredicate(P); 7006 } 7007 7008 ScalarEvolution::ExitLimit::ExitLimit( 7009 const SCEV *E, const SCEV *M, bool MaxOrZero, 7010 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 7011 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 7012 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7013 isa<SCEVConstant>(MaxNotTaken)) && 7014 "No point in having a non-constant max backedge taken count!"); 7015 } 7016 7017 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 7018 bool MaxOrZero) 7019 : ExitLimit(E, M, MaxOrZero, None) { 7020 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7021 isa<SCEVConstant>(MaxNotTaken)) && 7022 "No point in having a non-constant max backedge taken count!"); 7023 } 7024 7025 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 7026 /// computable exit into a persistent ExitNotTakenInfo array. 
7027 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
7028 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
7029 bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
7030 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
7031 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7032
7033 ExitNotTaken.reserve(ExitCounts.size());
7034 std::transform(
7035 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
7036 [&](const EdgeExitInfo &EEI) {
7037 BasicBlock *ExitBB = EEI.first;
7038 const ExitLimit &EL = EEI.second;
7039 if (EL.Predicates.empty())
7040 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7041 nullptr);
7042
7043 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
7044 for (auto *Pred : EL.Predicates)
7045 Predicate->add(Pred);
7046
7047 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7048 std::move(Predicate));
7049 });
7050 assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
7051 isa<SCEVConstant>(ConstantMax)) &&
7052 "No point in having a non-constant max backedge taken count!");
7053 }
7054
7055 /// Invalidate this result and free the ExitNotTakenInfo array.
7056 void ScalarEvolution::BackedgeTakenInfo::clear() {
7057 ExitNotTaken.clear();
7058 }
7059
7060 /// Compute the number of times the backedge of the specified loop will execute.
7061 ScalarEvolution::BackedgeTakenInfo
7062 ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
7063 bool AllowPredicates) {
7064 SmallVector<BasicBlock *, 8> ExitingBlocks;
7065 L->getExitingBlocks(ExitingBlocks);
7066
7067 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7068
7069 SmallVector<EdgeExitInfo, 4> ExitCounts;
7070 bool CouldComputeBECount = true;
7071 BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
7072 const SCEV *MustExitMaxBECount = nullptr;
7073 const SCEV *MayExitMaxBECount = nullptr;
7074 bool MustExitMaxOrZero = false;
7075
7076 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
7077 // and compute maxBECount.
7078 // Do a union of all the predicates here.
7079 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
7080 BasicBlock *ExitBB = ExitingBlocks[i];
7081
7082 // We canonicalize untaken exits to br (constant), and ignore them so that
7083 // proving an exit untaken doesn't negatively impact our ability to reason
7084 // about the loop as a whole.
7085 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
7086 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
7087 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7088 if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
7089 continue;
7090 }
7091
7092 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
7093
7094 assert((AllowPredicates || EL.Predicates.empty()) &&
7095 "Predicated exit limit when predicates are not allowed!");
7096
7097 // 1. For each exit that can be computed, add an entry to ExitCounts.
7098 // CouldComputeBECount is true only if all exits can be computed.
7099 if (EL.ExactNotTaken == getCouldNotCompute())
7100 // We couldn't compute an exact value for this exit, so
7101 // we won't be able to compute an exact value for the loop.
7102 CouldComputeBECount = false;
7103 else
7104 ExitCounts.emplace_back(ExitBB, EL);
7105
7106 // 2. Derive the loop's MaxBECount from each exit's max number of
7107 // non-exiting iterations. Partition the loop exits into two kinds:
7108 // LoopMustExits and LoopMayExits.
7109 //
7110 // If the exit dominates the loop latch, it is a LoopMustExit; otherwise, it
7111 // is a LoopMayExit. If any computable LoopMustExit is found, then
7112 // MaxBECount is the minimum EL.MaxNotTaken of computable
7113 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
7114 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
7115 // computable EL.MaxNotTaken.
7116 if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
7117 DT.dominates(ExitBB, Latch)) {
7118 if (!MustExitMaxBECount) {
7119 MustExitMaxBECount = EL.MaxNotTaken;
7120 MustExitMaxOrZero = EL.MaxOrZero;
7121 } else {
7122 MustExitMaxBECount =
7123 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
7124 }
7125 } else if (MayExitMaxBECount != getCouldNotCompute()) {
7126 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
7127 MayExitMaxBECount = EL.MaxNotTaken;
7128 else {
7129 MayExitMaxBECount =
7130 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
7131 }
7132 }
7133 }
7134 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
7135 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
7136 // The loop backedge will be taken the maximum or zero times if there's
7137 // a single exit that must be taken the maximum or zero times.
7138 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
7139 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
7140 MaxBECount, MaxOrZero);
7141 }
7142
7143 ScalarEvolution::ExitLimit
7144 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
7145 bool AllowPredicates) {
7146 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
7147 // If our exiting block does not dominate the latch, then its connection with
7148 // the loop's exit limit may be far from trivial.
7149 const BasicBlock *Latch = L->getLoopLatch();
7150 if (!Latch || !DT.dominates(ExitingBlock, Latch))
7151 return getCouldNotCompute();
7152
7153 bool IsOnlyExit = (L->getExitingBlock() != nullptr);
7154 Instruction *Term = ExitingBlock->getTerminator();
7155 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
7156 assert(BI->isConditional() && "If unconditional, it can't be in loop!");
7157 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7158 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
7159 "It should have one successor in loop and one exit block!");
7160 // Proceed to the next level to examine the exit condition expression.
7161 return computeExitLimitFromCond(
7162 L, BI->getCondition(), ExitIfTrue,
7163 /*ControlsExit=*/IsOnlyExit, AllowPredicates);
7164 }
7165
7166 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
7167 // For switch, make sure that there is a single exit from the loop.
7168 BasicBlock *Exit = nullptr;
7169 for (auto *SBB : successors(ExitingBlock))
7170 if (!L->contains(SBB)) {
7171 if (Exit) // Multiple exit successors.
7172 return getCouldNotCompute(); 7173 Exit = SBB; 7174 } 7175 assert(Exit && "Exiting block must have at least one exit"); 7176 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7177 /*ControlsExit=*/IsOnlyExit); 7178 } 7179 7180 return getCouldNotCompute(); 7181 } 7182 7183 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7184 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7185 bool ControlsExit, bool AllowPredicates) { 7186 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7187 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7188 ControlsExit, AllowPredicates); 7189 } 7190 7191 Optional<ScalarEvolution::ExitLimit> 7192 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7193 bool ExitIfTrue, bool ControlsExit, 7194 bool AllowPredicates) { 7195 (void)this->L; 7196 (void)this->ExitIfTrue; 7197 (void)this->AllowPredicates; 7198 7199 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7200 this->AllowPredicates == AllowPredicates && 7201 "Variance in assumed invariant key components!"); 7202 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7203 if (Itr == TripCountMap.end()) 7204 return None; 7205 return Itr->second; 7206 } 7207 7208 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7209 bool ExitIfTrue, 7210 bool ControlsExit, 7211 bool AllowPredicates, 7212 const ExitLimit &EL) { 7213 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7214 this->AllowPredicates == AllowPredicates && 7215 "Variance in assumed invariant key components!"); 7216 7217 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7218 assert(InsertResult.second && "Expected successful insertion!"); 7219 (void)InsertResult; 7220 (void)ExitIfTrue; 7221 } 7222 7223 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7224 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7225 bool ControlsExit, bool AllowPredicates) { 7226 7227 if (auto MaybeEL = 7228 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7229 return *MaybeEL; 7230 7231 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7232 ControlsExit, AllowPredicates); 7233 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7234 return EL; 7235 } 7236 7237 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7238 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7239 bool ControlsExit, bool AllowPredicates) { 7240 // Check if the controlling expression for this loop is an And or Or. 7241 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 7242 if (BO->getOpcode() == Instruction::And) { 7243 // Recurse on the operands of the and. 7244 bool EitherMayExit = !ExitIfTrue; 7245 ExitLimit EL0 = computeExitLimitFromCondCached( 7246 Cache, L, BO->getOperand(0), ExitIfTrue, 7247 ControlsExit && !EitherMayExit, AllowPredicates); 7248 ExitLimit EL1 = computeExitLimitFromCondCached( 7249 Cache, L, BO->getOperand(1), ExitIfTrue, 7250 ControlsExit && !EitherMayExit, AllowPredicates); 7251 // Be robust against unsimplified IR for the form "and i1 X, true" 7252 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) 7253 return CI->isOne() ? EL0 : EL1; 7254 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0))) 7255 return CI->isOne() ? 
EL1 : EL0; 7256 const SCEV *BECount = getCouldNotCompute(); 7257 const SCEV *MaxBECount = getCouldNotCompute(); 7258 if (EitherMayExit) { 7259 // Both conditions must be true for the loop to continue executing. 7260 // Choose the less conservative count. 7261 if (EL0.ExactNotTaken == getCouldNotCompute() || 7262 EL1.ExactNotTaken == getCouldNotCompute()) 7263 BECount = getCouldNotCompute(); 7264 else 7265 BECount = 7266 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7267 if (EL0.MaxNotTaken == getCouldNotCompute()) 7268 MaxBECount = EL1.MaxNotTaken; 7269 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7270 MaxBECount = EL0.MaxNotTaken; 7271 else 7272 MaxBECount = 7273 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7274 } else { 7275 // Both conditions must be true at the same time for the loop to exit. 7276 // For now, be conservative. 7277 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7278 MaxBECount = EL0.MaxNotTaken; 7279 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7280 BECount = EL0.ExactNotTaken; 7281 } 7282 7283 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7284 // to be more aggressive when computing BECount than when computing 7285 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7286 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7287 // to not. 7288 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7289 !isa<SCEVCouldNotCompute>(BECount)) 7290 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7291 7292 return ExitLimit(BECount, MaxBECount, false, 7293 {&EL0.Predicates, &EL1.Predicates}); 7294 } 7295 if (BO->getOpcode() == Instruction::Or) { 7296 // Recurse on the operands of the or. 7297 bool EitherMayExit = ExitIfTrue; 7298 ExitLimit EL0 = computeExitLimitFromCondCached( 7299 Cache, L, BO->getOperand(0), ExitIfTrue, 7300 ControlsExit && !EitherMayExit, AllowPredicates); 7301 ExitLimit EL1 = computeExitLimitFromCondCached( 7302 Cache, L, BO->getOperand(1), ExitIfTrue, 7303 ControlsExit && !EitherMayExit, AllowPredicates); 7304 // Be robust against unsimplified IR for the form "or i1 X, true" 7305 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) 7306 return CI->isZero() ? EL0 : EL1; 7307 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0))) 7308 return CI->isZero() ? EL1 : EL0; 7309 const SCEV *BECount = getCouldNotCompute(); 7310 const SCEV *MaxBECount = getCouldNotCompute(); 7311 if (EitherMayExit) { 7312 // Both conditions must be false for the loop to continue executing. 7313 // Choose the less conservative count. 7314 if (EL0.ExactNotTaken == getCouldNotCompute() || 7315 EL1.ExactNotTaken == getCouldNotCompute()) 7316 BECount = getCouldNotCompute(); 7317 else 7318 BECount = 7319 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7320 if (EL0.MaxNotTaken == getCouldNotCompute()) 7321 MaxBECount = EL1.MaxNotTaken; 7322 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7323 MaxBECount = EL0.MaxNotTaken; 7324 else 7325 MaxBECount = 7326 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7327 } else { 7328 // Both conditions must be false at the same time for the loop to exit. 7329 // For now, be conservative. 7330 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7331 MaxBECount = EL0.MaxNotTaken; 7332 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7333 BECount = EL0.ExactNotTaken; 7334 } 7335 // There are cases (e.g. 
PR26207) where computeExitLimitFromCond is able 7336 // to be more aggressive when computing BECount than when computing 7337 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7338 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7339 // to not. 7340 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7341 !isa<SCEVCouldNotCompute>(BECount)) 7342 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7343 7344 return ExitLimit(BECount, MaxBECount, false, 7345 {&EL0.Predicates, &EL1.Predicates}); 7346 } 7347 } 7348 7349 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7350 // Proceed to the next level to examine the icmp. 7351 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7352 ExitLimit EL = 7353 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7354 if (EL.hasFullInfo() || !AllowPredicates) 7355 return EL; 7356 7357 // Try again, but use SCEV predicates this time. 7358 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7359 /*AllowPredicates=*/true); 7360 } 7361 7362 // Check for a constant condition. These are normally stripped out by 7363 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7364 // preserve the CFG and is temporarily leaving constant conditions 7365 // in place. 7366 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7367 if (ExitIfTrue == !CI->getZExtValue()) 7368 // The backedge is always taken. 7369 return getCouldNotCompute(); 7370 else 7371 // The backedge is never taken. 7372 return getZero(CI->getType()); 7373 } 7374 7375 // If it's not an integer or pointer comparison then compute it the hard way. 7376 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7377 } 7378 7379 ScalarEvolution::ExitLimit 7380 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7381 ICmpInst *ExitCond, 7382 bool ExitIfTrue, 7383 bool ControlsExit, 7384 bool AllowPredicates) { 7385 // If the condition was exit on true, convert the condition to exit on false 7386 ICmpInst::Predicate Pred; 7387 if (!ExitIfTrue) 7388 Pred = ExitCond->getPredicate(); 7389 else 7390 Pred = ExitCond->getInversePredicate(); 7391 const ICmpInst::Predicate OriginalPred = Pred; 7392 7393 // Handle common loops like: for (X = "string"; *X; ++X) 7394 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7395 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7396 ExitLimit ItCnt = 7397 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7398 if (ItCnt.hasAnyInfo()) 7399 return ItCnt; 7400 } 7401 7402 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7403 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7404 7405 // Try to evaluate any dependencies out of the loop. 7406 LHS = getSCEVAtScope(LHS, L); 7407 RHS = getSCEVAtScope(RHS, L); 7408 7409 // At this point, we would like to compute how many iterations of the 7410 // loop the predicate will return true for these inputs. 7411 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7412 // If there is a loop-invariant, force it into the RHS. 7413 std::swap(LHS, RHS); 7414 Pred = ICmpInst::getSwappedPredicate(Pred); 7415 } 7416 7417 // Simplify the operands before analyzing them. 7418 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7419 7420 // If we have a comparison of a chrec against a constant, try to use value 7421 // ranges to answer this query. 
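//
// Worked example (illustrative): for the exit test "{5,+,1}<%L> slt 10",
// makeExactICmpRegion(SLT, 10) yields the range [INT_MIN, 10), and
// getNumIterationsInRange can determine that the recurrence leaves that
// range after exactly 5 iterations (values 5, 6, 7, 8, 9, then 10).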
7422 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7423 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7424 if (AddRec->getLoop() == L) { 7425 // Form the constant range. 7426 ConstantRange CompRange = 7427 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7428 7429 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7430 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7431 } 7432 7433 switch (Pred) { 7434 case ICmpInst::ICMP_NE: { // while (X != Y) 7435 // Convert to: while (X-Y != 0) 7436 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7437 AllowPredicates); 7438 if (EL.hasAnyInfo()) return EL; 7439 break; 7440 } 7441 case ICmpInst::ICMP_EQ: { // while (X == Y) 7442 // Convert to: while (X-Y == 0) 7443 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7444 if (EL.hasAnyInfo()) return EL; 7445 break; 7446 } 7447 case ICmpInst::ICMP_SLT: 7448 case ICmpInst::ICMP_ULT: { // while (X < Y) 7449 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7450 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7451 AllowPredicates); 7452 if (EL.hasAnyInfo()) return EL; 7453 break; 7454 } 7455 case ICmpInst::ICMP_SGT: 7456 case ICmpInst::ICMP_UGT: { // while (X > Y) 7457 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7458 ExitLimit EL = 7459 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7460 AllowPredicates); 7461 if (EL.hasAnyInfo()) return EL; 7462 break; 7463 } 7464 default: 7465 break; 7466 } 7467 7468 auto *ExhaustiveCount = 7469 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7470 7471 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7472 return ExhaustiveCount; 7473 7474 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7475 ExitCond->getOperand(1), L, OriginalPred); 7476 } 7477 7478 ScalarEvolution::ExitLimit 7479 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7480 SwitchInst *Switch, 7481 BasicBlock *ExitingBlock, 7482 bool ControlsExit) { 7483 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7484 7485 // Give up if the exit is the default dest of a switch. 7486 if (Switch->getDefaultDest() == ExitingBlock) 7487 return getCouldNotCompute(); 7488 7489 assert(L->contains(Switch->getDefaultDest()) && 7490 "Default case must not exit the loop!"); 7491 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7492 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7493 7494 // while (X != Y) --> while (X-Y != 0) 7495 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7496 if (EL.hasAnyInfo()) 7497 return EL; 7498 7499 return getCouldNotCompute(); 7500 } 7501 7502 static ConstantInt * 7503 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7504 ScalarEvolution &SE) { 7505 const SCEV *InVal = SE.getConstant(C); 7506 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7507 assert(isa<SCEVConstant>(Val) && 7508 "Evaluation of SCEV at constant didn't fold correctly?"); 7509 return cast<SCEVConstant>(Val)->getValue(); 7510 } 7511 7512 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7513 /// compute the backedge execution count. 7514 ScalarEvolution::ExitLimit 7515 ScalarEvolution::computeLoadConstantCompareExitLimit( 7516 LoadInst *LI, 7517 Constant *RHS, 7518 const Loop *L, 7519 ICmpInst::Predicate predicate) { 7520 if (LI->isVolatile()) return getCouldNotCompute(); 7521 7522 // Check to see if the loaded pointer is a getelementptr of a global. 
7523   // TODO: Use SCEV instead of manually grubbing with GEPs.
7524   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
7525   if (!GEP) return getCouldNotCompute();
7526
7527   // Make sure that it is really a constant global we are gepping, with an
7528   // initializer, and make sure the first IDX is really 0.
7529   GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
7530   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
7531       GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
7532       !cast<Constant>(GEP->getOperand(1))->isNullValue())
7533     return getCouldNotCompute();
7534
7535   // Okay, we allow one non-constant index into the GEP instruction.
7536   Value *VarIdx = nullptr;
7537   std::vector<Constant*> Indexes;
7538   unsigned VarIdxNum = 0;
7539   for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
7540     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
7541       Indexes.push_back(CI);
7542     } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
7543       if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
7544       VarIdx = GEP->getOperand(i);
7545       VarIdxNum = i-2;
7546       Indexes.push_back(nullptr);
7547     }
7548
7549   // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
7550   if (!VarIdx)
7551     return getCouldNotCompute();
7552
7553   // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
7554   // Check to see if X is a loop-variant value now.
7555   const SCEV *Idx = getSCEV(VarIdx);
7556   Idx = getSCEVAtScope(Idx, L);
7557
7558   // We can only recognize very limited forms of loop index expressions, in
7559   // particular, only affine AddRec's like {C1,+,C2}.
7560   const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
7561   if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
7562       !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
7563       !isa<SCEVConstant>(IdxExpr->getOperand(1)))
7564     return getCouldNotCompute();
7565
7566   unsigned MaxSteps = MaxBruteForceIterations;
7567   for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
7568     ConstantInt *ItCst = ConstantInt::get(
7569         cast<IntegerType>(IdxExpr->getType()), IterationNum);
7570     ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
7571
7572     // Form the GEP offset.
7573     Indexes[VarIdxNum] = Val;
7574
7575     Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
7576                                                          Indexes);
7577     if (!Result) break; // Cannot compute!
7578
7579     // Evaluate the condition for this iteration.
7580     Result = ConstantExpr::getICmp(predicate, Result, RHS);
7581     if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure.
7582     if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
7583       ++NumArrayLenItCounts;
7584       return getConstant(ItCst); // Found terminating iteration!
7585     }
7586   }
7587   return getCouldNotCompute();
7588 }
7589
7590 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
7591     Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
7592   ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
7593   if (!RHS)
7594     return getCouldNotCompute();
7595
7596   const BasicBlock *Latch = L->getLoopLatch();
7597   if (!Latch)
7598     return getCouldNotCompute();
7599
7600   const BasicBlock *Predecessor = L->getLoopPredecessor();
7601   if (!Predecessor)
7602     return getCouldNotCompute();
7603
7604   // Return true if V is of the form "LHS `shift_op` <positive constant>".
7605   // Return LHS in OutLHS and shift_op in OutOpCode.
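  // A minimal illustration of the matcher defined below (hypothetical IR):
  //   %s = lshr i32 %x, 3
  // matches with OutLHS = %x and OutOpCode = Instruction::LShr, while a
  // shift by a zero (non-positive) constant amount is rejected.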
7606   auto MatchPositiveShift =
7607       [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
7608
7609     using namespace PatternMatch;
7610
7611     ConstantInt *ShiftAmt;
7612     if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7613       OutOpCode = Instruction::LShr;
7614     else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7615       OutOpCode = Instruction::AShr;
7616     else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7617       OutOpCode = Instruction::Shl;
7618     else
7619       return false;
7620
7621     return ShiftAmt->getValue().isStrictlyPositive();
7622   };
7623
7624   // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
7625   //
7626   // loop:
7627   //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
7628   //   %iv.shifted = lshr i32 %iv, <positive constant>
7629   //
7630   // Return true on a successful match. Return the corresponding PHI node (%iv
7631   // above) in PNOut and the opcode of the shift operation in OpCodeOut.
7632   auto MatchShiftRecurrence =
7633       [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
7634     Optional<Instruction::BinaryOps> PostShiftOpCode;
7635
7636     {
7637       Instruction::BinaryOps OpC;
7638       Value *V;
7639
7640       // If we encounter a shift instruction, "peel off" the shift operation,
7641       // and remember that we did so. Later when we inspect %iv's backedge
7642       // value, we will make sure that the backedge value uses the same
7643       // operation.
7644       //
7645       // Note: the peeled shift operation does not have to be the same
7646       // instruction as the one feeding into the PHI's backedge value. We only
7647       // really care about it being the same *kind* of shift instruction --
7648       // that's all that is required for our later inferences to hold.
7649       if (MatchPositiveShift(LHS, V, OpC)) {
7650         PostShiftOpCode = OpC;
7651         LHS = V;
7652       }
7653     }
7654
7655     PNOut = dyn_cast<PHINode>(LHS);
7656     if (!PNOut || PNOut->getParent() != L->getHeader())
7657       return false;
7658
7659     Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
7660     Value *OpLHS;
7661
7662     return
7663         // The backedge value for the PHI node must be a shift by a positive
7664         // amount
7665         MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
7666
7667         // of the PHI node itself
7668         OpLHS == PNOut &&
7669
7670         // and the kind of shift should match the kind of shift we peeled
7671         // off, if any.
7672         (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
7673   };
7674
7675   PHINode *PN;
7676   Instruction::BinaryOps OpCode;
7677   if (!MatchShiftRecurrence(LHS, PN, OpCode))
7678     return getCouldNotCompute();
7679
7680   const DataLayout &DL = getDataLayout();
7681
7682   // The key rationale for this optimization is that for some kinds of shift
7683   // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
7684   // within a finite number of iterations. If the condition guarding the
7685   // backedge (in the sense that the backedge is taken if the condition is true)
7686   // is false for the value the shift recurrence stabilizes to, then we know
7687   // that the backedge is taken only a finite number of times.
7688
7689   ConstantInt *StableValue = nullptr;
7690   switch (OpCode) {
7691   default:
7692     llvm_unreachable("Impossible case!");
7693
7694   case Instruction::AShr: {
7695     // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
7696     // bitwidth(K) iterations.
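    // Concretely (hypothetical values): {-8,ashr,1} produces
    // -8, -4, -2, -1, -1, ... and {8,ashr,1} produces 8, 4, 2, 1, 0, 0, ...,
    // so the recurrence settles on -1 for negative K and on 0 for
    // non-negative K.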
7697 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7698 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7699 Predecessor->getTerminator(), &DT); 7700 auto *Ty = cast<IntegerType>(RHS->getType()); 7701 if (Known.isNonNegative()) 7702 StableValue = ConstantInt::get(Ty, 0); 7703 else if (Known.isNegative()) 7704 StableValue = ConstantInt::get(Ty, -1, true); 7705 else 7706 return getCouldNotCompute(); 7707 7708 break; 7709 } 7710 case Instruction::LShr: 7711 case Instruction::Shl: 7712 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7713 // stabilize to 0 in at most bitwidth(K) iterations. 7714 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7715 break; 7716 } 7717 7718 auto *Result = 7719 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7720 assert(Result->getType()->isIntegerTy(1) && 7721 "Otherwise cannot be an operand to a branch instruction"); 7722 7723 if (Result->isZeroValue()) { 7724 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7725 const SCEV *UpperBound = 7726 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7727 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7728 } 7729 7730 return getCouldNotCompute(); 7731 } 7732 7733 /// Return true if we can constant fold an instruction of the specified type, 7734 /// assuming that all operands were constants. 7735 static bool CanConstantFold(const Instruction *I) { 7736 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7737 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7738 isa<LoadInst>(I) || isa<ExtractValueInst>(I)) 7739 return true; 7740 7741 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7742 if (const Function *F = CI->getCalledFunction()) 7743 return canConstantFoldCallTo(CI, F); 7744 return false; 7745 } 7746 7747 /// Determine whether this instruction can constant evolve within this loop 7748 /// assuming its operands can all constant evolve. 7749 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7750 // An instruction outside of the loop can't be derived from a loop PHI. 7751 if (!L->contains(I)) return false; 7752 7753 if (isa<PHINode>(I)) { 7754 // We don't currently keep track of the control flow needed to evaluate 7755 // PHIs, so we cannot handle PHIs inside of loops. 7756 return L->getHeader() == I->getParent(); 7757 } 7758 7759 // If we won't be able to constant fold this expression even if the operands 7760 // are constants, bail early. 7761 return CanConstantFold(I); 7762 } 7763 7764 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7765 /// recursing through each instruction operand until reaching a loop header phi. 7766 static PHINode * 7767 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7768 DenseMap<Instruction *, PHINode *> &PHIMap, 7769 unsigned Depth) { 7770 if (Depth > MaxConstantEvolvingDepth) 7771 return nullptr; 7772 7773 // Otherwise, we can evaluate this instruction if all of its operands are 7774 // constant or derived from a PHI node themselves. 7775 PHINode *PHI = nullptr; 7776 for (Value *Op : UseInst->operands()) { 7777 if (isa<Constant>(Op)) continue; 7778 7779 Instruction *OpInst = dyn_cast<Instruction>(Op); 7780 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7781 7782 PHINode *P = dyn_cast<PHINode>(OpInst); 7783 if (!P) 7784 // If this operand is already visited, reuse the prior result. 
7785       // We may have P != PHI if this is the deepest point at which the
7786       // inconsistent paths meet.
7787       P = PHIMap.lookup(OpInst);
7788     if (!P) {
7789       // Recurse and memoize the results, whether a phi is found or not.
7790       // This recursive call invalidates pointers into PHIMap.
7791       P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
7792       PHIMap[OpInst] = P;
7793     }
7794     if (!P)
7795       return nullptr; // Not evolving from PHI
7796     if (PHI && PHI != P)
7797       return nullptr; // Evolving from multiple different PHIs.
7798     PHI = P;
7799   }
7800   // This is an expression evolving from a constant PHI!
7801   return PHI;
7802 }
7803
7804 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
7805 /// in the loop that V is derived from. We allow arbitrary operations along the
7806 /// way, but the operands of an operation must either be constants or a value
7807 /// derived from a constant PHI. If this expression does not fit with these
7808 /// constraints, return null.
7809 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
7810   Instruction *I = dyn_cast<Instruction>(V);
7811   if (!I || !canConstantEvolve(I, L)) return nullptr;
7812
7813   if (PHINode *PN = dyn_cast<PHINode>(I))
7814     return PN;
7815
7816   // Record non-constant instructions contained by the loop.
7817   DenseMap<Instruction *, PHINode *> PHIMap;
7818   return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
7819 }
7820
7821 /// EvaluateExpression - Given an expression that passes the
7822 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
7823 /// in the loop has the value PHIVal. If we can't fold this expression for some
7824 /// reason, return null.
7825 static Constant *EvaluateExpression(Value *V, const Loop *L,
7826                                     DenseMap<Instruction *, Constant *> &Vals,
7827                                     const DataLayout &DL,
7828                                     const TargetLibraryInfo *TLI) {
7829   // Convenient constant check, but redundant for recursive calls.
7830   if (Constant *C = dyn_cast<Constant>(V)) return C;
7831   Instruction *I = dyn_cast<Instruction>(V);
7832   if (!I) return nullptr;
7833
7834   if (Constant *C = Vals.lookup(I)) return C;
7835
7836   // An instruction inside the loop depends on a value outside the loop that we
7837   // weren't given a mapping for, or a value such as a call inside the loop.
7838   if (!canConstantEvolve(I, L)) return nullptr;
7839
7840   // An unmapped PHI can be due to a branch or another loop inside this loop,
7841   // or due to this not being the initial iteration through a loop where we
7842   // couldn't compute the evolution of this particular PHI last time.
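  // (For example, hypothetically, a PHI in the loop body merging two values
  // from an if/else: its value is control-dependent, so it stays unmapped
  // and we conservatively refuse to fold.)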
7843 if (isa<PHINode>(I)) return nullptr; 7844 7845 std::vector<Constant*> Operands(I->getNumOperands()); 7846 7847 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 7848 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 7849 if (!Operand) { 7850 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 7851 if (!Operands[i]) return nullptr; 7852 continue; 7853 } 7854 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 7855 Vals[Operand] = C; 7856 if (!C) return nullptr; 7857 Operands[i] = C; 7858 } 7859 7860 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 7861 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7862 Operands[1], DL, TLI); 7863 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 7864 if (!LI->isVolatile()) 7865 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7866 } 7867 return ConstantFoldInstOperands(I, Operands, DL, TLI); 7868 } 7869 7870 7871 // If every incoming value to PN except the one for BB is a specific Constant, 7872 // return that, else return nullptr. 7873 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7874 Constant *IncomingVal = nullptr; 7875 7876 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7877 if (PN->getIncomingBlock(i) == BB) 7878 continue; 7879 7880 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7881 if (!CurrentVal) 7882 return nullptr; 7883 7884 if (IncomingVal != CurrentVal) { 7885 if (IncomingVal) 7886 return nullptr; 7887 IncomingVal = CurrentVal; 7888 } 7889 } 7890 7891 return IncomingVal; 7892 } 7893 7894 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7895 /// in the header of its containing loop, we know the loop executes a 7896 /// constant number of times, and the PHI node is just a recurrence 7897 /// involving constants, fold it. 7898 Constant * 7899 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7900 const APInt &BEs, 7901 const Loop *L) { 7902 auto I = ConstantEvolutionLoopExitValue.find(PN); 7903 if (I != ConstantEvolutionLoopExitValue.end()) 7904 return I->second; 7905 7906 if (BEs.ugt(MaxBruteForceIterations)) 7907 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7908 7909 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7910 7911 DenseMap<Instruction *, Constant *> CurrentIterVals; 7912 BasicBlock *Header = L->getHeader(); 7913 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7914 7915 BasicBlock *Latch = L->getLoopLatch(); 7916 if (!Latch) 7917 return nullptr; 7918 7919 for (PHINode &PHI : Header->phis()) { 7920 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 7921 CurrentIterVals[&PHI] = StartCST; 7922 } 7923 if (!CurrentIterVals.count(PN)) 7924 return RetVal = nullptr; 7925 7926 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7927 7928 // Execute the loop symbolically to determine the exit value. 7929 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 7930 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 7931 7932 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7933 unsigned IterationNum = 0; 7934 const DataLayout &DL = getDataLayout(); 7935 for (; ; ++IterationNum) { 7936 if (IterationNum == NumIterations) 7937 return RetVal = CurrentIterVals[PN]; // Got exit value! 7938 7939 // Compute the value of the PHIs for the next iteration. 7940 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 
7941     DenseMap<Instruction *, Constant *> NextIterVals;
7942     Constant *NextPHI =
7943         EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
7944     if (!NextPHI)
7945       return nullptr; // Couldn't evaluate!
7946     NextIterVals[PN] = NextPHI;
7947
7948     bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
7949
7950     // Also evaluate the other PHI nodes. However, we don't get to stop if we
7951     // cease to be able to evaluate one of them or if they stop evolving,
7952     // because that doesn't necessarily prevent us from computing PN.
7953     SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
7954     for (const auto &I : CurrentIterVals) {
7955       PHINode *PHI = dyn_cast<PHINode>(I.first);
7956       if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
7957       PHIsToCompute.emplace_back(PHI, I.second);
7958     }
7959     // We use two distinct loops because EvaluateExpression may invalidate any
7960     // iterators into CurrentIterVals.
7961     for (const auto &I : PHIsToCompute) {
7962       PHINode *PHI = I.first;
7963       Constant *&NextPHI = NextIterVals[PHI];
7964       if (!NextPHI) { // Not already computed.
7965         Value *BEValue = PHI->getIncomingValueForBlock(Latch);
7966         NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
7967       }
7968       if (NextPHI != I.second)
7969         StoppedEvolving = false;
7970     }
7971
7972     // If all entries in CurrentIterVals == NextIterVals then we can stop
7973     // iterating, the loop can't continue to change.
7974     if (StoppedEvolving)
7975       return RetVal = CurrentIterVals[PN];
7976
7977     CurrentIterVals.swap(NextIterVals);
7978   }
7979 }
7980
7981 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
7982                                                           Value *Cond,
7983                                                           bool ExitWhen) {
7984   PHINode *PN = getConstantEvolvingPHI(Cond, L);
7985   if (!PN) return getCouldNotCompute();
7986
7987   // If the loop is canonicalized, the PHI will have exactly two entries.
7988   // That's the only form we support here.
7989   if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
7990
7991   DenseMap<Instruction *, Constant *> CurrentIterVals;
7992   BasicBlock *Header = L->getHeader();
7993   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
7994
7995   BasicBlock *Latch = L->getLoopLatch();
7996   assert(Latch && "Should follow from NumIncomingValues == 2!");
7997
7998   for (PHINode &PHI : Header->phis()) {
7999     if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8000       CurrentIterVals[&PHI] = StartCST;
8001   }
8002   if (!CurrentIterVals.count(PN))
8003     return getCouldNotCompute();
8004
8005   // Okay, we found a PHI node that defines the trip count of this loop. Execute
8006   // the loop symbolically to determine when the condition gets a value of
8007   // "ExitWhen".
8008   unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
8009   const DataLayout &DL = getDataLayout();
8010   for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
8011     auto *CondVal = dyn_cast_or_null<ConstantInt>(
8012         EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
8013
8014     // Couldn't symbolically evaluate.
8015     if (!CondVal) return getCouldNotCompute();
8016
8017     if (CondVal->getValue() == uint64_t(ExitWhen)) {
8018       ++NumBruteForceTripCountsComputed;
8019       return getConstant(Type::getInt32Ty(getContext()), IterationNum);
8020     }
8021
8022     // Update all the PHI nodes for the next iteration.
8023     DenseMap<Instruction *, Constant *> NextIterVals;
8024
8025     // Create a list of which PHIs we need to compute.
We want to do this before 8026 // calling EvaluateExpression on them because that may invalidate iterators 8027 // into CurrentIterVals. 8028 SmallVector<PHINode *, 8> PHIsToCompute; 8029 for (const auto &I : CurrentIterVals) { 8030 PHINode *PHI = dyn_cast<PHINode>(I.first); 8031 if (!PHI || PHI->getParent() != Header) continue; 8032 PHIsToCompute.push_back(PHI); 8033 } 8034 for (PHINode *PHI : PHIsToCompute) { 8035 Constant *&NextPHI = NextIterVals[PHI]; 8036 if (NextPHI) continue; // Already computed! 8037 8038 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 8039 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8040 } 8041 CurrentIterVals.swap(NextIterVals); 8042 } 8043 8044 // Too many iterations were needed to evaluate. 8045 return getCouldNotCompute(); 8046 } 8047 8048 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 8049 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 8050 ValuesAtScopes[V]; 8051 // Check to see if we've folded this expression at this loop before. 8052 for (auto &LS : Values) 8053 if (LS.first == L) 8054 return LS.second ? LS.second : V; 8055 8056 Values.emplace_back(L, nullptr); 8057 8058 // Otherwise compute it. 8059 const SCEV *C = computeSCEVAtScope(V, L); 8060 for (auto &LS : reverse(ValuesAtScopes[V])) 8061 if (LS.first == L) { 8062 LS.second = C; 8063 break; 8064 } 8065 return C; 8066 } 8067 8068 /// This builds up a Constant using the ConstantExpr interface. That way, we 8069 /// will return Constants for objects which aren't represented by a 8070 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 8071 /// Returns NULL if the SCEV isn't representable as a Constant. 8072 static Constant *BuildConstantFromSCEV(const SCEV *V) { 8073 switch (V->getSCEVType()) { 8074 case scCouldNotCompute: 8075 case scAddRecExpr: 8076 return nullptr; 8077 case scConstant: 8078 return cast<SCEVConstant>(V)->getValue(); 8079 case scUnknown: 8080 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 8081 case scSignExtend: { 8082 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 8083 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 8084 return ConstantExpr::getSExt(CastOp, SS->getType()); 8085 return nullptr; 8086 } 8087 case scZeroExtend: { 8088 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 8089 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8090 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8091 return nullptr; 8092 } 8093 case scTruncate: { 8094 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8095 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8096 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8097 return nullptr; 8098 } 8099 case scAddExpr: { 8100 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8101 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8102 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8103 unsigned AS = PTy->getAddressSpace(); 8104 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8105 C = ConstantExpr::getBitCast(C, DestPtrTy); 8106 } 8107 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8108 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8109 if (!C2) 8110 return nullptr; 8111 8112 // First pointer! 
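      // (I.e., once C2 is the first pointer-typed operand we encounter, swap
      // it into C so the pointer stays on the left and subsequent byte
      // offsets are added to it.)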
8113       if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
8114         unsigned AS = C2->getType()->getPointerAddressSpace();
8115         std::swap(C, C2);
8116         Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8117         // The offsets have been converted to bytes. We can add bytes to an
8118         // i8* by GEP with the byte count in the first index.
8119         C = ConstantExpr::getBitCast(C, DestPtrTy);
8120       }
8121
8122       // Don't bother trying to sum two pointers. We probably can't
8123       // statically compute a load that results from it anyway.
8124       if (C2->getType()->isPointerTy())
8125         return nullptr;
8126
8127       if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8128         if (PTy->getElementType()->isStructTy())
8129           C2 = ConstantExpr::getIntegerCast(
8130               C2, Type::getInt32Ty(C->getContext()), true);
8131         C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
8132       } else
8133         C = ConstantExpr::getAdd(C, C2);
8134     }
8135     return C;
8136   }
8137   return nullptr;
8138 }
8139   case scMulExpr: {
8140     const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
8141     if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
8142       // Don't bother with pointers at all.
8143       if (C->getType()->isPointerTy())
8144         return nullptr;
8145       for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
8146         Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
8147         if (!C2 || C2->getType()->isPointerTy())
8148           return nullptr;
8149         C = ConstantExpr::getMul(C, C2);
8150       }
8151       return C;
8152     }
8153     return nullptr;
8154   }
8155   case scUDivExpr: {
8156     const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
8157     if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
8158       if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
8159         if (LHS->getType() == RHS->getType())
8160           return ConstantExpr::getUDiv(LHS, RHS);
8161     return nullptr;
8162   }
8163   case scSMaxExpr:
8164   case scUMaxExpr:
8165   case scSMinExpr:
8166   case scUMinExpr:
8167     return nullptr; // TODO: smax, umax, smin, umin.
8168   }
8169   llvm_unreachable("Unknown SCEV kind!");
8170 }
8171
8172 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
8173   if (isa<SCEVConstant>(V)) return V;
8174
8175   // If this instruction is evolved from a constant-evolving PHI, compute the
8176   // exit value from the loop without using SCEVs.
8177   if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
8178     if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
8179       if (PHINode *PN = dyn_cast<PHINode>(I)) {
8180         const Loop *CurrLoop = this->LI[I->getParent()];
8181         // Looking for loop exit value.
8182         if (CurrLoop && CurrLoop->getParentLoop() == L &&
8183             PN->getParent() == CurrLoop->getHeader()) {
8184           // Okay, there is no closed form solution for the PHI node. Check
8185           // to see if the loop that contains it has a known backedge-taken
8186           // count. If so, we may be able to force computation of the exit
8187           // value.
8188           const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
8189           // This trivial case can show up in some degenerate cases where
8190           // the incoming IR has not yet been fully simplified.
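          // (E.g., hypothetically, a loop already guarded by a condition
          // that forces a single iteration: the backedge-taken count folds
          // to zero and the PHI's exit value is just its unique incoming
          // value from outside the loop.)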
8191 if (BackedgeTakenCount->isZero()) { 8192 Value *InitValue = nullptr; 8193 bool MultipleInitValues = false; 8194 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 8195 if (!CurrLoop->contains(PN->getIncomingBlock(i))) { 8196 if (!InitValue) 8197 InitValue = PN->getIncomingValue(i); 8198 else if (InitValue != PN->getIncomingValue(i)) { 8199 MultipleInitValues = true; 8200 break; 8201 } 8202 } 8203 } 8204 if (!MultipleInitValues && InitValue) 8205 return getSCEV(InitValue); 8206 } 8207 // Do we have a loop invariant value flowing around the backedge 8208 // for a loop which must execute the backedge? 8209 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 8210 isKnownPositive(BackedgeTakenCount) && 8211 PN->getNumIncomingValues() == 2) { 8212 8213 unsigned InLoopPred = 8214 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1; 8215 Value *BackedgeVal = PN->getIncomingValue(InLoopPred); 8216 if (CurrLoop->isLoopInvariant(BackedgeVal)) 8217 return getSCEV(BackedgeVal); 8218 } 8219 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 8220 // Okay, we know how many times the containing loop executes. If 8221 // this is a constant evolving PHI node, get the final value at 8222 // the specified iteration number. 8223 Constant *RV = getConstantEvolutionLoopExitValue( 8224 PN, BTCC->getAPInt(), CurrLoop); 8225 if (RV) return getSCEV(RV); 8226 } 8227 } 8228 8229 // If there is a single-input Phi, evaluate it at our scope. If we can 8230 // prove that this replacement does not break LCSSA form, use new value. 8231 if (PN->getNumOperands() == 1) { 8232 const SCEV *Input = getSCEV(PN->getOperand(0)); 8233 const SCEV *InputAtScope = getSCEVAtScope(Input, L); 8234 // TODO: We can generalize it using LI.replacementPreservesLCSSAForm, 8235 // for the simplest case just support constants. 8236 if (isa<SCEVConstant>(InputAtScope)) return InputAtScope; 8237 } 8238 } 8239 8240 // Okay, this is an expression that we cannot symbolically evaluate 8241 // into a SCEV. Check to see if it's possible to symbolically evaluate 8242 // the arguments into constants, and if so, try to constant propagate the 8243 // result. This is particularly useful for computing loop exit values. 8244 if (CanConstantFold(I)) { 8245 SmallVector<Constant *, 4> Operands; 8246 bool MadeImprovement = false; 8247 for (Value *Op : I->operands()) { 8248 if (Constant *C = dyn_cast<Constant>(Op)) { 8249 Operands.push_back(C); 8250 continue; 8251 } 8252 8253 // If any of the operands is non-constant and if they are 8254 // non-integer and non-pointer, don't even try to analyze them 8255 // with scev techniques. 8256 if (!isSCEVable(Op->getType())) 8257 return V; 8258 8259 const SCEV *OrigV = getSCEV(Op); 8260 const SCEV *OpV = getSCEVAtScope(OrigV, L); 8261 MadeImprovement |= OrigV != OpV; 8262 8263 Constant *C = BuildConstantFromSCEV(OpV); 8264 if (!C) return V; 8265 if (C->getType() != Op->getType()) 8266 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 8267 Op->getType(), 8268 false), 8269 C, Op->getType()); 8270 Operands.push_back(C); 8271 } 8272 8273 // Check to see if getSCEVAtScope actually made an improvement. 
8274 if (MadeImprovement) { 8275 Constant *C = nullptr; 8276 const DataLayout &DL = getDataLayout(); 8277 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 8278 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 8279 Operands[1], DL, &TLI); 8280 else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) { 8281 if (!Load->isVolatile()) 8282 C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(), 8283 DL); 8284 } else 8285 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 8286 if (!C) return V; 8287 return getSCEV(C); 8288 } 8289 } 8290 } 8291 8292 // This is some other type of SCEVUnknown, just return it. 8293 return V; 8294 } 8295 8296 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 8297 // Avoid performing the look-up in the common case where the specified 8298 // expression has no loop-variant portions. 8299 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 8300 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8301 if (OpAtScope != Comm->getOperand(i)) { 8302 // Okay, at least one of these operands is loop variant but might be 8303 // foldable. Build a new instance of the folded commutative expression. 8304 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 8305 Comm->op_begin()+i); 8306 NewOps.push_back(OpAtScope); 8307 8308 for (++i; i != e; ++i) { 8309 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8310 NewOps.push_back(OpAtScope); 8311 } 8312 if (isa<SCEVAddExpr>(Comm)) 8313 return getAddExpr(NewOps, Comm->getNoWrapFlags()); 8314 if (isa<SCEVMulExpr>(Comm)) 8315 return getMulExpr(NewOps, Comm->getNoWrapFlags()); 8316 if (isa<SCEVMinMaxExpr>(Comm)) 8317 return getMinMaxExpr(Comm->getSCEVType(), NewOps); 8318 llvm_unreachable("Unknown commutative SCEV type!"); 8319 } 8320 } 8321 // If we got here, all operands are loop invariant. 8322 return Comm; 8323 } 8324 8325 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 8326 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 8327 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 8328 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 8329 return Div; // must be loop invariant 8330 return getUDivExpr(LHS, RHS); 8331 } 8332 8333 // If this is a loop recurrence for a loop that does not contain L, then we 8334 // are dealing with the final value computed by the loop. 8335 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 8336 // First, attempt to evaluate each operand. 8337 // Avoid performing the look-up in the common case where the specified 8338 // expression has no loop-variant portions. 8339 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8340 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8341 if (OpAtScope == AddRec->getOperand(i)) 8342 continue; 8343 8344 // Okay, at least one of these operands is loop variant but might be 8345 // foldable. Build a new instance of the folded commutative expression. 8346 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8347 AddRec->op_begin()+i); 8348 NewOps.push_back(OpAtScope); 8349 for (++i; i != e; ++i) 8350 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8351 8352 const SCEV *FoldedRec = 8353 getAddRecExpr(NewOps, AddRec->getLoop(), 8354 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8355 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8356 // The addrec may be folded to a nonrecurrence, for example, if the 8357 // induction variable is multiplied by zero after constant folding. Go 8358 // ahead and return the folded value. 
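      // (A minimal instance: if the step operand folds to zero, {X,+,0}<L>
      // is constructed as plain X, which is no longer a SCEVAddRecExpr.)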
8359 if (!AddRec) 8360 return FoldedRec; 8361 break; 8362 } 8363 8364 // If the scope is outside the addrec's loop, evaluate it by using the 8365 // loop exit value of the addrec. 8366 if (!AddRec->getLoop()->contains(L)) { 8367 // To evaluate this recurrence, we need to know how many times the AddRec 8368 // loop iterates. Compute this now. 8369 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8370 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8371 8372 // Then, evaluate the AddRec. 8373 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8374 } 8375 8376 return AddRec; 8377 } 8378 8379 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8380 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8381 if (Op == Cast->getOperand()) 8382 return Cast; // must be loop invariant 8383 return getZeroExtendExpr(Op, Cast->getType()); 8384 } 8385 8386 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8387 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8388 if (Op == Cast->getOperand()) 8389 return Cast; // must be loop invariant 8390 return getSignExtendExpr(Op, Cast->getType()); 8391 } 8392 8393 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8394 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8395 if (Op == Cast->getOperand()) 8396 return Cast; // must be loop invariant 8397 return getTruncateExpr(Op, Cast->getType()); 8398 } 8399 8400 llvm_unreachable("Unknown SCEV type!"); 8401 } 8402 8403 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8404 return getSCEVAtScope(getSCEV(V), L); 8405 } 8406 8407 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 8408 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 8409 return stripInjectiveFunctions(ZExt->getOperand()); 8410 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 8411 return stripInjectiveFunctions(SExt->getOperand()); 8412 return S; 8413 } 8414 8415 /// Finds the minimum unsigned root of the following equation: 8416 /// 8417 /// A * X = B (mod N) 8418 /// 8419 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8420 /// A and B isn't important. 8421 /// 8422 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 8423 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8424 ScalarEvolution &SE) { 8425 uint32_t BW = A.getBitWidth(); 8426 assert(BW == SE.getTypeSizeInBits(B->getType())); 8427 assert(A != 0 && "A must be non-zero."); 8428 8429 // 1. D = gcd(A, N) 8430 // 8431 // The gcd of A and N may have only one prime factor: 2. The number of 8432 // trailing zeros in A is its multiplicity 8433 uint32_t Mult2 = A.countTrailingZeros(); 8434 // D = 2^Mult2 8435 8436 // 2. Check if B is divisible by D. 8437 // 8438 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 8439 // is not less than multiplicity of this prime factor for D. 8440 if (SE.GetMinTrailingZeros(B) < Mult2) 8441 return SE.getCouldNotCompute(); 8442 8443 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 8444 // modulo (N / D). 8445 // 8446 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8447 // (N / D) in general. The inverse itself always fits into BW bits, though, 8448 // so we immediately truncate it. 
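  // A small worked example (hypothetical values, BW = 8): to solve
  //   4 * X = 8 (mod 256)
  // we get Mult2 = 2 (so D = 4); B = 8 has at least two trailing zeros, so
  // a solution exists; then AD = 1, Mod = 64, I = 1, and the minimum
  // unsigned root is (1 * 8 mod 256) / 4 = 2.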
8449 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8450 APInt Mod(BW + 1, 0); 8451 Mod.setBit(BW - Mult2); // Mod = N / D 8452 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8453 8454 // 4. Compute the minimum unsigned root of the equation: 8455 // I * (B / D) mod (N / D) 8456 // To simplify the computation, we factor out the divide by D: 8457 // (I * B mod N) / D 8458 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8459 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8460 } 8461 8462 /// For a given quadratic addrec, generate coefficients of the corresponding 8463 /// quadratic equation, multiplied by a common value to ensure that they are 8464 /// integers. 8465 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8466 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8467 /// were multiplied by, and BitWidth is the bit width of the original addrec 8468 /// coefficients. 8469 /// This function returns None if the addrec coefficients are not compile- 8470 /// time constants. 8471 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8472 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8473 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8474 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8475 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8476 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8477 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8478 << *AddRec << '\n'); 8479 8480 // We currently can only solve this if the coefficients are constants. 8481 if (!LC || !MC || !NC) { 8482 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8483 return None; 8484 } 8485 8486 APInt L = LC->getAPInt(); 8487 APInt M = MC->getAPInt(); 8488 APInt N = NC->getAPInt(); 8489 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8490 8491 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8492 unsigned NewWidth = BitWidth + 1; 8493 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8494 << BitWidth << '\n'); 8495 // The sign-extension (as opposed to a zero-extension) here matches the 8496 // extension used in SolveQuadraticEquationWrap (with the same motivation). 8497 N = N.sext(NewWidth); 8498 M = M.sext(NewWidth); 8499 L = L.sext(NewWidth); 8500 8501 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8502 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8503 // L+M, L+2M+N, L+3M+3N, ... 8504 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8505 // 8506 // The equation Acc = 0 is then 8507 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8508 // In a quadratic form it becomes: 8509 // N n^2 + (2M-N) n + 2L = 0. 8510 8511 APInt A = N; 8512 APInt B = 2 * M - A; 8513 APInt C = 2 * L; 8514 APInt T = APInt(NewWidth, 2); 8515 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 8516 << "x + " << C << ", coeff bw: " << NewWidth 8517 << ", multiplied by " << T << '\n'); 8518 return std::make_tuple(A, B, C, T, BitWidth); 8519 } 8520 8521 /// Helper function to compare optional APInts: 8522 /// (a) if X and Y both exist, return min(X, Y), 8523 /// (b) if neither X nor Y exist, return None, 8524 /// (c) if exactly one of X and Y exists, return that value. 
8525 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 8526 if (X.hasValue() && Y.hasValue()) { 8527 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 8528 APInt XW = X->sextOrSelf(W); 8529 APInt YW = Y->sextOrSelf(W); 8530 return XW.slt(YW) ? *X : *Y; 8531 } 8532 if (!X.hasValue() && !Y.hasValue()) 8533 return None; 8534 return X.hasValue() ? *X : *Y; 8535 } 8536 8537 /// Helper function to truncate an optional APInt to a given BitWidth. 8538 /// When solving addrec-related equations, it is preferable to return a value 8539 /// that has the same bit width as the original addrec's coefficients. If the 8540 /// solution fits in the original bit width, truncate it (except for i1). 8541 /// Returning a value of a different bit width may inhibit some optimizations. 8542 /// 8543 /// In general, a solution to a quadratic equation generated from an addrec 8544 /// may require BW+1 bits, where BW is the bit width of the addrec's 8545 /// coefficients. The reason is that the coefficients of the quadratic 8546 /// equation are BW+1 bits wide (to avoid truncation when converting from 8547 /// the addrec to the equation). 8548 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 8549 if (!X.hasValue()) 8550 return None; 8551 unsigned W = X->getBitWidth(); 8552 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 8553 return X->trunc(BitWidth); 8554 return X; 8555 } 8556 8557 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 8558 /// iterations. The values L, M, N are assumed to be signed, and they 8559 /// should all have the same bit widths. 8560 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 8561 /// where BW is the bit width of the addrec's coefficients. 8562 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 8563 /// returned as such, otherwise the bit width of the returned value may 8564 /// be greater than BW. 8565 /// 8566 /// This function returns None if 8567 /// (a) the addrec coefficients are not constant, or 8568 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 8569 /// like x^2 = 5, no integer solutions exist, in other cases an integer 8570 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 8571 static Optional<APInt> 8572 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 8573 APInt A, B, C, M; 8574 unsigned BitWidth; 8575 auto T = GetQuadraticEquation(AddRec); 8576 if (!T.hasValue()) 8577 return None; 8578 8579 std::tie(A, B, C, M, BitWidth) = *T; 8580 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 8581 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 8582 if (!X.hasValue()) 8583 return None; 8584 8585 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 8586 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 8587 if (!V->isZero()) 8588 return None; 8589 8590 return TruncIfPossible(X, BitWidth); 8591 } 8592 8593 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 8594 /// iterations. The values M, N are assumed to be signed, and they 8595 /// should all have the same bit widths. 8596 /// Find the least n such that c(n) does not belong to the given range, 8597 /// while c(n-1) does. 
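/// (For instance, the chrec {0,+,1,+,2} has c(n) = n^2; for the range
/// [0,50), c(7) = 49 is still inside while c(8) = 64 is the first value
/// outside, so the answer would be n = 8.)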
8598 /// 8599 /// This function returns None if 8600 /// (a) the addrec coefficients are not constant, or 8601 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 8602 /// bounds of the range. 8603 static Optional<APInt> 8604 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 8605 const ConstantRange &Range, ScalarEvolution &SE) { 8606 assert(AddRec->getOperand(0)->isZero() && 8607 "Starting value of addrec should be 0"); 8608 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 8609 << Range << ", addrec " << *AddRec << '\n'); 8610 // This case is handled in getNumIterationsInRange. Here we can assume that 8611 // we start in the range. 8612 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 8613 "Addrec's initial value should be in range"); 8614 8615 APInt A, B, C, M; 8616 unsigned BitWidth; 8617 auto T = GetQuadraticEquation(AddRec); 8618 if (!T.hasValue()) 8619 return None; 8620 8621 // Be careful about the return value: there can be two reasons for not 8622 // returning an actual number. First, if no solutions to the equations 8623 // were found, and second, if the solutions don't leave the given range. 8624 // The first case means that the actual solution is "unknown", the second 8625 // means that it's known, but not valid. If the solution is unknown, we 8626 // cannot make any conclusions. 8627 // Return a pair: the optional solution and a flag indicating if the 8628 // solution was found. 8629 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 8630 // Solve for signed overflow and unsigned overflow, pick the lower 8631 // solution. 8632 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 8633 << Bound << " (before multiplying by " << M << ")\n"); 8634 Bound *= M; // The quadratic equation multiplier. 8635 8636 Optional<APInt> SO = None; 8637 if (BitWidth > 1) { 8638 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 8639 "signed overflow\n"); 8640 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); 8641 } 8642 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 8643 "unsigned overflow\n"); 8644 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, 8645 BitWidth+1); 8646 8647 auto LeavesRange = [&] (const APInt &X) { 8648 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); 8649 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); 8650 if (Range.contains(V0->getValue())) 8651 return false; 8652 // X should be at least 1, so X-1 is non-negative. 8653 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); 8654 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); 8655 if (Range.contains(V1->getValue())) 8656 return true; 8657 return false; 8658 }; 8659 8660 // If SolveQuadraticEquationWrap returns None, it means that there can 8661 // be a solution, but the function failed to find it. We cannot treat it 8662 // as "no solution". 8663 if (!SO.hasValue() || !UO.hasValue()) 8664 return { None, false }; 8665 8666 // Check the smaller value first to see if it leaves the range. 8667 // At this point, both SO and UO must have values. 8668 Optional<APInt> Min = MinOptional(SO, UO); 8669 if (LeavesRange(*Min)) 8670 return { Min, true }; 8671 Optional<APInt> Max = Min == SO ? UO : SO; 8672 if (LeavesRange(*Max)) 8673 return { Max, true }; 8674 8675 // Solutions were found, but were eliminated, hence the "true". 
8676     return { None, true };
8677   };
8678
8679   std::tie(A, B, C, M, BitWidth) = *T;
8680   // Lower bound is inclusive, subtract 1 to represent the exiting value.
8681   APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
8682   APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
8683   auto SL = SolveForBoundary(Lower);
8684   auto SU = SolveForBoundary(Upper);
8685   // If any of the solutions was unknown, no meaningful conclusions can
8686   // be made.
8687   if (!SL.second || !SU.second)
8688     return None;
8689
8690   // Claim: The correct solution is not some value between Min and Max.
8691   //
8692   // Justification: Assuming that Min and Max are different values, one of
8693   // them is when the first signed overflow happens, the other is when the
8694   // first unsigned overflow happens. Crossing the range boundary is only
8695   // possible via an overflow (treating 0 as a special case of it, modeling
8696   // an overflow as crossing k*2^W for some k).
8697   //
8698   // The interesting case here is when Min was eliminated as an invalid
8699   // solution, but Max was not. The argument is that if there was another
8700   // overflow between Min and Max, it would also have been eliminated if
8701   // it was considered.
8702   //
8703   // For a given boundary, it is possible to have two overflows of the same
8704   // type (signed/unsigned) without having the other type in between: this
8705   // can happen when the vertex of the parabola is between the iterations
8706   // corresponding to the overflows. This is only possible when the two
8707   // overflows cross k*2^W for the same k. In such case, if the second one
8708   // left the range (and was the first one to do so), the first overflow
8709   // would have to enter the range, which would mean that either we had left
8710   // the range before or that we started outside of it. Both of these cases
8711   // are contradictions.
8712   //
8713   // Claim: In the case where SolveForBoundary returns None, the correct
8714   // solution is not some value between the Max for this boundary and the
8715   // Min of the other boundary.
8716   //
8717   // Justification: Assume that we had such Max_A and Min_B corresponding
8718   // to range boundaries A and B and such that Max_A < Min_B. If there was
8719   // a solution between Max_A and Min_B, it would have to be caused by an
8720   // overflow corresponding to either A or B. It cannot correspond to B,
8721   // since Min_B is the first occurrence of such an overflow. If it
8722   // corresponded to A, it would have to be either a signed or an unsigned
8723   // overflow that is larger than both eliminated overflows for A. But
8724   // between the eliminated overflows and this overflow, the values would
8725   // cover the entire value space, thus crossing the other boundary, which
8726   // is a contradiction.
8727
8728   return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
8729 }
8730
8731 ScalarEvolution::ExitLimit
8732 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
8733                               bool AllowPredicates) {
8734
8735   // This is only used for loops with an "x != y" exit test. The exit condition
8736   // is now expressed as a single expression, V = x-y. So the exit test is
8737   // effectively V != 0. We know and take advantage of the fact that this
8738   // expression is only used in a comparison-with-zero context.
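  // As a sketch (hypothetical source loop): for
  //   for (i = 0; i != n; i += 2)
  // we get V = {0-n,+,2}, and the code below solves 2*N = n (mod 2^BW) for
  // the smallest unsigned N; if n is odd there is no solution and we return
  // SCEVCouldNotCompute.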
8739
8740   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
8741   // If the value is a constant:
8742   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
8743     // If the value is already zero, the branch will execute zero times.
8744     if (C->getValue()->isZero()) return C;
8745     return getCouldNotCompute(); // Otherwise it will loop infinitely.
8746   }
8747
8748   const SCEVAddRecExpr *AddRec =
8749       dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
8750
8751   if (!AddRec && AllowPredicates)
8752     // Try to make this an AddRec using runtime tests, in the first X
8753     // iterations of this loop, where X is the SCEV expression found by the
8754     // algorithm below.
8755     AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
8756
8757   if (!AddRec || AddRec->getLoop() != L)
8758     return getCouldNotCompute();
8759
8760   // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
8761   // the quadratic equation to solve it.
8762   if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
8763     // We can only use this value if the chrec ends up with an exact zero
8764     // value at this index. When solving for "X*X != 5", for example, we
8765     // should not accept a root of 2.
8766     if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
8767       const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
8768       return ExitLimit(R, R, false, Predicates);
8769     }
8770     return getCouldNotCompute();
8771   }
8772
8773   // Otherwise we can only handle this if it is affine.
8774   if (!AddRec->isAffine())
8775     return getCouldNotCompute();
8776
8777   // If this is an affine expression, the execution count of this branch is
8778   // the minimum unsigned root of the following equation:
8779   //
8780   //     Start + Step*N = 0 (mod 2^BW)
8781   //
8782   // equivalent to:
8783   //
8784   //             Step*N = -Start (mod 2^BW)
8785   //
8786   // where BW is the common bit width of Start and Step.
8787
8788   // Get the initial value for the loop.
8789   const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
8790   const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
8791
8792   // For now we handle only constant steps.
8793   //
8794   // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
8795   // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
8796   // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
8797   // We have not yet seen any such cases.
8798   const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
8799   if (!StepC || StepC->getValue()->isZero())
8800     return getCouldNotCompute();
8801
8802   // For positive steps (counting up until unsigned overflow):
8803   //   N = -Start/Step (as unsigned)
8804   // For negative steps (counting down to zero):
8805   //   N = Start/-Step
8806   // First compute the unsigned distance from zero in the direction of Step.
8807   bool CountDown = StepC->getAPInt().isNegative();
8808   const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
8809
8810   // Handle unitary steps, which cannot wrap around.
8811 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 8812 // N = Distance (as unsigned) 8813 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 8814 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L)); 8815 APInt MaxBECountBase = getUnsignedRangeMax(Distance); 8816 if (MaxBECountBase.ult(MaxBECount)) 8817 MaxBECount = MaxBECountBase; 8818 8819 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 8820 // we end up with a loop whose backedge-taken count is n - 1. Detect this 8821 // case, and see if we can improve the bound. 8822 // 8823 // Explicitly handling this here is necessary because getUnsignedRange 8824 // isn't context-sensitive; it doesn't know that we only care about the 8825 // range inside the loop. 8826 const SCEV *Zero = getZero(Distance->getType()); 8827 const SCEV *One = getOne(Distance->getType()); 8828 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 8829 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 8830 // If Distance + 1 doesn't overflow, we can compute the maximum distance 8831 // as "unsigned_max(Distance + 1) - 1". 8832 ConstantRange CR = getUnsignedRange(DistancePlusOne); 8833 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 8834 } 8835 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 8836 } 8837 8838 // If the condition controls loop exit (the loop exits only if the expression 8839 // is true) and the addition is no-wrap we can use unsigned divide to 8840 // compute the backedge count. In this case, the step may not divide the 8841 // distance, but we don't care because if the condition is "missed" the loop 8842 // will have undefined behavior due to wrapping. 8843 if (ControlsExit && AddRec->hasNoSelfWrap() && 8844 loopHasNoAbnormalExits(AddRec->getLoop())) { 8845 const SCEV *Exact = 8846 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 8847 const SCEV *Max = 8848 Exact == getCouldNotCompute() 8849 ? Exact 8850 : getConstant(getUnsignedRangeMax(Exact)); 8851 return ExitLimit(Exact, Max, false, Predicates); 8852 } 8853 8854 // Solve the general equation. 8855 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 8856 getNegativeSCEV(Start), *this); 8857 const SCEV *M = E == getCouldNotCompute() 8858 ? E 8859 : getConstant(getUnsignedRangeMax(E)); 8860 return ExitLimit(E, M, false, Predicates); 8861 } 8862 8863 ScalarEvolution::ExitLimit 8864 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 8865 // Loops that look like: while (X == 0) are very strange indeed. We don't 8866 // handle them yet except for the trivial case. This could be expanded in the 8867 // future as needed. 8868 8869 // If the value is a constant, check to see if it is known to be non-zero 8870 // already. If so, the backedge will execute zero times. 8871 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8872 if (!C->getValue()->isZero()) 8873 return getZero(C->getType()); 8874 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8875 } 8876 8877 // We could implement others, but I really doubt anyone writes loops like 8878 // this, and if they did, they would already be constant folded. 
8879 return getCouldNotCompute(); 8880 } 8881 8882 std::pair<const BasicBlock *, const BasicBlock *> 8883 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) 8884 const { 8885 // If the block has a unique predecessor, then there is no path from the 8886 // predecessor to the block that does not go through the direct edge 8887 // from the predecessor to the block. 8888 if (const BasicBlock *Pred = BB->getSinglePredecessor()) 8889 return {Pred, BB}; 8890 8891 // A loop's header is defined to be a block that dominates the loop. 8892 // If the header has a unique predecessor outside the loop, it must be 8893 // a block that has exactly one successor that can reach the loop. 8894 if (const Loop *L = LI.getLoopFor(BB)) 8895 return {L->getLoopPredecessor(), L->getHeader()}; 8896 8897 return {nullptr, nullptr}; 8898 } 8899 8900 /// SCEV structural equivalence is usually sufficient for testing whether two 8901 /// expressions are equal, however for the purposes of looking for a condition 8902 /// guarding a loop, it can be useful to be a little more general, since a 8903 /// front-end may have replicated the controlling expression. 8904 static bool HasSameValue(const SCEV *A, const SCEV *B) { 8905 // Quick check to see if they are the same SCEV. 8906 if (A == B) return true; 8907 8908 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 8909 // Not all instructions that are "identical" compute the same value. For 8910 // instance, two distinct alloca instructions allocating the same type are 8911 // identical and do not read memory; but compute distinct values. 8912 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 8913 }; 8914 8915 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 8916 // two different instructions with the same value. Check for this case. 8917 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 8918 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 8919 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 8920 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 8921 if (ComputesEqualValues(AI, BI)) 8922 return true; 8923 8924 // Otherwise assume they may have a different value. 8925 return false; 8926 } 8927 8928 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 8929 const SCEV *&LHS, const SCEV *&RHS, 8930 unsigned Depth) { 8931 bool Changed = false; 8932 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 8933 // '0 != 0'. 8934 auto TrivialCase = [&](bool TriviallyTrue) { 8935 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8936 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 8937 return true; 8938 }; 8939 // If we hit the max recursion limit bail out. 8940 if (Depth >= 3) 8941 return false; 8942 8943 // Canonicalize a constant to the right side. 8944 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 8945 // Check for both operands constant. 8946 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 8947 if (ConstantExpr::getICmp(Pred, 8948 LHSC->getValue(), 8949 RHSC->getValue())->isNullValue()) 8950 return TrivialCase(false); 8951 else 8952 return TrivialCase(true); 8953 } 8954 // Otherwise swap the operands to put the constant on the right. 
8955 std::swap(LHS, RHS); 8956 Pred = ICmpInst::getSwappedPredicate(Pred); 8957 Changed = true; 8958 } 8959 8960 // If we're comparing an addrec with a value which is loop-invariant in the 8961 // addrec's loop, put the addrec on the left. Also make a dominance check, 8962 // as both operands could be addrecs loop-invariant in each other's loop. 8963 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 8964 const Loop *L = AR->getLoop(); 8965 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 8966 std::swap(LHS, RHS); 8967 Pred = ICmpInst::getSwappedPredicate(Pred); 8968 Changed = true; 8969 } 8970 } 8971 8972 // If there's a constant operand, canonicalize comparisons with boundary 8973 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 8974 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 8975 const APInt &RA = RC->getAPInt(); 8976 8977 bool SimplifiedByConstantRange = false; 8978 8979 if (!ICmpInst::isEquality(Pred)) { 8980 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 8981 if (ExactCR.isFullSet()) 8982 return TrivialCase(true); 8983 else if (ExactCR.isEmptySet()) 8984 return TrivialCase(false); 8985 8986 APInt NewRHS; 8987 CmpInst::Predicate NewPred; 8988 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 8989 ICmpInst::isEquality(NewPred)) { 8990 // We were able to convert an inequality to an equality. 8991 Pred = NewPred; 8992 RHS = getConstant(NewRHS); 8993 Changed = SimplifiedByConstantRange = true; 8994 } 8995 } 8996 8997 if (!SimplifiedByConstantRange) { 8998 switch (Pred) { 8999 default: 9000 break; 9001 case ICmpInst::ICMP_EQ: 9002 case ICmpInst::ICMP_NE: 9003 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 9004 if (!RA) 9005 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 9006 if (const SCEVMulExpr *ME = 9007 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 9008 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 9009 ME->getOperand(0)->isAllOnesValue()) { 9010 RHS = AE->getOperand(1); 9011 LHS = ME->getOperand(1); 9012 Changed = true; 9013 } 9014 break; 9015 9016 9017 // The "Should have been caught earlier!" messages refer to the fact 9018 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 9019 // should have fired on the corresponding cases, and canonicalized the 9020 // check to trivial case. 9021 9022 case ICmpInst::ICMP_UGE: 9023 assert(!RA.isMinValue() && "Should have been caught earlier!"); 9024 Pred = ICmpInst::ICMP_UGT; 9025 RHS = getConstant(RA - 1); 9026 Changed = true; 9027 break; 9028 case ICmpInst::ICMP_ULE: 9029 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 9030 Pred = ICmpInst::ICMP_ULT; 9031 RHS = getConstant(RA + 1); 9032 Changed = true; 9033 break; 9034 case ICmpInst::ICMP_SGE: 9035 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 9036 Pred = ICmpInst::ICMP_SGT; 9037 RHS = getConstant(RA - 1); 9038 Changed = true; 9039 break; 9040 case ICmpInst::ICMP_SLE: 9041 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 9042 Pred = ICmpInst::ICMP_SLT; 9043 RHS = getConstant(RA + 1); 9044 Changed = true; 9045 break; 9046 } 9047 } 9048 } 9049 9050 // Check for obvious equality. 
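  // (HasSameValue is structural SCEV equality plus a check for identical
  // instructions, so e.g. two identical instructions replicated by a front
  // end compare equal here even when SCEV models each of them as a distinct
  // SCEVUnknown.)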
9051 if (HasSameValue(LHS, RHS)) { 9052 if (ICmpInst::isTrueWhenEqual(Pred)) 9053 return TrivialCase(true); 9054 if (ICmpInst::isFalseWhenEqual(Pred)) 9055 return TrivialCase(false); 9056 } 9057 9058 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 9059 // adding or subtracting 1 from one of the operands. 9060 switch (Pred) { 9061 case ICmpInst::ICMP_SLE: 9062 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 9063 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9064 SCEV::FlagNSW); 9065 Pred = ICmpInst::ICMP_SLT; 9066 Changed = true; 9067 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 9068 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 9069 SCEV::FlagNSW); 9070 Pred = ICmpInst::ICMP_SLT; 9071 Changed = true; 9072 } 9073 break; 9074 case ICmpInst::ICMP_SGE: 9075 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 9076 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 9077 SCEV::FlagNSW); 9078 Pred = ICmpInst::ICMP_SGT; 9079 Changed = true; 9080 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 9081 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9082 SCEV::FlagNSW); 9083 Pred = ICmpInst::ICMP_SGT; 9084 Changed = true; 9085 } 9086 break; 9087 case ICmpInst::ICMP_ULE: 9088 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 9089 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9090 SCEV::FlagNUW); 9091 Pred = ICmpInst::ICMP_ULT; 9092 Changed = true; 9093 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 9094 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 9095 Pred = ICmpInst::ICMP_ULT; 9096 Changed = true; 9097 } 9098 break; 9099 case ICmpInst::ICMP_UGE: 9100 if (!getUnsignedRangeMin(RHS).isMinValue()) { 9101 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 9102 Pred = ICmpInst::ICMP_UGT; 9103 Changed = true; 9104 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 9105 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9106 SCEV::FlagNUW); 9107 Pred = ICmpInst::ICMP_UGT; 9108 Changed = true; 9109 } 9110 break; 9111 default: 9112 break; 9113 } 9114 9115 // TODO: More simplifications are possible here. 9116 9117 // Recursively simplify until we either hit a recursion limit or nothing 9118 // changes. 9119 if (Changed) 9120 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 9121 9122 return Changed; 9123 } 9124 9125 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9126 return getSignedRangeMax(S).isNegative(); 9127 } 9128 9129 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9130 return getSignedRangeMin(S).isStrictlyPositive(); 9131 } 9132 9133 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9134 return !getSignedRangeMin(S).isNegative(); 9135 } 9136 9137 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9138 return !getSignedRangeMax(S).isStrictlyPositive(); 9139 } 9140 9141 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9142 return isKnownNegative(S) || isKnownPositive(S); 9143 } 9144 9145 std::pair<const SCEV *, const SCEV *> 9146 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9147 // Compute SCEV on entry of loop L. 9148 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9149 if (Start == getCouldNotCompute()) 9150 return { Start, Start }; 9151 // Compute post increment SCEV for loop L. 
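  // (For example, for S = {0,+,1}<L> the value on entry is 0 and the
  // post-increment form is {1,+,1}<L>; an illustrative case only.)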
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}

bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
                          return DT.properlyDominates(L1->getHeader(),
                                                      L2->getHeader());
                        });

  // Get the init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV, bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get the init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV, bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that the init SCEV contains an invariant load that does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check for that here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  // The backedge guard check appears to be faster than the entry one, so in
  // some cases checking it first can short-circuit the whole estimation.
  return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second) &&
         isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}

bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
                                         const SCEV *LHS, const SCEV *RHS,
                                         const Instruction *Context) {
  // TODO: Analyze guards and assumes from Context's block.
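  // (Illustrative: a dominating branch on "%n >s 0" on the path to Context's
  // block can let the entry-guard walk prove "%n >=s 1" here even when the
  // context-free isKnownPredicate query cannot.)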
9227 return isKnownPredicate(Pred, LHS, RHS) || 9228 isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS); 9229 } 9230 9231 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 9232 const SCEVAddRecExpr *LHS, 9233 const SCEV *RHS) { 9234 const Loop *L = LHS->getLoop(); 9235 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 9236 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 9237 } 9238 9239 Optional<ScalarEvolution::MonotonicPredicateType> 9240 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS, 9241 ICmpInst::Predicate Pred) { 9242 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred); 9243 9244 #ifndef NDEBUG 9245 // Verify an invariant: inverting the predicate should turn a monotonically 9246 // increasing change to a monotonically decreasing one, and vice versa. 9247 if (Result) { 9248 auto ResultSwapped = 9249 getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred)); 9250 9251 assert(ResultSwapped.hasValue() && "should be able to analyze both!"); 9252 assert(ResultSwapped.getValue() != Result.getValue() && 9253 "monotonicity should flip as we flip the predicate"); 9254 } 9255 #endif 9256 9257 return Result; 9258 } 9259 9260 Optional<ScalarEvolution::MonotonicPredicateType> 9261 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS, 9262 ICmpInst::Predicate Pred) { 9263 // A zero step value for LHS means the induction variable is essentially a 9264 // loop invariant value. We don't really depend on the predicate actually 9265 // flipping from false to true (for increasing predicates, and the other way 9266 // around for decreasing predicates), all we care about is that *if* the 9267 // predicate changes then it only changes from false to true. 9268 // 9269 // A zero step value in itself is not very useful, but there may be places 9270 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 9271 // as general as possible. 9272 9273 // Only handle LE/LT/GE/GT predicates. 9274 if (!ICmpInst::isRelational(Pred)) 9275 return None; 9276 9277 // Check that AR does not wrap. 9278 if (ICmpInst::isUnsigned(Pred)) { 9279 if (!LHS->hasNoUnsignedWrap()) 9280 return None; 9281 return Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE 9282 ? MonotonicallyIncreasing 9283 : MonotonicallyDecreasing; 9284 } else { 9285 assert(ICmpInst::isSigned(Pred) && 9286 "Relational predicate is either signed or unsigned!"); 9287 if (!LHS->hasNoSignedWrap()) 9288 return None; 9289 9290 const SCEV *Step = LHS->getStepRecurrence(*this); 9291 9292 if (isKnownNonNegative(Step)) { 9293 return Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE 9294 ? MonotonicallyIncreasing 9295 : MonotonicallyDecreasing; 9296 } 9297 9298 if (isKnownNonPositive(Step)) { 9299 return Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE 9300 ? MonotonicallyIncreasing 9301 : MonotonicallyDecreasing; 9302 } 9303 9304 return None; 9305 } 9306 } 9307 9308 bool ScalarEvolution::isLoopInvariantPredicate( 9309 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 9310 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 9311 const SCEV *&InvariantRHS) { 9312 9313 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 
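  // (E.g. for "%n >u {0,+,1}<L>" with %n invariant in L, we swap to
  // "{0,+,1}<L> <u %n" so the AddRec ends up on the left.)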
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return false;

  auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
  if (!MonotonicType)
    return false;
  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.
  bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return false;

  InvariantPred = Pred;
  InvariantLHS = ArLHS->getStart();
  InvariantRHS = RHS;
  return true;
}

bool ScalarEvolution::isLoopInvariantExitCondDuringFirstIterations(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    const Instruction *Context, const SCEV *MaxIter,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {
  // Try to prove the following set of facts:
  // - The predicate is monotonic.
  // - If the check does not fail on the 1st iteration:
  //   - No overflow will happen during first MaxIter iterations;
  //   - It will not fail on the MaxIter'th iteration.
  // If the check does fail on the 1st iteration, we leave the loop and no
  // other checks matter.

  // If there is a loop-invariant, force it into the RHS, otherwise bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
  // TODO: Lift the affinity limitation in the future.
  if (!AR || AR->getLoop() != L || !AR->isAffine())
    return false;

  // The predicate must be relational (i.e. <, <=, >=, >).
  if (!ICmpInst::isRelational(Pred))
    return false;

  // TODO: Support steps other than +/- 1.
  const SCEV *Step = AR->getOperand(1);
  auto *One = getOne(Step->getType());
  auto *MinusOne = getNegativeSCEV(One);
  if (Step != One && Step != MinusOne)
    return false;

  // A type mismatch here means that MaxIter is potentially larger than the
  // max unsigned value of the start type, which means we cannot prove
  // no-wrap for the indvar.
  if (AR->getType() != MaxIter->getType())
    return false;

  // Value of the IV on the suggested last iteration.
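  // (For an affine AR = {Start,+,1}<L> this is simply Start + MaxIter.)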
9403 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this); 9404 // Does it still meet the requirement? 9405 if (!isKnownPredicateAt(Pred, Last, RHS, Context)) 9406 return false; 9407 // Because step is +/- 1 and MaxIter has same type as Start (i.e. it does 9408 // not exceed max unsigned value of this type), this effectively proves 9409 // that there is no wrap during the iteration. To prove that there is no 9410 // signed/unsigned wrap, we need to check that 9411 // Start <= Last for step = 1 or Start >= Last for step = -1. 9412 ICmpInst::Predicate NoOverflowPred = 9413 CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 9414 if (Step == MinusOne) 9415 NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred); 9416 const SCEV *Start = AR->getStart(); 9417 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context)) 9418 return false; 9419 9420 // Everything is fine. 9421 InvariantPred = Pred; 9422 InvariantLHS = Start; 9423 InvariantRHS = RHS; 9424 return true; 9425 } 9426 9427 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 9428 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 9429 if (HasSameValue(LHS, RHS)) 9430 return ICmpInst::isTrueWhenEqual(Pred); 9431 9432 // This code is split out from isKnownPredicate because it is called from 9433 // within isLoopEntryGuardedByCond. 9434 9435 auto CheckRanges = 9436 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 9437 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 9438 .contains(RangeLHS); 9439 }; 9440 9441 // The check at the top of the function catches the case where the values are 9442 // known to be equal. 9443 if (Pred == CmpInst::ICMP_EQ) 9444 return false; 9445 9446 if (Pred == CmpInst::ICMP_NE) 9447 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 9448 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 9449 isKnownNonZero(getMinusSCEV(LHS, RHS)); 9450 9451 if (CmpInst::isSigned(Pred)) 9452 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 9453 9454 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 9455 } 9456 9457 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 9458 const SCEV *LHS, 9459 const SCEV *RHS) { 9460 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 9461 // Return Y via OutY. 
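  // (For example, matching Result = (%x + 42)<nsw> against X = %x with
  // ExpectedFlags = FlagNSW succeeds and sets OutY to 42.)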
9462 auto MatchBinaryAddToConst = 9463 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 9464 SCEV::NoWrapFlags ExpectedFlags) { 9465 const SCEV *NonConstOp, *ConstOp; 9466 SCEV::NoWrapFlags FlagsPresent; 9467 9468 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 9469 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 9470 return false; 9471 9472 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 9473 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 9474 }; 9475 9476 APInt C; 9477 9478 switch (Pred) { 9479 default: 9480 break; 9481 9482 case ICmpInst::ICMP_SGE: 9483 std::swap(LHS, RHS); 9484 LLVM_FALLTHROUGH; 9485 case ICmpInst::ICMP_SLE: 9486 // X s<= (X + C)<nsw> if C >= 0 9487 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 9488 return true; 9489 9490 // (X + C)<nsw> s<= X if C <= 0 9491 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 9492 !C.isStrictlyPositive()) 9493 return true; 9494 break; 9495 9496 case ICmpInst::ICMP_SGT: 9497 std::swap(LHS, RHS); 9498 LLVM_FALLTHROUGH; 9499 case ICmpInst::ICMP_SLT: 9500 // X s< (X + C)<nsw> if C > 0 9501 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 9502 C.isStrictlyPositive()) 9503 return true; 9504 9505 // (X + C)<nsw> s< X if C < 0 9506 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 9507 return true; 9508 break; 9509 9510 case ICmpInst::ICMP_UGE: 9511 std::swap(LHS, RHS); 9512 LLVM_FALLTHROUGH; 9513 case ICmpInst::ICMP_ULE: 9514 // X u<= (X + C)<nuw> for any C 9515 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW)) 9516 return true; 9517 break; 9518 9519 case ICmpInst::ICMP_UGT: 9520 std::swap(LHS, RHS); 9521 LLVM_FALLTHROUGH; 9522 case ICmpInst::ICMP_ULT: 9523 // X u< (X + C)<nuw> if C != 0 9524 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW) && !C.isNullValue()) 9525 return true; 9526 break; 9527 } 9528 9529 return false; 9530 } 9531 9532 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 9533 const SCEV *LHS, 9534 const SCEV *RHS) { 9535 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 9536 return false; 9537 9538 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 9539 // the stack can result in exponential time complexity. 9540 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 9541 9542 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 9543 // 9544 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 9545 // isKnownPredicate. isKnownPredicate is more powerful, but also more 9546 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 9547 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 9548 // use isKnownPredicate later if needed. 9549 return isKnownNonNegative(RHS) && 9550 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) && 9551 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS); 9552 } 9553 9554 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB, 9555 ICmpInst::Predicate Pred, 9556 const SCEV *LHS, const SCEV *RHS) { 9557 // No need to even try if we know the module has no guards. 
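  // (Guards are calls to the llvm.experimental.guard intrinsic, e.g.
  //   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
  // whose condition %cond must hold whenever execution continues past them.)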
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](const Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the
  // stack -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times.  This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into
  // an infinite loop as we walk up into the dom tree.  These loops do not
  // matter anyway, so we just return a conservative answer when we see them.
9640 if (!DT.isReachableFromEntry(L->getHeader())) 9641 return false; 9642 9643 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 9644 return true; 9645 9646 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 9647 DTN != HeaderDTN; DTN = DTN->getIDom()) { 9648 assert(DTN && "should reach the loop header before reaching the root!"); 9649 9650 BasicBlock *BB = DTN->getBlock(); 9651 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 9652 return true; 9653 9654 BasicBlock *PBB = BB->getSinglePredecessor(); 9655 if (!PBB) 9656 continue; 9657 9658 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 9659 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 9660 continue; 9661 9662 Value *Condition = ContinuePredicate->getCondition(); 9663 9664 // If we have an edge `E` within the loop body that dominates the only 9665 // latch, the condition guarding `E` also guards the backedge. This 9666 // reasoning works only for loops with a single latch. 9667 9668 BasicBlockEdge DominatingEdge(PBB, BB); 9669 if (DominatingEdge.isSingleEdge()) { 9670 // We're constructively (and conservatively) enumerating edges within the 9671 // loop body that dominate the latch. The dominator tree better agree 9672 // with us on this: 9673 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 9674 9675 if (isImpliedCond(Pred, LHS, RHS, Condition, 9676 BB != ContinuePredicate->getSuccessor(0))) 9677 return true; 9678 } 9679 } 9680 9681 return false; 9682 } 9683 9684 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 9685 ICmpInst::Predicate Pred, 9686 const SCEV *LHS, 9687 const SCEV *RHS) { 9688 if (VerifyIR) 9689 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 9690 "This cannot be done on broken IR!"); 9691 9692 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9693 return true; 9694 9695 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 9696 // the facts (a >= b && a != b) separately. A typical situation is when the 9697 // non-strict comparison is known from ranges and non-equality is known from 9698 // dominating predicates. If we are proving strict comparison, we always try 9699 // to prove non-equality and non-strict comparison separately. 9700 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 9701 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 9702 bool ProvedNonStrictComparison = false; 9703 bool ProvedNonEquality = false; 9704 9705 if (ProvingStrictComparison) { 9706 ProvedNonStrictComparison = 9707 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS); 9708 ProvedNonEquality = 9709 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS); 9710 if (ProvedNonStrictComparison && ProvedNonEquality) 9711 return true; 9712 } 9713 9714 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 9715 auto ProveViaGuard = [&](const BasicBlock *Block) { 9716 if (isImpliedViaGuard(Block, Pred, LHS, RHS)) 9717 return true; 9718 if (ProvingStrictComparison) { 9719 if (!ProvedNonStrictComparison) 9720 ProvedNonStrictComparison = 9721 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS); 9722 if (!ProvedNonEquality) 9723 ProvedNonEquality = 9724 isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS); 9725 if (ProvedNonStrictComparison && ProvedNonEquality) 9726 return true; 9727 } 9728 return false; 9729 }; 9730 9731 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 
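  // (Condition is the branch condition of a dominating conditional branch;
  // Inverse is set when the path to BB goes through its false successor, so
  // e.g. failing a "br i1 (%i >=s %n)" check lets us assume "%i <s %n".)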
9732 auto ProveViaCond = [&](const Value *Condition, bool Inverse) { 9733 const Instruction *Context = &BB->front(); 9734 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context)) 9735 return true; 9736 if (ProvingStrictComparison) { 9737 if (!ProvedNonStrictComparison) 9738 ProvedNonStrictComparison = isImpliedCond(NonStrictPredicate, LHS, RHS, 9739 Condition, Inverse, Context); 9740 if (!ProvedNonEquality) 9741 ProvedNonEquality = isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, 9742 Condition, Inverse, Context); 9743 if (ProvedNonStrictComparison && ProvedNonEquality) 9744 return true; 9745 } 9746 return false; 9747 }; 9748 9749 // Starting at the block's predecessor, climb up the predecessor chain, as long 9750 // as there are predecessors that can be found that have unique successors 9751 // leading to the original block. 9752 const Loop *ContainingLoop = LI.getLoopFor(BB); 9753 const BasicBlock *PredBB; 9754 if (ContainingLoop && ContainingLoop->getHeader() == BB) 9755 PredBB = ContainingLoop->getLoopPredecessor(); 9756 else 9757 PredBB = BB->getSinglePredecessor(); 9758 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB); 9759 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 9760 if (ProveViaGuard(Pair.first)) 9761 return true; 9762 9763 const BranchInst *LoopEntryPredicate = 9764 dyn_cast<BranchInst>(Pair.first->getTerminator()); 9765 if (!LoopEntryPredicate || 9766 LoopEntryPredicate->isUnconditional()) 9767 continue; 9768 9769 if (ProveViaCond(LoopEntryPredicate->getCondition(), 9770 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 9771 return true; 9772 } 9773 9774 // Check conditions due to any @llvm.assume intrinsics. 9775 for (auto &AssumeVH : AC.assumptions()) { 9776 if (!AssumeVH) 9777 continue; 9778 auto *CI = cast<CallInst>(AssumeVH); 9779 if (!DT.dominates(CI, BB)) 9780 continue; 9781 9782 if (ProveViaCond(CI->getArgOperand(0), false)) 9783 return true; 9784 } 9785 9786 return false; 9787 } 9788 9789 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 9790 ICmpInst::Predicate Pred, 9791 const SCEV *LHS, 9792 const SCEV *RHS) { 9793 // Interpret a null as meaning no loop, where there is obviously no guard 9794 // (interprocedural conditions notwithstanding). 9795 if (!L) 9796 return false; 9797 9798 // Both LHS and RHS must be available at loop entry. 9799 assert(isAvailableAtLoopEntry(LHS, L) && 9800 "LHS is not available at Loop Entry"); 9801 assert(isAvailableAtLoopEntry(RHS, L) && 9802 "RHS is not available at Loop Entry"); 9803 return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS); 9804 } 9805 9806 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, 9807 const SCEV *RHS, 9808 const Value *FoundCondValue, bool Inverse, 9809 const Instruction *Context) { 9810 if (!PendingLoopPredicates.insert(FoundCondValue).second) 9811 return false; 9812 9813 auto ClearOnExit = 9814 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 9815 9816 // Recursively handle And and Or conditions. 
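  // (If "A && B" is known to hold, each of A and B may separately imply the
  // fact we want; dually, when "A || B" is known false, which is the Inverse
  // case, both A and B are known false.)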
  if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
                             Context) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
                             Context);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
                             Context) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
                             Context);
    }
  }

  const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // We found a conditional branch that dominates the loop or controls the
  // loop latch.  Check to see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS, const SCEV *FoundRHS,
                                    const Instruction *Context) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    // For unsigned and equality predicates, try to prove that both found
    // operands fit into a narrow unsigned range.  If so, try to prove facts
    // in narrow types.
    if (!CmpInst::isSigned(FoundPred)) {
      auto *NarrowType = LHS->getType();
      auto *WideType = FoundLHS->getType();
      auto BitWidth = getTypeSizeInBits(NarrowType);
      const SCEV *MaxValue = getZeroExtendExpr(
          getConstant(APInt::getMaxValue(BitWidth)), WideType);
      if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) &&
          isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) {
        const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
        const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
        if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
                                       TruncFoundRHS, Context))
          return true;
      }
    }

    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }
  return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
                                    FoundRHS, Context);
}

bool ScalarEvolution::isImpliedCondBalancedTypes(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
    const Instruction *Context) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(FoundLHS->getType())
&& 9904 "Types should be balanced!"); 9905 // Canonicalize the query to match the way instcombine will have 9906 // canonicalized the comparison. 9907 if (SimplifyICmpOperands(Pred, LHS, RHS)) 9908 if (LHS == RHS) 9909 return CmpInst::isTrueWhenEqual(Pred); 9910 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 9911 if (FoundLHS == FoundRHS) 9912 return CmpInst::isFalseWhenEqual(FoundPred); 9913 9914 // Check to see if we can make the LHS or RHS match. 9915 if (LHS == FoundRHS || RHS == FoundLHS) { 9916 if (isa<SCEVConstant>(RHS)) { 9917 std::swap(FoundLHS, FoundRHS); 9918 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 9919 } else { 9920 std::swap(LHS, RHS); 9921 Pred = ICmpInst::getSwappedPredicate(Pred); 9922 } 9923 } 9924 9925 // Check whether the found predicate is the same as the desired predicate. 9926 if (FoundPred == Pred) 9927 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context); 9928 9929 // Check whether swapping the found predicate makes it the same as the 9930 // desired predicate. 9931 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 9932 if (isa<SCEVConstant>(RHS)) 9933 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context); 9934 else 9935 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), RHS, 9936 LHS, FoundLHS, FoundRHS, Context); 9937 } 9938 9939 // Unsigned comparison is the same as signed comparison when both the operands 9940 // are non-negative. 9941 if (CmpInst::isUnsigned(FoundPred) && 9942 CmpInst::getSignedPredicate(FoundPred) == Pred && 9943 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) 9944 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context); 9945 9946 // Check if we can make progress by sharpening ranges. 9947 if (FoundPred == ICmpInst::ICMP_NE && 9948 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 9949 9950 const SCEVConstant *C = nullptr; 9951 const SCEV *V = nullptr; 9952 9953 if (isa<SCEVConstant>(FoundLHS)) { 9954 C = cast<SCEVConstant>(FoundLHS); 9955 V = FoundRHS; 9956 } else { 9957 C = cast<SCEVConstant>(FoundRHS); 9958 V = FoundLHS; 9959 } 9960 9961 // The guarding predicate tells us that C != V. If the known range 9962 // of V is [C, t), we can sharpen the range to [C + 1, t). The 9963 // range we consider has to correspond to same signedness as the 9964 // predicate we're interested in folding. 9965 9966 APInt Min = ICmpInst::isSigned(Pred) ? 9967 getSignedRangeMin(V) : getUnsignedRangeMin(V); 9968 9969 if (Min == C->getAPInt()) { 9970 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 9971 // This is true even if (Min + 1) wraps around -- in case of 9972 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 9973 9974 APInt SharperMin = Min + 1; 9975 9976 switch (Pred) { 9977 case ICmpInst::ICMP_SGE: 9978 case ICmpInst::ICMP_UGE: 9979 // We know V `Pred` SharperMin. If this implies LHS `Pred` 9980 // RHS, we're done. 9981 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin), 9982 Context)) 9983 return true; 9984 LLVM_FALLTHROUGH; 9985 9986 case ICmpInst::ICMP_SGT: 9987 case ICmpInst::ICMP_UGT: 9988 // We know from the range information that (V `Pred` Min || 9989 // V == Min). We know from the guarding condition that !(V 9990 // == Min). This gives us 9991 // 9992 // V `Pred` Min || V == Min && !(V == Min) 9993 // => V `Pred` Min 9994 // 9995 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 
9996 9997 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), 9998 Context)) 9999 return true; 10000 break; 10001 10002 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. 10003 case ICmpInst::ICMP_SLE: 10004 case ICmpInst::ICMP_ULE: 10005 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 10006 LHS, V, getConstant(SharperMin), Context)) 10007 return true; 10008 LLVM_FALLTHROUGH; 10009 10010 case ICmpInst::ICMP_SLT: 10011 case ICmpInst::ICMP_ULT: 10012 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 10013 LHS, V, getConstant(Min), Context)) 10014 return true; 10015 break; 10016 10017 default: 10018 // No change 10019 break; 10020 } 10021 } 10022 } 10023 10024 // Check whether the actual condition is beyond sufficient. 10025 if (FoundPred == ICmpInst::ICMP_EQ) 10026 if (ICmpInst::isTrueWhenEqual(Pred)) 10027 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context)) 10028 return true; 10029 if (Pred == ICmpInst::ICMP_NE) 10030 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 10031 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, 10032 Context)) 10033 return true; 10034 10035 // Otherwise assume the worst. 10036 return false; 10037 } 10038 10039 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 10040 const SCEV *&L, const SCEV *&R, 10041 SCEV::NoWrapFlags &Flags) { 10042 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 10043 if (!AE || AE->getNumOperands() != 2) 10044 return false; 10045 10046 L = AE->getOperand(0); 10047 R = AE->getOperand(1); 10048 Flags = AE->getNoWrapFlags(); 10049 return true; 10050 } 10051 10052 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 10053 const SCEV *Less) { 10054 // We avoid subtracting expressions here because this function is usually 10055 // fairly deep in the call stack (i.e. is called many times). 10056 10057 // X - X = 0. 10058 if (More == Less) 10059 return APInt(getTypeSizeInBits(More->getType()), 0); 10060 10061 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 10062 const auto *LAR = cast<SCEVAddRecExpr>(Less); 10063 const auto *MAR = cast<SCEVAddRecExpr>(More); 10064 10065 if (LAR->getLoop() != MAR->getLoop()) 10066 return None; 10067 10068 // We look at affine expressions only; not for correctness but to keep 10069 // getStepRecurrence cheap. 10070 if (!LAR->isAffine() || !MAR->isAffine()) 10071 return None; 10072 10073 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 10074 return None; 10075 10076 Less = LAR->getStart(); 10077 More = MAR->getStart(); 10078 10079 // fall through 10080 } 10081 10082 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 10083 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 10084 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 10085 return M - L; 10086 } 10087 10088 SCEV::NoWrapFlags Flags; 10089 const SCEV *LLess = nullptr, *RLess = nullptr; 10090 const SCEV *LMore = nullptr, *RMore = nullptr; 10091 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 10092 // Compare (X + C1) vs X. 10093 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 10094 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 10095 if (RLess == More) 10096 return -(C1->getAPInt()); 10097 10098 // Compare X vs (X + C2). 10099 if (splitBinaryAdd(More, LMore, RMore, Flags)) 10100 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 10101 if (RMore == Less) 10102 return C2->getAPInt(); 10103 10104 // Compare (X + C1) vs (X + C2). 
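  // (E.g. More = (%x + 7) and Less = (%x + 3) gives a constant difference
  // of 7 - 3 == 4.)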
10105 if (C1 && C2 && RLess == RMore) 10106 return C2->getAPInt() - C1->getAPInt(); 10107 10108 return None; 10109 } 10110 10111 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( 10112 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10113 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) { 10114 // Try to recognize the following pattern: 10115 // 10116 // FoundRHS = ... 10117 // ... 10118 // loop: 10119 // FoundLHS = {Start,+,W} 10120 // context_bb: // Basic block from the same loop 10121 // known(Pred, FoundLHS, FoundRHS) 10122 // 10123 // If some predicate is known in the context of a loop, it is also known on 10124 // each iteration of this loop, including the first iteration. Therefore, in 10125 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to 10126 // prove the original pred using this fact. 10127 if (!Context) 10128 return false; 10129 const BasicBlock *ContextBB = Context->getParent(); 10130 // Make sure AR varies in the context block. 10131 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { 10132 const Loop *L = AR->getLoop(); 10133 // Make sure that context belongs to the loop and executes on 1st iteration 10134 // (if it ever executes at all). 10135 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 10136 return false; 10137 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) 10138 return false; 10139 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); 10140 } 10141 10142 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { 10143 const Loop *L = AR->getLoop(); 10144 // Make sure that context belongs to the loop and executes on 1st iteration 10145 // (if it ever executes at all). 10146 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 10147 return false; 10148 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) 10149 return false; 10150 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); 10151 } 10152 10153 return false; 10154 } 10155 10156 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 10157 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10158 const SCEV *FoundLHS, const SCEV *FoundRHS) { 10159 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 10160 return false; 10161 10162 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 10163 if (!AddRecLHS) 10164 return false; 10165 10166 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 10167 if (!AddRecFoundLHS) 10168 return false; 10169 10170 // We'd like to let SCEV reason about control dependencies, so we constrain 10171 // both the inequalities to be about add recurrences on the same loop. This 10172 // way we can use isLoopEntryGuardedByCond later. 10173 10174 const Loop *L = AddRecFoundLHS->getLoop(); 10175 if (L != AddRecLHS->getLoop()) 10176 return false; 10177 10178 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 10179 // 10180 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 10181 // ... (2) 10182 // 10183 // Informal proof for (2), assuming (1) [*]: 10184 // 10185 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... 
(3)[**]
  //
  // Then
  //
  //   FoundLHS s< FoundRHS s< INT_MIN - C
  // <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  // <=>  (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  // <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
  //      (FoundRHS + INT_MIN + C + INT_MIN)                   [ using (3) ]
  // <=>  FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow.  For instance, say FoundLHS = (i8 -128),
  // FoundRHS = (i8 -127) and C = (i8 -100).  Then INT_MIN - C = (i8 -28),
  // and FoundRHS s< (INT_MIN - C).  Lack of sign overflow / underflow in
  // "FoundRHS + C" is neither necessary nor sufficient to prove
  // "(FoundLHS + C) s< (FoundRHS + C)".

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not already pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return the
      // conservative answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If neither LHS nor RHS is a Phi, there is nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it left.
10279 if (!LPhi) { 10280 std::swap(LHS, RHS); 10281 std::swap(FoundLHS, FoundRHS); 10282 std::swap(LPhi, RPhi); 10283 Pred = ICmpInst::getSwappedPredicate(Pred); 10284 } 10285 10286 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!"); 10287 const BasicBlock *LBB = LPhi->getParent(); 10288 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 10289 10290 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) { 10291 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) || 10292 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) || 10293 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth); 10294 }; 10295 10296 if (RPhi && RPhi->getParent() == LBB) { 10297 // Case one: RHS is also a SCEVUnknown Phi from the same basic block. 10298 // If we compare two Phis from the same block, and for each entry block 10299 // the predicate is true for incoming values from this block, then the 10300 // predicate is also true for the Phis. 10301 for (const BasicBlock *IncBB : predecessors(LBB)) { 10302 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 10303 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB)); 10304 if (!ProvedEasily(L, R)) 10305 return false; 10306 } 10307 } else if (RAR && RAR->getLoop()->getHeader() == LBB) { 10308 // Case two: RHS is also a Phi from the same basic block, and it is an 10309 // AddRec. It means that there is a loop which has both AddRec and Unknown 10310 // PHIs, for it we can compare incoming values of AddRec from above the loop 10311 // and latch with their respective incoming values of LPhi. 10312 // TODO: Generalize to handle loops with many inputs in a header. 10313 if (LPhi->getNumIncomingValues() != 2) return false; 10314 10315 auto *RLoop = RAR->getLoop(); 10316 auto *Predecessor = RLoop->getLoopPredecessor(); 10317 assert(Predecessor && "Loop with AddRec with no predecessor?"); 10318 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor)); 10319 if (!ProvedEasily(L1, RAR->getStart())) 10320 return false; 10321 auto *Latch = RLoop->getLoopLatch(); 10322 assert(Latch && "Loop with AddRec with no latch?"); 10323 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch)); 10324 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this))) 10325 return false; 10326 } else { 10327 // In all other cases go over inputs of LHS and compare each of them to RHS, 10328 // the predicate is true for (LHS, RHS) if it is true for all such pairs. 10329 // At this point RHS is either a non-Phi, or it is a Phi from some block 10330 // different from LBB. 10331 for (const BasicBlock *IncBB : predecessors(LBB)) { 10332 // Check that RHS is available in this block. 
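      // (E.g. if RHS is defined inside the loop containing LBB, it may not
      // dominate an incoming block from outside the loop, and comparing
      // against it there would be meaningless.)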
10333 if (!dominates(RHS, IncBB)) 10334 return false; 10335 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 10336 if (!ProvedEasily(L, RHS)) 10337 return false; 10338 } 10339 } 10340 return true; 10341 } 10342 10343 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 10344 const SCEV *LHS, const SCEV *RHS, 10345 const SCEV *FoundLHS, 10346 const SCEV *FoundRHS, 10347 const Instruction *Context) { 10348 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10349 return true; 10350 10351 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10352 return true; 10353 10354 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS, 10355 Context)) 10356 return true; 10357 10358 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 10359 FoundLHS, FoundRHS) || 10360 // ~x < ~y --> x > y 10361 isImpliedCondOperandsHelper(Pred, LHS, RHS, 10362 getNotSCEV(FoundRHS), 10363 getNotSCEV(FoundLHS)); 10364 } 10365 10366 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values? 10367 template <typename MinMaxExprType> 10368 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, 10369 const SCEV *Candidate) { 10370 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr); 10371 if (!MinMaxExpr) 10372 return false; 10373 10374 return find(MinMaxExpr->operands(), Candidate) != MinMaxExpr->op_end(); 10375 } 10376 10377 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 10378 ICmpInst::Predicate Pred, 10379 const SCEV *LHS, const SCEV *RHS) { 10380 // If both sides are affine addrecs for the same loop, with equal 10381 // steps, and we know the recurrences don't wrap, then we only 10382 // need to check the predicate on the starting values. 10383 10384 if (!ICmpInst::isRelational(Pred)) 10385 return false; 10386 10387 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 10388 if (!LAR) 10389 return false; 10390 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 10391 if (!RAR) 10392 return false; 10393 if (LAR->getLoop() != RAR->getLoop()) 10394 return false; 10395 if (!LAR->isAffine() || !RAR->isAffine()) 10396 return false; 10397 10398 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 10399 return false; 10400 10401 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 10402 SCEV::FlagNSW : SCEV::FlagNUW; 10403 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) 10404 return false; 10405 10406 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); 10407 } 10408 10409 /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max 10410 /// expression? 10411 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, 10412 ICmpInst::Predicate Pred, 10413 const SCEV *LHS, const SCEV *RHS) { 10414 switch (Pred) { 10415 default: 10416 return false; 10417 10418 case ICmpInst::ICMP_SGE: 10419 std::swap(LHS, RHS); 10420 LLVM_FALLTHROUGH; 10421 case ICmpInst::ICMP_SLE: 10422 return 10423 // min(A, ...) <= A 10424 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) || 10425 // A <= max(A, ...) 10426 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); 10427 10428 case ICmpInst::ICMP_UGE: 10429 std::swap(LHS, RHS); 10430 LLVM_FALLTHROUGH; 10431 case ICmpInst::ICMP_ULE: 10432 return 10433 // min(A, ...) <= A 10434 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) || 10435 // A <= max(A, ...) 
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
         getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
         getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting compile time with analysis of overly large trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;

  // So far we only want to work with GT comparisons.
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }

  // For unsigned comparisons, try to reduce to the corresponding signed one.
  if (Pred == ICmpInst::ICMP_UGT)
    // We can replace an unsigned predicate with its signed counterpart if all
    // involved values are non-negative.
    // TODO: We could have better support for unsigned.
    if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
      // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
      // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
      // use this fact to prove that LHS and RHS are non-negative.
      const SCEV *MinusOne = getMinusOne(LHS->getType());
      if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
                                FoundRHS) &&
          isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
                                FoundRHS))
        Pred = ICmpInst::ICMP_SGT;
    }

  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Can the SGT predicate be proved trivially or using the found context?
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
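    // (If the add could wrap past the signed maximum, LL >= 0 and LR > RHS
    // would not imply LL + LR > RHS, so the split below would be unsound.)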
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getMinusOne(RHS->getType());

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop, which would
      // be cached as SCEVCouldNotCompute to avoid the infinite recursion. To
      // avoid this, we only want to create SCEVs that are constants in this
      // section. So we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
      // divide it by Denominator < 4, the result is at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2.
      // If we divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getMinusOne(WTy);
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for every
  // possible incoming value of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}

static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // zext x u<= sext x, sext x s<= zext x
  switch (Pred) {
  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  }
  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS, const SCEV *RHS) {
  return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
         isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  ConstantRange SatisfyingLHSRange =
      ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);

  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return SatisfyingLHSRange.contains(LHSRange);
}

bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
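  // For example (illustrative, 8-bit): if MaxRHS = 250 and the stride can be
  // as large as 10, then 250 + 9 = 259 > 255, so the IV may step past RHS and
  // wrap before the exit test fails.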
10755 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 10756 } 10757 10758 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 10759 bool IsSigned, bool NoWrap) { 10760 if (NoWrap) return false; 10761 10762 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10763 const SCEV *One = getOne(Stride->getType()); 10764 10765 if (IsSigned) { 10766 APInt MinRHS = getSignedRangeMin(RHS); 10767 APInt MinValue = APInt::getSignedMinValue(BitWidth); 10768 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10769 10770 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 10771 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 10772 } 10773 10774 APInt MinRHS = getUnsignedRangeMin(RHS); 10775 APInt MinValue = APInt::getMinValue(BitWidth); 10776 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10777 10778 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 10779 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 10780 } 10781 10782 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 10783 bool Equality) { 10784 const SCEV *One = getOne(Step->getType()); 10785 Delta = Equality ? getAddExpr(Delta, Step) 10786 : getAddExpr(Delta, getMinusSCEV(Step, One)); 10787 return getUDivExpr(Delta, Step); 10788 } 10789 10790 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 10791 const SCEV *Stride, 10792 const SCEV *End, 10793 unsigned BitWidth, 10794 bool IsSigned) { 10795 10796 assert(!isKnownNonPositive(Stride) && 10797 "Stride is expected strictly positive!"); 10798 // Calculate the maximum backedge count based on the range of values 10799 // permitted by Start, End, and Stride. 10800 const SCEV *MaxBECount; 10801 APInt MinStart = 10802 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 10803 10804 APInt StrideForMaxBECount = 10805 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 10806 10807 // We already know that the stride is positive, so we paper over conservatism 10808 // in our range computation by forcing StrideForMaxBECount to be at least one. 10809 // In theory this is unnecessary, but we expect MaxBECount to be a 10810 // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there 10811 // is nothing to constant fold it to). 10812 APInt One(BitWidth, 1, IsSigned); 10813 StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount); 10814 10815 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 10816 : APInt::getMaxValue(BitWidth); 10817 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 10818 10819 // Although End can be a MAX expression we estimate MaxEnd considering only 10820 // the case End = RHS of the loop termination condition. This is safe because 10821 // in the other case (End - Start) is zero, leading to a zero maximum backedge 10822 // taken count. 10823 APInt MaxEnd = IsSigned ? 
APIntOps::smin(getSignedRangeMax(End), Limit) 10824 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 10825 10826 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */, 10827 getConstant(StrideForMaxBECount) /* Step */, 10828 false /* Equality */); 10829 10830 return MaxBECount; 10831 } 10832 10833 ScalarEvolution::ExitLimit 10834 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 10835 const Loop *L, bool IsSigned, 10836 bool ControlsExit, bool AllowPredicates) { 10837 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10838 10839 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10840 bool PredicatedIV = false; 10841 10842 if (!IV && AllowPredicates) { 10843 // Try to make this an AddRec using runtime tests, in the first X 10844 // iterations of this loop, where X is the SCEV expression found by the 10845 // algorithm below. 10846 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10847 PredicatedIV = true; 10848 } 10849 10850 // Avoid weird loops 10851 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10852 return getCouldNotCompute(); 10853 10854 bool NoWrap = ControlsExit && 10855 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 10856 10857 const SCEV *Stride = IV->getStepRecurrence(*this); 10858 10859 bool PositiveStride = isKnownPositive(Stride); 10860 10861 // Avoid negative or zero stride values. 10862 if (!PositiveStride) { 10863 // We can compute the correct backedge taken count for loops with unknown 10864 // strides if we can prove that the loop is not an infinite loop with side 10865 // effects. Here's the loop structure we are trying to handle - 10866 // 10867 // i = start 10868 // do { 10869 // A[i] = i; 10870 // i += s; 10871 // } while (i < end); 10872 // 10873 // The backedge taken count for such loops is evaluated as - 10874 // (max(end, start + stride) - start - 1) /u stride 10875 // 10876 // The additional preconditions that we need to check to prove correctness 10877 // of the above formula is as follows - 10878 // 10879 // a) IV is either nuw or nsw depending upon signedness (indicated by the 10880 // NoWrap flag). 10881 // b) loop is single exit with no side effects. 10882 // 10883 // 10884 // Precondition a) implies that if the stride is negative, this is a single 10885 // trip loop. The backedge taken count formula reduces to zero in this case. 10886 // 10887 // Precondition b) implies that the unknown stride cannot be zero otherwise 10888 // we have UB. 10889 // 10890 // The positive stride case is the same as isKnownPositive(Stride) returning 10891 // true (original behavior of the function). 10892 // 10893 // We want to make sure that the stride is truly unknown as there are edge 10894 // cases where ScalarEvolution propagates no wrap flags to the 10895 // post-increment/decrement IV even though the increment/decrement operation 10896 // itself is wrapping. The computed backedge taken count may be wrong in 10897 // such cases. This is prevented by checking that the stride is not known to 10898 // be either positive or non-positive. 
For example, no wrap flags are 10899 // propagated to the post-increment IV of this loop with a trip count of 2 - 10900 // 10901 // unsigned char i; 10902 // for(i=127; i<128; i+=129) 10903 // A[i] = i; 10904 // 10905 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) || 10906 !loopHasNoSideEffects(L)) 10907 return getCouldNotCompute(); 10908 } else if (!Stride->isOne() && 10909 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) 10910 // Avoid proven overflow cases: this will ensure that the backedge taken 10911 // count will not generate any unsigned overflow. Relaxed no-overflow 10912 // conditions exploit NoWrapFlags, allowing to optimize in presence of 10913 // undefined behaviors like the case of C language. 10914 return getCouldNotCompute(); 10915 10916 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 10917 : ICmpInst::ICMP_ULT; 10918 const SCEV *Start = IV->getStart(); 10919 const SCEV *End = RHS; 10920 // When the RHS is not invariant, we do not know the end bound of the loop and 10921 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 10922 // calculate the MaxBECount, given the start, stride and max value for the end 10923 // bound of the loop (RHS), and the fact that IV does not overflow (which is 10924 // checked above). 10925 if (!isLoopInvariant(RHS, L)) { 10926 const SCEV *MaxBECount = computeMaxBECountForLT( 10927 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10928 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 10929 false /*MaxOrZero*/, Predicates); 10930 } 10931 // If the backedge is taken at least once, then it will be taken 10932 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 10933 // is the LHS value of the less-than comparison the first time it is evaluated 10934 // and End is the RHS. 10935 const SCEV *BECountIfBackedgeTaken = 10936 computeBECount(getMinusSCEV(End, Start), Stride, false); 10937 // If the loop entry is guarded by the result of the backedge test of the 10938 // first loop iteration, then we know the backedge will be taken at least 10939 // once and so the backedge taken count is as above. If not then we use the 10940 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 10941 // as if the backedge is taken at least once max(End,Start) is End and so the 10942 // result is as above, and if not max(End,Start) is Start so we get a backedge 10943 // count of zero. 10944 const SCEV *BECount; 10945 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 10946 BECount = BECountIfBackedgeTaken; 10947 else { 10948 // If we know that RHS >= Start in the context of loop, then we know that 10949 // max(RHS, Start) = RHS at this point. 10950 if (isLoopEntryGuardedByCond( 10951 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, RHS, Start)) 10952 End = RHS; 10953 else 10954 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 10955 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 10956 } 10957 10958 const SCEV *MaxBECount; 10959 bool MaxOrZero = false; 10960 if (isa<SCEVConstant>(BECount)) 10961 MaxBECount = BECount; 10962 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 10963 // If we know exactly how many times the backedge will be taken if it's 10964 // taken at least once, then the backedge count will either be that or 10965 // zero. 
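    // For example (illustrative): if End - Start is known to be exactly 16
    // and the stride is 4, the backedge is taken either ceil(16/4) = 4 times
    // or not at all.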
10966 MaxBECount = BECountIfBackedgeTaken; 10967 MaxOrZero = true; 10968 } else { 10969 MaxBECount = computeMaxBECountForLT( 10970 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10971 } 10972 10973 if (isa<SCEVCouldNotCompute>(MaxBECount) && 10974 !isa<SCEVCouldNotCompute>(BECount)) 10975 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 10976 10977 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 10978 } 10979 10980 ScalarEvolution::ExitLimit 10981 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 10982 const Loop *L, bool IsSigned, 10983 bool ControlsExit, bool AllowPredicates) { 10984 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10985 // We handle only IV > Invariant 10986 if (!isLoopInvariant(RHS, L)) 10987 return getCouldNotCompute(); 10988 10989 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10990 if (!IV && AllowPredicates) 10991 // Try to make this an AddRec using runtime tests, in the first X 10992 // iterations of this loop, where X is the SCEV expression found by the 10993 // algorithm below. 10994 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10995 10996 // Avoid weird loops 10997 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10998 return getCouldNotCompute(); 10999 11000 bool NoWrap = ControlsExit && 11001 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 11002 11003 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 11004 11005 // Avoid negative or zero stride values 11006 if (!isKnownPositive(Stride)) 11007 return getCouldNotCompute(); 11008 11009 // Avoid proven overflow cases: this will ensure that the backedge taken count 11010 // will not generate any unsigned overflow. Relaxed no-overflow conditions 11011 // exploit NoWrapFlags, allowing to optimize in presence of undefined 11012 // behaviors like the case of C language. 11013 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 11014 return getCouldNotCompute(); 11015 11016 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 11017 : ICmpInst::ICMP_UGT; 11018 11019 const SCEV *Start = IV->getStart(); 11020 const SCEV *End = RHS; 11021 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { 11022 // If we know that Start >= RHS in the context of loop, then we know that 11023 // min(RHS, Start) = RHS at this point. 11024 if (isLoopEntryGuardedByCond( 11025 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) 11026 End = RHS; 11027 else 11028 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 11029 } 11030 11031 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 11032 11033 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 11034 : getUnsignedRangeMax(Start); 11035 11036 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 11037 : getUnsignedRangeMin(Stride); 11038 11039 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 11040 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 11041 : APInt::getMinValue(BitWidth) + (MinStride - 1); 11042 11043 // Although End can be a MIN expression we estimate MinEnd considering only 11044 // the case End = RHS. This is safe because in the other case (Start - End) 11045 // is zero, leading to a zero maximum backedge taken count. 11046 APInt MinEnd = 11047 IsSigned ? 
APIntOps::smax(getSignedRangeMin(RHS), Limit) 11048 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 11049 11050 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) 11051 ? BECount 11052 : computeBECount(getConstant(MaxStart - MinEnd), 11053 getConstant(MinStride), false); 11054 11055 if (isa<SCEVCouldNotCompute>(MaxBECount)) 11056 MaxBECount = BECount; 11057 11058 return ExitLimit(BECount, MaxBECount, false, Predicates); 11059 } 11060 11061 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 11062 ScalarEvolution &SE) const { 11063 if (Range.isFullSet()) // Infinite loop. 11064 return SE.getCouldNotCompute(); 11065 11066 // If the start is a non-zero constant, shift the range to simplify things. 11067 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 11068 if (!SC->getValue()->isZero()) { 11069 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 11070 Operands[0] = SE.getZero(SC->getType()); 11071 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 11072 getNoWrapFlags(FlagNW)); 11073 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 11074 return ShiftedAddRec->getNumIterationsInRange( 11075 Range.subtract(SC->getAPInt()), SE); 11076 // This is strange and shouldn't happen. 11077 return SE.getCouldNotCompute(); 11078 } 11079 11080 // The only time we can solve this is when we have all constant indices. 11081 // Otherwise, we cannot determine the overflow conditions. 11082 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 11083 return SE.getCouldNotCompute(); 11084 11085 // Okay at this point we know that all elements of the chrec are constants and 11086 // that the start element is zero. 11087 11088 // First check to see if the range contains zero. If not, the first 11089 // iteration exits. 11090 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 11091 if (!Range.contains(APInt(BitWidth, 0))) 11092 return SE.getZero(getType()); 11093 11094 if (isAffine()) { 11095 // If this is an affine expression then we have this situation: 11096 // Solve {0,+,A} in Range === Ax in Range 11097 11098 // We know that zero is in the range. If A is positive then we know that 11099 // the upper value of the range must be the first possible exit value. 11100 // If A is negative then the lower of the range is the last possible loop 11101 // value. Also note that we already checked for a full range. 11102 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 11103 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 11104 11105 // The exit value should be (End+A)/A. 11106 APInt ExitVal = (End + A).udiv(A); 11107 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 11108 11109 // Evaluate at the exit value. If we really did fall out of the valid 11110 // range, then we computed our trip count, otherwise wrap around or other 11111 // things must have happened. 11112 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 11113 if (Range.contains(Val->getValue())) 11114 return SE.getCouldNotCompute(); // Something strange happened 11115 11116 // Ensure that the previous value is in the range. This is a sanity check. 
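    // (If the chrec evaluated at ExitVal - 1 were already outside Range, the
    // linear solve above would have been inconsistent.)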
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach the arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
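// Used by SCEVCollectAddRecMultiplies below to decide whether a multiply
// operand (transitively) contains an AddRec.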
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we have found an AddRec, do not walk its
      // operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExprs.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec = false;
          SCEVHasAddRec ContainsAddRecCollector(ContainsAddRec);
          visitAll(Op, ContainsAddRecCollector);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
/// in two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
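///
/// For example (illustrative): for {%A,+,(8 * %m * %o)}<%loop>, 1) collects
/// the stride 8 * %m * %o, which is then kept as a candidate term for the
/// array size computation in findArrayDimensions below.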
11291 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 11292 SmallVectorImpl<const SCEV *> &Terms) { 11293 SmallVector<const SCEV *, 4> Strides; 11294 SCEVCollectStrides StrideCollector(*this, Strides); 11295 visitAll(Expr, StrideCollector); 11296 11297 LLVM_DEBUG({ 11298 dbgs() << "Strides:\n"; 11299 for (const SCEV *S : Strides) 11300 dbgs() << *S << "\n"; 11301 }); 11302 11303 for (const SCEV *S : Strides) { 11304 SCEVCollectTerms TermCollector(Terms); 11305 visitAll(S, TermCollector); 11306 } 11307 11308 LLVM_DEBUG({ 11309 dbgs() << "Terms:\n"; 11310 for (const SCEV *T : Terms) 11311 dbgs() << *T << "\n"; 11312 }); 11313 11314 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 11315 visitAll(Expr, MulCollector); 11316 } 11317 11318 static bool findArrayDimensionsRec(ScalarEvolution &SE, 11319 SmallVectorImpl<const SCEV *> &Terms, 11320 SmallVectorImpl<const SCEV *> &Sizes) { 11321 int Last = Terms.size() - 1; 11322 const SCEV *Step = Terms[Last]; 11323 11324 // End of recursion. 11325 if (Last == 0) { 11326 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 11327 SmallVector<const SCEV *, 2> Qs; 11328 for (const SCEV *Op : M->operands()) 11329 if (!isa<SCEVConstant>(Op)) 11330 Qs.push_back(Op); 11331 11332 Step = SE.getMulExpr(Qs); 11333 } 11334 11335 Sizes.push_back(Step); 11336 return true; 11337 } 11338 11339 for (const SCEV *&Term : Terms) { 11340 // Normalize the terms before the next call to findArrayDimensionsRec. 11341 const SCEV *Q, *R; 11342 SCEVDivision::divide(SE, Term, Step, &Q, &R); 11343 11344 // Bail out when GCD does not evenly divide one of the terms. 11345 if (!R->isZero()) 11346 return false; 11347 11348 Term = Q; 11349 } 11350 11351 // Remove all SCEVConstants. 11352 Terms.erase( 11353 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 11354 Terms.end()); 11355 11356 if (Terms.size() > 0) 11357 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 11358 return false; 11359 11360 Sizes.push_back(Step); 11361 return true; 11362 } 11363 11364 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 11365 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11366 for (const SCEV *T : Terms) 11367 if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); })) 11368 return true; 11369 11370 return false; 11371 } 11372 11373 // Return the number of product terms in S. 11374 static inline int numberOfTerms(const SCEV *S) { 11375 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11376 return Expr->getNumOperands(); 11377 return 1; 11378 } 11379 11380 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11381 if (isa<SCEVConstant>(T)) 11382 return nullptr; 11383 11384 if (isa<SCEVUnknown>(T)) 11385 return T; 11386 11387 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11388 SmallVector<const SCEV *, 2> Factors; 11389 for (const SCEV *Op : M->operands()) 11390 if (!isa<SCEVConstant>(Op)) 11391 Factors.push_back(Op); 11392 11393 return SE.getMulExpr(Factors); 11394 } 11395 11396 return T; 11397 } 11398 11399 /// Return the size of an element read or written by Inst. 
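/// For example (illustrative): for a `store double %v, double* %p`, this
/// returns the SCEV for sizeof(double), i.e. 8 bytes.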
11400 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 11401 Type *Ty; 11402 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 11403 Ty = Store->getValueOperand()->getType(); 11404 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 11405 Ty = Load->getType(); 11406 else 11407 return nullptr; 11408 11409 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 11410 return getSizeOfExpr(ETy, Ty); 11411 } 11412 11413 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 11414 SmallVectorImpl<const SCEV *> &Sizes, 11415 const SCEV *ElementSize) { 11416 if (Terms.size() < 1 || !ElementSize) 11417 return; 11418 11419 // Early return when Terms do not contain parameters: we do not delinearize 11420 // non parametric SCEVs. 11421 if (!containsParameters(Terms)) 11422 return; 11423 11424 LLVM_DEBUG({ 11425 dbgs() << "Terms:\n"; 11426 for (const SCEV *T : Terms) 11427 dbgs() << *T << "\n"; 11428 }); 11429 11430 // Remove duplicates. 11431 array_pod_sort(Terms.begin(), Terms.end()); 11432 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 11433 11434 // Put larger terms first. 11435 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { 11436 return numberOfTerms(LHS) > numberOfTerms(RHS); 11437 }); 11438 11439 // Try to divide all terms by the element size. If term is not divisible by 11440 // element size, proceed with the original term. 11441 for (const SCEV *&Term : Terms) { 11442 const SCEV *Q, *R; 11443 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 11444 if (!Q->isZero()) 11445 Term = Q; 11446 } 11447 11448 SmallVector<const SCEV *, 4> NewTerms; 11449 11450 // Remove constant factors. 11451 for (const SCEV *T : Terms) 11452 if (const SCEV *NewT = removeConstantFactors(*this, T)) 11453 NewTerms.push_back(NewT); 11454 11455 LLVM_DEBUG({ 11456 dbgs() << "Terms after sorting:\n"; 11457 for (const SCEV *T : NewTerms) 11458 dbgs() << *T << "\n"; 11459 }); 11460 11461 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 11462 Sizes.clear(); 11463 return; 11464 } 11465 11466 // The last element to be pushed into Sizes is the size of an element. 11467 Sizes.push_back(ElementSize); 11468 11469 LLVM_DEBUG({ 11470 dbgs() << "Sizes:\n"; 11471 for (const SCEV *S : Sizes) 11472 dbgs() << *S << "\n"; 11473 }); 11474 } 11475 11476 void ScalarEvolution::computeAccessFunctions( 11477 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 11478 SmallVectorImpl<const SCEV *> &Sizes) { 11479 // Early exit in case this SCEV is not an affine multivariate function. 11480 if (Sizes.empty()) 11481 return; 11482 11483 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 11484 if (!AR->isAffine()) 11485 return; 11486 11487 const SCEV *Res = Expr; 11488 int Last = Sizes.size() - 1; 11489 for (int i = Last; i >= 0; i--) { 11490 const SCEV *Q, *R; 11491 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 11492 11493 LLVM_DEBUG({ 11494 dbgs() << "Res: " << *Res << "\n"; 11495 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 11496 dbgs() << "Res divided by Sizes[i]:\n"; 11497 dbgs() << "Quotient: " << *Q << "\n"; 11498 dbgs() << "Remainder: " << *R << "\n"; 11499 }); 11500 11501 Res = Q; 11502 11503 // Do not record the last subscript corresponding to the size of elements in 11504 // the array. 11505 if (i == Last) { 11506 11507 // Bail out if the remainder is too complex. 
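      // (An AddRec remainder would mean the offset within one element still
      // varies with some loop, so the chosen sizes cannot describe this
      // access.)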
11508 if (isa<SCEVAddRecExpr>(R)) { 11509 Subscripts.clear(); 11510 Sizes.clear(); 11511 return; 11512 } 11513 11514 continue; 11515 } 11516 11517 // Record the access function for the current subscript. 11518 Subscripts.push_back(R); 11519 } 11520 11521 // Also push in last position the remainder of the last division: it will be 11522 // the access function of the innermost dimension. 11523 Subscripts.push_back(Res); 11524 11525 std::reverse(Subscripts.begin(), Subscripts.end()); 11526 11527 LLVM_DEBUG({ 11528 dbgs() << "Subscripts:\n"; 11529 for (const SCEV *S : Subscripts) 11530 dbgs() << *S << "\n"; 11531 }); 11532 } 11533 11534 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 11535 /// sizes of an array access. Returns the remainder of the delinearization that 11536 /// is the offset start of the array. The SCEV->delinearize algorithm computes 11537 /// the multiples of SCEV coefficients: that is a pattern matching of sub 11538 /// expressions in the stride and base of a SCEV corresponding to the 11539 /// computation of a GCD (greatest common divisor) of base and stride. When 11540 /// SCEV->delinearize fails, it returns the SCEV unchanged. 11541 /// 11542 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 11543 /// 11544 /// void foo(long n, long m, long o, double A[n][m][o]) { 11545 /// 11546 /// for (long i = 0; i < n; i++) 11547 /// for (long j = 0; j < m; j++) 11548 /// for (long k = 0; k < o; k++) 11549 /// A[i][j][k] = 1.0; 11550 /// } 11551 /// 11552 /// the delinearization input is the following AddRec SCEV: 11553 /// 11554 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 11555 /// 11556 /// From this SCEV, we are able to say that the base offset of the access is %A 11557 /// because it appears as an offset that does not divide any of the strides in 11558 /// the loops: 11559 /// 11560 /// CHECK: Base offset: %A 11561 /// 11562 /// and then SCEV->delinearize determines the size of some of the dimensions of 11563 /// the array as these are the multiples by which the strides are happening: 11564 /// 11565 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 11566 /// 11567 /// Note that the outermost dimension remains of UnknownSize because there are 11568 /// no strides that would help identifying the size of the last dimension: when 11569 /// the array has been statically allocated, one could compute the size of that 11570 /// dimension by dividing the overall size of the array by the size of the known 11571 /// dimensions: %m * %o * 8. 11572 /// 11573 /// Finally delinearize provides the access functions for the array reference 11574 /// that does correspond to A[i][j][k] of the above C testcase: 11575 /// 11576 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 11577 /// 11578 /// The testcases are checking the output of a function pass: 11579 /// DelinearizationPass that walks through all loads and stores of a function 11580 /// asking for the SCEV of the memory access with respect to all enclosing 11581 /// loops, calling SCEV->delinearize on that and printing the results. 11582 void ScalarEvolution::delinearize(const SCEV *Expr, 11583 SmallVectorImpl<const SCEV *> &Subscripts, 11584 SmallVectorImpl<const SCEV *> &Sizes, 11585 const SCEV *ElementSize) { 11586 // First step: collect parametric terms. 
11587 SmallVector<const SCEV *, 4> Terms; 11588 collectParametricTerms(Expr, Terms); 11589 11590 if (Terms.empty()) 11591 return; 11592 11593 // Second step: find subscript sizes. 11594 findArrayDimensions(Terms, Sizes, ElementSize); 11595 11596 if (Sizes.empty()) 11597 return; 11598 11599 // Third step: compute the access functions for each subscript. 11600 computeAccessFunctions(Expr, Subscripts, Sizes); 11601 11602 if (Subscripts.empty()) 11603 return; 11604 11605 LLVM_DEBUG({ 11606 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11607 dbgs() << "ArrayDecl[UnknownSize]"; 11608 for (const SCEV *S : Sizes) 11609 dbgs() << "[" << *S << "]"; 11610 11611 dbgs() << "\nArrayRef"; 11612 for (const SCEV *S : Subscripts) 11613 dbgs() << "[" << *S << "]"; 11614 dbgs() << "\n"; 11615 }); 11616 } 11617 11618 bool ScalarEvolution::getIndexExpressionsFromGEP( 11619 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts, 11620 SmallVectorImpl<int> &Sizes) { 11621 assert(Subscripts.empty() && Sizes.empty() && 11622 "Expected output lists to be empty on entry to this function."); 11623 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP"); 11624 Type *Ty = GEP->getPointerOperandType(); 11625 bool DroppedFirstDim = false; 11626 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 11627 const SCEV *Expr = getSCEV(GEP->getOperand(i)); 11628 if (i == 1) { 11629 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) { 11630 Ty = PtrTy->getElementType(); 11631 } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) { 11632 Ty = ArrayTy->getElementType(); 11633 } else { 11634 Subscripts.clear(); 11635 Sizes.clear(); 11636 return false; 11637 } 11638 if (auto *Const = dyn_cast<SCEVConstant>(Expr)) 11639 if (Const->getValue()->isZero()) { 11640 DroppedFirstDim = true; 11641 continue; 11642 } 11643 Subscripts.push_back(Expr); 11644 continue; 11645 } 11646 11647 auto *ArrayTy = dyn_cast<ArrayType>(Ty); 11648 if (!ArrayTy) { 11649 Subscripts.clear(); 11650 Sizes.clear(); 11651 return false; 11652 } 11653 11654 Subscripts.push_back(Expr); 11655 if (!(DroppedFirstDim && i == 2)) 11656 Sizes.push_back(ArrayTy->getNumElements()); 11657 11658 Ty = ArrayTy->getElementType(); 11659 } 11660 return !Subscripts.empty(); 11661 } 11662 11663 //===----------------------------------------------------------------------===// 11664 // SCEVCallbackVH Class Implementation 11665 //===----------------------------------------------------------------------===// 11666 11667 void ScalarEvolution::SCEVCallbackVH::deleted() { 11668 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11669 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11670 SE->ConstantEvolutionLoopExitValue.erase(PN); 11671 SE->eraseValueFromMap(getValPtr()); 11672 // this now dangles! 11673 } 11674 11675 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11676 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11677 11678 // Forget all the expressions associated with users of the old value, 11679 // so that future queries will recompute the expressions using the new 11680 // value. 11681 Value *Old = getValPtr(); 11682 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 11683 SmallPtrSet<User *, 8> Visited; 11684 while (!Worklist.empty()) { 11685 User *U = Worklist.pop_back_val(); 11686 // Deleting the Old value will cause this to dangle. Postpone 11687 // that until everything else is done. 
11688 if (U == Old) 11689 continue; 11690 if (!Visited.insert(U).second) 11691 continue; 11692 if (PHINode *PN = dyn_cast<PHINode>(U)) 11693 SE->ConstantEvolutionLoopExitValue.erase(PN); 11694 SE->eraseValueFromMap(U); 11695 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 11696 } 11697 // Delete the Old value. 11698 if (PHINode *PN = dyn_cast<PHINode>(Old)) 11699 SE->ConstantEvolutionLoopExitValue.erase(PN); 11700 SE->eraseValueFromMap(Old); 11701 // this now dangles! 11702 } 11703 11704 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 11705 : CallbackVH(V), SE(se) {} 11706 11707 //===----------------------------------------------------------------------===// 11708 // ScalarEvolution Class Implementation 11709 //===----------------------------------------------------------------------===// 11710 11711 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 11712 AssumptionCache &AC, DominatorTree &DT, 11713 LoopInfo &LI) 11714 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 11715 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 11716 LoopDispositions(64), BlockDispositions(64) { 11717 // To use guards for proving predicates, we need to scan every instruction in 11718 // relevant basic blocks, and not just terminators. Doing this is a waste of 11719 // time if the IR does not actually contain any calls to 11720 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 11721 // 11722 // This pessimizes the case where a pass that preserves ScalarEvolution wants 11723 // to _add_ guards to the module when there weren't any before, and wants 11724 // ScalarEvolution to optimize based on those guards. For now we prefer to be 11725 // efficient in lieu of being smart in that rather obscure case. 
11726 11727 auto *GuardDecl = F.getParent()->getFunction( 11728 Intrinsic::getName(Intrinsic::experimental_guard)); 11729 HasGuards = GuardDecl && !GuardDecl->use_empty(); 11730 } 11731 11732 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 11733 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 11734 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 11735 ValueExprMap(std::move(Arg.ValueExprMap)), 11736 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 11737 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 11738 PendingMerges(std::move(Arg.PendingMerges)), 11739 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 11740 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 11741 PredicatedBackedgeTakenCounts( 11742 std::move(Arg.PredicatedBackedgeTakenCounts)), 11743 ConstantEvolutionLoopExitValue( 11744 std::move(Arg.ConstantEvolutionLoopExitValue)), 11745 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 11746 LoopDispositions(std::move(Arg.LoopDispositions)), 11747 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 11748 BlockDispositions(std::move(Arg.BlockDispositions)), 11749 UnsignedRanges(std::move(Arg.UnsignedRanges)), 11750 SignedRanges(std::move(Arg.SignedRanges)), 11751 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 11752 UniquePreds(std::move(Arg.UniquePreds)), 11753 SCEVAllocator(std::move(Arg.SCEVAllocator)), 11754 LoopUsers(std::move(Arg.LoopUsers)), 11755 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 11756 FirstUnknown(Arg.FirstUnknown) { 11757 Arg.FirstUnknown = nullptr; 11758 } 11759 11760 ScalarEvolution::~ScalarEvolution() { 11761 // Iterate through all the SCEVUnknown instances and call their 11762 // destructors, so that they release their references to their values. 11763 for (SCEVUnknown *U = FirstUnknown; U;) { 11764 SCEVUnknown *Tmp = U; 11765 U = U->Next; 11766 Tmp->~SCEVUnknown(); 11767 } 11768 FirstUnknown = nullptr; 11769 11770 ExprValueMap.clear(); 11771 ValueExprMap.clear(); 11772 HasRecMap.clear(); 11773 11774 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 11775 // that a loop had multiple computable exits. 
11776 for (auto &BTCI : BackedgeTakenCounts) 11777 BTCI.second.clear(); 11778 for (auto &BTCI : PredicatedBackedgeTakenCounts) 11779 BTCI.second.clear(); 11780 11781 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 11782 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 11783 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 11784 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 11785 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 11786 } 11787 11788 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 11789 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 11790 } 11791 11792 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 11793 const Loop *L) { 11794 // Print all inner loops first 11795 for (Loop *I : *L) 11796 PrintLoopInfo(OS, SE, I); 11797 11798 OS << "Loop "; 11799 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11800 OS << ": "; 11801 11802 SmallVector<BasicBlock *, 8> ExitingBlocks; 11803 L->getExitingBlocks(ExitingBlocks); 11804 if (ExitingBlocks.size() != 1) 11805 OS << "<multiple exits> "; 11806 11807 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 11808 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 11809 else 11810 OS << "Unpredictable backedge-taken count.\n"; 11811 11812 if (ExitingBlocks.size() > 1) 11813 for (BasicBlock *ExitingBlock : ExitingBlocks) { 11814 OS << " exit count for " << ExitingBlock->getName() << ": " 11815 << *SE->getExitCount(L, ExitingBlock) << "\n"; 11816 } 11817 11818 OS << "Loop "; 11819 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11820 OS << ": "; 11821 11822 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 11823 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 11824 if (SE->isBackedgeTakenCountMaxOrZero(L)) 11825 OS << ", actual taken count either this or zero."; 11826 } else { 11827 OS << "Unpredictable max backedge-taken count. "; 11828 } 11829 11830 OS << "\n" 11831 "Loop "; 11832 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11833 OS << ": "; 11834 11835 SCEVUnionPredicate Pred; 11836 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 11837 if (!isa<SCEVCouldNotCompute>(PBT)) { 11838 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 11839 OS << " Predicates:\n"; 11840 Pred.print(OS, 4); 11841 } else { 11842 OS << "Unpredictable predicated backedge-taken count. "; 11843 } 11844 OS << "\n"; 11845 11846 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11847 OS << "Loop "; 11848 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11849 OS << ": "; 11850 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 11851 } 11852 } 11853 11854 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 11855 switch (LD) { 11856 case ScalarEvolution::LoopVariant: 11857 return "Variant"; 11858 case ScalarEvolution::LoopInvariant: 11859 return "Invariant"; 11860 case ScalarEvolution::LoopComputable: 11861 return "Computable"; 11862 } 11863 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 11864 } 11865 11866 void ScalarEvolution::print(raw_ostream &OS) const { 11867 // ScalarEvolution's implementation of the print method is to print 11868 // out SCEV values of all instructions that are interesting. Doing 11869 // this potentially causes it to create new SCEV objects though, 11870 // which technically conflicts with the const qualifier. 
This isn't 11871 // observable from outside the class though, so casting away the 11872 // const isn't dangerous. 11873 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11874 11875 if (ClassifyExpressions) { 11876 OS << "Classifying expressions for: "; 11877 F.printAsOperand(OS, /*PrintType=*/false); 11878 OS << "\n"; 11879 for (Instruction &I : instructions(F)) 11880 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 11881 OS << I << '\n'; 11882 OS << " --> "; 11883 const SCEV *SV = SE.getSCEV(&I); 11884 SV->print(OS); 11885 if (!isa<SCEVCouldNotCompute>(SV)) { 11886 OS << " U: "; 11887 SE.getUnsignedRange(SV).print(OS); 11888 OS << " S: "; 11889 SE.getSignedRange(SV).print(OS); 11890 } 11891 11892 const Loop *L = LI.getLoopFor(I.getParent()); 11893 11894 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 11895 if (AtUse != SV) { 11896 OS << " --> "; 11897 AtUse->print(OS); 11898 if (!isa<SCEVCouldNotCompute>(AtUse)) { 11899 OS << " U: "; 11900 SE.getUnsignedRange(AtUse).print(OS); 11901 OS << " S: "; 11902 SE.getSignedRange(AtUse).print(OS); 11903 } 11904 } 11905 11906 if (L) { 11907 OS << "\t\t" "Exits: "; 11908 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 11909 if (!SE.isLoopInvariant(ExitValue, L)) { 11910 OS << "<<Unknown>>"; 11911 } else { 11912 OS << *ExitValue; 11913 } 11914 11915 bool First = true; 11916 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 11917 if (First) { 11918 OS << "\t\t" "LoopDispositions: { "; 11919 First = false; 11920 } else { 11921 OS << ", "; 11922 } 11923 11924 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11925 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 11926 } 11927 11928 for (auto *InnerL : depth_first(L)) { 11929 if (InnerL == L) 11930 continue; 11931 if (First) { 11932 OS << "\t\t" "LoopDispositions: { "; 11933 First = false; 11934 } else { 11935 OS << ", "; 11936 } 11937 11938 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11939 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 11940 } 11941 11942 OS << " }"; 11943 } 11944 11945 OS << "\n"; 11946 } 11947 } 11948 11949 OS << "Determining loop execution counts for: "; 11950 F.printAsOperand(OS, /*PrintType=*/false); 11951 OS << "\n"; 11952 for (Loop *I : LI) 11953 PrintLoopInfo(OS, &SE, I); 11954 } 11955 11956 ScalarEvolution::LoopDisposition 11957 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 11958 auto &Values = LoopDispositions[S]; 11959 for (auto &V : Values) { 11960 if (V.getPointer() == L) 11961 return V.getInt(); 11962 } 11963 Values.emplace_back(L, LoopVariant); 11964 LoopDisposition D = computeLoopDisposition(S, L); 11965 auto &Values2 = LoopDispositions[S]; 11966 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11967 if (V.getPointer() == L) { 11968 V.setInt(D); 11969 break; 11970 } 11971 } 11972 return D; 11973 } 11974 11975 ScalarEvolution::LoopDisposition 11976 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 11977 switch (S->getSCEVType()) { 11978 case scConstant: 11979 return LoopInvariant; 11980 case scTruncate: 11981 case scZeroExtend: 11982 case scSignExtend: 11983 return getLoopDisposition(cast<SCEVIntegralCastExpr>(S)->getOperand(), L); 11984 case scAddRecExpr: { 11985 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11986 11987 // If L is the addrec's loop, it's computable. 
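    // (Computable: the recurrence gives the value as an explicit function of
    // L's iteration number.)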
11988 if (AR->getLoop() == L) 11989 return LoopComputable; 11990 11991 // Add recurrences are never invariant in the function-body (null loop). 11992 if (!L) 11993 return LoopVariant; 11994 11995 // Everything that is not defined at loop entry is variant. 11996 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 11997 return LoopVariant; 11998 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 11999 " dominate the contained loop's header?"); 12000 12001 // This recurrence is invariant w.r.t. L if AR's loop contains L. 12002 if (AR->getLoop()->contains(L)) 12003 return LoopInvariant; 12004 12005 // This recurrence is variant w.r.t. L if any of its operands 12006 // are variant. 12007 for (auto *Op : AR->operands()) 12008 if (!isLoopInvariant(Op, L)) 12009 return LoopVariant; 12010 12011 // Otherwise it's loop-invariant. 12012 return LoopInvariant; 12013 } 12014 case scAddExpr: 12015 case scMulExpr: 12016 case scUMaxExpr: 12017 case scSMaxExpr: 12018 case scUMinExpr: 12019 case scSMinExpr: { 12020 bool HasVarying = false; 12021 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 12022 LoopDisposition D = getLoopDisposition(Op, L); 12023 if (D == LoopVariant) 12024 return LoopVariant; 12025 if (D == LoopComputable) 12026 HasVarying = true; 12027 } 12028 return HasVarying ? LoopComputable : LoopInvariant; 12029 } 12030 case scUDivExpr: { 12031 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12032 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 12033 if (LD == LoopVariant) 12034 return LoopVariant; 12035 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 12036 if (RD == LoopVariant) 12037 return LoopVariant; 12038 return (LD == LoopInvariant && RD == LoopInvariant) ? 12039 LoopInvariant : LoopComputable; 12040 } 12041 case scUnknown: 12042 // All non-instruction values are loop invariant. All instructions are loop 12043 // invariant if they are not contained in the specified loop. 12044 // Instructions are never considered invariant in the function body 12045 // (null loop) because they are defined within the "loop". 12046 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 12047 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 12048 return LoopInvariant; 12049 case scCouldNotCompute: 12050 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12051 } 12052 llvm_unreachable("Unknown SCEV kind!"); 12053 } 12054 12055 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 12056 return getLoopDisposition(S, L) == LoopInvariant; 12057 } 12058 12059 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 12060 return getLoopDisposition(S, L) == LoopComputable; 12061 } 12062 12063 ScalarEvolution::BlockDisposition 12064 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12065 auto &Values = BlockDispositions[S]; 12066 for (auto &V : Values) { 12067 if (V.getPointer() == BB) 12068 return V.getInt(); 12069 } 12070 Values.emplace_back(BB, DoesNotDominateBlock); 12071 BlockDisposition D = computeBlockDisposition(S, BB); 12072 auto &Values2 = BlockDispositions[S]; 12073 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12074 if (V.getPointer() == BB) { 12075 V.setInt(D); 12076 break; 12077 } 12078 } 12079 return D; 12080 } 12081 12082 ScalarEvolution::BlockDisposition 12083 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12084 switch (S->getSCEVType()) { 12085 case scConstant: 12086 return ProperlyDominatesBlock; 12087 case scTruncate: 12088 case scZeroExtend: 12089 case scSignExtend: 12090 return getBlockDisposition(cast<SCEVIntegralCastExpr>(S)->getOperand(), BB); 12091 case scAddRecExpr: { 12092 // This uses a "dominates" query instead of "properly dominates" query 12093 // to test for proper dominance too, because the instruction which 12094 // produces the addrec's value is a PHI, and a PHI effectively properly 12095 // dominates its entire containing block. 12096 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12097 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 12098 return DoesNotDominateBlock; 12099 12100 // Fall through into SCEVNAryExpr handling. 12101 LLVM_FALLTHROUGH; 12102 } 12103 case scAddExpr: 12104 case scMulExpr: 12105 case scUMaxExpr: 12106 case scSMaxExpr: 12107 case scUMinExpr: 12108 case scSMinExpr: { 12109 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 12110 bool Proper = true; 12111 for (const SCEV *NAryOp : NAry->operands()) { 12112 BlockDisposition D = getBlockDisposition(NAryOp, BB); 12113 if (D == DoesNotDominateBlock) 12114 return DoesNotDominateBlock; 12115 if (D == DominatesBlock) 12116 Proper = false; 12117 } 12118 return Proper ? ProperlyDominatesBlock : DominatesBlock; 12119 } 12120 case scUDivExpr: { 12121 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12122 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 12123 BlockDisposition LD = getBlockDisposition(LHS, BB); 12124 if (LD == DoesNotDominateBlock) 12125 return DoesNotDominateBlock; 12126 BlockDisposition RD = getBlockDisposition(RHS, BB); 12127 if (RD == DoesNotDominateBlock) 12128 return DoesNotDominateBlock; 12129 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
12130       ProperlyDominatesBlock : DominatesBlock;
12131   }
12132   case scUnknown:
12133     if (Instruction *I =
12134           dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
12135       if (I->getParent() == BB)
12136         return DominatesBlock;
12137       if (DT.properlyDominates(I->getParent(), BB))
12138         return ProperlyDominatesBlock;
12139       return DoesNotDominateBlock;
12140     }
12141     return ProperlyDominatesBlock;
12142   case scCouldNotCompute:
12143     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
12144   }
12145   llvm_unreachable("Unknown SCEV kind!");
12146 }
12147
12148 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
12149   return getBlockDisposition(S, BB) >= DominatesBlock;
12150 }
12151
12152 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
12153   return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
12154 }
12155
12156 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
12157   return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
12158 }
12159
12160 bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
12161   auto IsS = [&](const SCEV *X) { return S == X; };
12162   auto ContainsS = [&](const SCEV *X) {
12163     return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
12164   };
12165   return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
12166 }
12167
12168 void
12169 ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
12170   ValuesAtScopes.erase(S);
12171   LoopDispositions.erase(S);
12172   BlockDispositions.erase(S);
12173   UnsignedRanges.erase(S);
12174   SignedRanges.erase(S);
12175   ExprValueMap.erase(S);
12176   HasRecMap.erase(S);
12177   MinTrailingZerosCache.erase(S);
12178
12179   for (auto I = PredicatedSCEVRewrites.begin();
12180        I != PredicatedSCEVRewrites.end();) {
12181     std::pair<const SCEV *, const Loop *> Entry = I->first;
12182     if (Entry.first == S)
12183       PredicatedSCEVRewrites.erase(I++);
12184     else
12185       ++I;
12186   }
12187
12188   auto RemoveSCEVFromBackedgeMap =
12189       [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
12190         for (auto I = Map.begin(), E = Map.end(); I != E;) {
12191           BackedgeTakenInfo &BEInfo = I->second;
12192           if (BEInfo.hasOperand(S, this)) {
12193             BEInfo.clear();
12194             Map.erase(I++);
12195           } else
12196             ++I;
12197         }
12198       };
12199
12200   RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
12201   RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
12202 }
12203
12204 void
12205 ScalarEvolution::getUsedLoops(const SCEV *S,
12206                               SmallPtrSetImpl<const Loop *> &LoopsUsed) {
12207   struct FindUsedLoops {
12208     FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
12209         : LoopsUsed(LoopsUsed) {}
12210     SmallPtrSetImpl<const Loop *> &LoopsUsed;
12211     bool follow(const SCEV *S) {
12212       if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
12213         LoopsUsed.insert(AR->getLoop());
12214       return true;
12215     }
12216
12217     bool isDone() const { return false; }
12218   };
12219
12220   FindUsedLoops F(LoopsUsed);
12221   SCEVTraversal<FindUsedLoops>(F).visitAll(S);
12222 }
12223
12224 void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
12225   SmallPtrSet<const Loop *, 8> LoopsUsed;
12226   getUsedLoops(S, LoopsUsed);
12227   for (auto *L : LoopsUsed)
12228     LoopUsers[L].push_back(S);
12229 }
12230
12231 void ScalarEvolution::verify() const {
12232   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
12233   ScalarEvolution SE2(F, TLI, AC, DT, LI);
12234
12235   SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());
12236
12237   // Maps SCEV expressions from one ScalarEvolution "universe" to another.
12238   struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
12239     SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
12240
12241     const SCEV *visitConstant(const SCEVConstant *Constant) {
12242       return SE.getConstant(Constant->getAPInt());
12243     }
12244
12245     const SCEV *visitUnknown(const SCEVUnknown *Expr) {
12246       return SE.getUnknown(Expr->getValue());
12247     }
12248
12249     const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
12250       return SE.getCouldNotCompute();
12251     }
12252   };
12253
12254   SCEVMapper SCM(SE2);
12255
12256   while (!LoopStack.empty()) {
12257     auto *L = LoopStack.pop_back_val();
12258     LoopStack.insert(LoopStack.end(), L->begin(), L->end());
12259
12260     auto *CurBECount = SCM.visit(
12261         const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
12262     auto *NewBECount = SE2.getBackedgeTakenCount(L);
12263
12264     if (CurBECount == SE2.getCouldNotCompute() ||
12265         NewBECount == SE2.getCouldNotCompute()) {
12266       // NB! This situation is legal, but is very suspicious -- whatever pass
12267       // changed the loop to make a trip count go from could-not-compute to
12268       // computable or vice-versa *should have* invalidated SCEV. However, we
12269       // choose not to assert here (for now) since we don't want false
12270       // positives.
12271       continue;
12272     }
12273
12274     if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
12275       // SCEV treats "undef" as an unknown but consistent value (i.e. it does
12276       // not propagate undef aggressively). This means we can (and do) fail
12277       // verification in cases where a transform makes the trip count of a loop
12278       // go from "undef" to "undef+1" (say). The transform is fine, since in
12279       // both cases the loop iterates "undef" times, but SCEV thinks we
12280       // increased the trip count of the loop by 1 incorrectly.
12281       continue;
12282     }
12283
12284     if (SE.getTypeSizeInBits(CurBECount->getType()) >
12285         SE.getTypeSizeInBits(NewBECount->getType()))
12286       NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
12287     else if (SE.getTypeSizeInBits(CurBECount->getType()) <
12288              SE.getTypeSizeInBits(NewBECount->getType()))
12289       CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());
12290
12291     const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);
12292
12293     // Unless VerifySCEVStrict is set, we only compare constant deltas.
12294     if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
12295       dbgs() << "Trip Count for " << *L << " Changed!\n";
12296       dbgs() << "Old: " << *CurBECount << "\n";
12297       dbgs() << "New: " << *NewBECount << "\n";
12298       dbgs() << "Delta: " << *Delta << "\n";
12299       std::abort();
12300     }
12301   }
12302
12303   // Collect all valid loops currently in LoopInfo.
12304   SmallPtrSet<Loop *, 32> ValidLoops;
12305   SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
12306   while (!Worklist.empty()) {
12307     Loop *L = Worklist.pop_back_val();
12308     if (ValidLoops.contains(L))
12309       continue;
12310     ValidLoops.insert(L);
12311     Worklist.append(L->begin(), L->end());
12312   }
12313   // Check for SCEV expressions referencing invalid/deleted loops.
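  // Editorial note: a failure of the assert below usually means a transform
  // deleted or restructured a loop without telling SCEV. A minimal sketch of
  // the expected invalidation protocol (LoopToDelete is a hypothetical name):
  //
  //   SE.forgetLoop(LoopToDelete); // drop cached addrecs and trip counts
  //   LI.erase(LoopToDelete);      // only then remove the loop from LoopInfo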
12314 for (auto &KV : ValueExprMap) { 12315 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second); 12316 if (!AR) 12317 continue; 12318 assert(ValidLoops.contains(AR->getLoop()) && 12319 "AddRec references invalid loop"); 12320 } 12321 } 12322 12323 bool ScalarEvolution::invalidate( 12324 Function &F, const PreservedAnalyses &PA, 12325 FunctionAnalysisManager::Invalidator &Inv) { 12326 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 12327 // of its dependencies is invalidated. 12328 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 12329 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 12330 Inv.invalidate<AssumptionAnalysis>(F, PA) || 12331 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 12332 Inv.invalidate<LoopAnalysis>(F, PA); 12333 } 12334 12335 AnalysisKey ScalarEvolutionAnalysis::Key; 12336 12337 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 12338 FunctionAnalysisManager &AM) { 12339 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 12340 AM.getResult<AssumptionAnalysis>(F), 12341 AM.getResult<DominatorTreeAnalysis>(F), 12342 AM.getResult<LoopAnalysis>(F)); 12343 } 12344 12345 PreservedAnalyses 12346 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 12347 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 12348 return PreservedAnalyses::all(); 12349 } 12350 12351 PreservedAnalyses 12352 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 12353 // For compatibility with opt's -analyze feature under legacy pass manager 12354 // which was not ported to NPM. This keeps tests using 12355 // update_analyze_test_checks.py working. 12356 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 12357 << F.getName() << "':\n"; 12358 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 12359 return PreservedAnalyses::all(); 12360 } 12361 12362 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 12363 "Scalar Evolution Analysis", false, true) 12364 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 12365 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 12366 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 12367 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 12368 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 12369 "Scalar Evolution Analysis", false, true) 12370 12371 char ScalarEvolutionWrapperPass::ID = 0; 12372 12373 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 12374 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 12375 } 12376 12377 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 12378 SE.reset(new ScalarEvolution( 12379 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 12380 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 12381 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 12382 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 12383 return false; 12384 } 12385 12386 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 12387 12388 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 12389 SE->print(OS); 12390 } 12391 12392 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 12393 if (!VerifySCEV) 12394 return; 12395 12396 SE->verify(); 12397 } 12398 12399 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 12400 AU.setPreservesAll(); 12401 AU.addRequiredTransitive<AssumptionCacheTracker>(); 12402 
AU.addRequiredTransitive<LoopInfoWrapperPass>(); 12403 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 12404 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 12405 } 12406 12407 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 12408 const SCEV *RHS) { 12409 FoldingSetNodeID ID; 12410 assert(LHS->getType() == RHS->getType() && 12411 "Type mismatch between LHS and RHS"); 12412 // Unique this node based on the arguments 12413 ID.AddInteger(SCEVPredicate::P_Equal); 12414 ID.AddPointer(LHS); 12415 ID.AddPointer(RHS); 12416 void *IP = nullptr; 12417 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12418 return S; 12419 SCEVEqualPredicate *Eq = new (SCEVAllocator) 12420 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 12421 UniquePreds.InsertNode(Eq, IP); 12422 return Eq; 12423 } 12424 12425 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 12426 const SCEVAddRecExpr *AR, 12427 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12428 FoldingSetNodeID ID; 12429 // Unique this node based on the arguments 12430 ID.AddInteger(SCEVPredicate::P_Wrap); 12431 ID.AddPointer(AR); 12432 ID.AddInteger(AddedFlags); 12433 void *IP = nullptr; 12434 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12435 return S; 12436 auto *OF = new (SCEVAllocator) 12437 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 12438 UniquePreds.InsertNode(OF, IP); 12439 return OF; 12440 } 12441 12442 namespace { 12443 12444 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 12445 public: 12446 12447 /// Rewrites \p S in the context of a loop L and the SCEV predication 12448 /// infrastructure. 12449 /// 12450 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 12451 /// equivalences present in \p Pred. 12452 /// 12453 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 12454 /// \p NewPreds such that the result will be an AddRecExpr. 12455 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 12456 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12457 SCEVUnionPredicate *Pred) { 12458 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 12459 return Rewriter.visit(S); 12460 } 12461 12462 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12463 if (Pred) { 12464 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 12465 for (auto *Pred : ExprPreds) 12466 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 12467 if (IPred->getLHS() == Expr) 12468 return IPred->getRHS(); 12469 } 12470 return convertToAddRecWithPreds(Expr); 12471 } 12472 12473 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 12474 const SCEV *Operand = visit(Expr->getOperand()); 12475 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12476 if (AR && AR->getLoop() == L && AR->isAffine()) { 12477 // This couldn't be folded because the operand didn't have the nuw 12478 // flag. Add the nusw flag as an assumption that we could make. 
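    // Illustrative example (editorial; names are invented): given an i32
    // addrec {%start,+,%step}<%L> without <nuw>, "zext ... to i64" cannot be
    // folded statically. Under an assumed IncrementNUSW predicate it becomes
    // the i64 addrec {zext(%start),+,sext(%step)}<%L>, exactly as built
    // below, with the assumption discharged by a runtime check.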
12479 const SCEV *Step = AR->getStepRecurrence(SE); 12480 Type *Ty = Expr->getType(); 12481 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 12482 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 12483 SE.getSignExtendExpr(Step, Ty), L, 12484 AR->getNoWrapFlags()); 12485 } 12486 return SE.getZeroExtendExpr(Operand, Expr->getType()); 12487 } 12488 12489 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 12490 const SCEV *Operand = visit(Expr->getOperand()); 12491 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12492 if (AR && AR->getLoop() == L && AR->isAffine()) { 12493 // This couldn't be folded because the operand didn't have the nsw 12494 // flag. Add the nssw flag as an assumption that we could make. 12495 const SCEV *Step = AR->getStepRecurrence(SE); 12496 Type *Ty = Expr->getType(); 12497 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 12498 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 12499 SE.getSignExtendExpr(Step, Ty), L, 12500 AR->getNoWrapFlags()); 12501 } 12502 return SE.getSignExtendExpr(Operand, Expr->getType()); 12503 } 12504 12505 private: 12506 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 12507 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12508 SCEVUnionPredicate *Pred) 12509 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 12510 12511 bool addOverflowAssumption(const SCEVPredicate *P) { 12512 if (!NewPreds) { 12513 // Check if we've already made this assumption. 12514 return Pred && Pred->implies(P); 12515 } 12516 NewPreds->insert(P); 12517 return true; 12518 } 12519 12520 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 12521 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12522 auto *A = SE.getWrapPredicate(AR, AddedFlags); 12523 return addOverflowAssumption(A); 12524 } 12525 12526 // If \p Expr represents a PHINode, we try to see if it can be represented 12527 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 12528 // to add this predicate as a runtime overflow check, we return the AddRec. 12529 // If \p Expr does not meet these conditions (is not a PHI node, or we 12530 // couldn't create an AddRec for it, or couldn't add the predicate), we just 12531 // return \p Expr. 12532 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 12533 if (!isa<PHINode>(Expr->getValue())) 12534 return Expr; 12535 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 12536 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 12537 if (!PredicatedRewrite) 12538 return Expr; 12539 for (auto *P : PredicatedRewrite->second){ 12540 // Wrap predicates from outer loops are not supported. 
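      // Editorial clarification: the runtime checks backing these predicates
      // are emitted for loop L, so an assumption such as <nusw> on
      // {0,+,1}<%outer> cannot be guarded when rewriting for an inner L; we
      // conservatively return the original SCEVUnknown instead.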
12541 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 12542 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 12543 if (L != AR->getLoop()) 12544 return Expr; 12545 } 12546 if (!addOverflowAssumption(P)) 12547 return Expr; 12548 } 12549 return PredicatedRewrite->first; 12550 } 12551 12552 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 12553 SCEVUnionPredicate *Pred; 12554 const Loop *L; 12555 }; 12556 12557 } // end anonymous namespace 12558 12559 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 12560 SCEVUnionPredicate &Preds) { 12561 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 12562 } 12563 12564 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 12565 const SCEV *S, const Loop *L, 12566 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 12567 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 12568 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 12569 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 12570 12571 if (!AddRec) 12572 return nullptr; 12573 12574 // Since the transformation was successful, we can now transfer the SCEV 12575 // predicates. 12576 for (auto *P : TransformPreds) 12577 Preds.insert(P); 12578 12579 return AddRec; 12580 } 12581 12582 /// SCEV predicates 12583 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 12584 SCEVPredicateKind Kind) 12585 : FastID(ID), Kind(Kind) {} 12586 12587 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 12588 const SCEV *LHS, const SCEV *RHS) 12589 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 12590 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 12591 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 12592 } 12593 12594 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 12595 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 12596 12597 if (!Op) 12598 return false; 12599 12600 return Op->LHS == LHS && Op->RHS == RHS; 12601 } 12602 12603 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 12604 12605 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 12606 12607 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 12608 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 12609 } 12610 12611 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 12612 const SCEVAddRecExpr *AR, 12613 IncrementWrapFlags Flags) 12614 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 12615 12616 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 12617 12618 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 12619 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 12620 12621 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 12622 } 12623 12624 bool SCEVWrapPredicate::isAlwaysTrue() const { 12625 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 12626 IncrementWrapFlags IFlags = Flags; 12627 12628 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 12629 IFlags = clearFlags(IFlags, IncrementNSSW); 12630 12631 return IFlags == IncrementAnyWrap; 12632 } 12633 12634 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 12635 OS.indent(Depth) << *getExpr() << " Added Flags: "; 12636 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 12637 OS << "<nusw>"; 12638 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 12639 OS << "<nssw>"; 12640 OS << "\n"; 12641 } 12642 12643 SCEVWrapPredicate::IncrementWrapFlags 12644 
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 12645 ScalarEvolution &SE) { 12646 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 12647 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 12648 12649 // We can safely transfer the NSW flag as NSSW. 12650 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 12651 ImpliedFlags = IncrementNSSW; 12652 12653 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 12654 // If the increment is positive, the SCEV NUW flag will also imply the 12655 // WrapPredicate NUSW flag. 12656 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 12657 if (Step->getValue()->getValue().isNonNegative()) 12658 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 12659 } 12660 12661 return ImpliedFlags; 12662 } 12663 12664 /// Union predicates don't get cached so create a dummy set ID for it. 12665 SCEVUnionPredicate::SCEVUnionPredicate() 12666 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 12667 12668 bool SCEVUnionPredicate::isAlwaysTrue() const { 12669 return all_of(Preds, 12670 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 12671 } 12672 12673 ArrayRef<const SCEVPredicate *> 12674 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 12675 auto I = SCEVToPreds.find(Expr); 12676 if (I == SCEVToPreds.end()) 12677 return ArrayRef<const SCEVPredicate *>(); 12678 return I->second; 12679 } 12680 12681 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 12682 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 12683 return all_of(Set->Preds, 12684 [this](const SCEVPredicate *I) { return this->implies(I); }); 12685 12686 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 12687 if (ScevPredsIt == SCEVToPreds.end()) 12688 return false; 12689 auto &SCEVPreds = ScevPredsIt->second; 12690 12691 return any_of(SCEVPreds, 12692 [N](const SCEVPredicate *I) { return I->implies(N); }); 12693 } 12694 12695 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 12696 12697 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 12698 for (auto Pred : Preds) 12699 Pred->print(OS, Depth); 12700 } 12701 12702 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 12703 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 12704 for (auto Pred : Set->Preds) 12705 add(Pred); 12706 return; 12707 } 12708 12709 if (implies(N)) 12710 return; 12711 12712 const SCEV *Key = N->getExpr(); 12713 assert(Key && "Only SCEVUnionPredicate doesn't have an " 12714 " associated expression!"); 12715 12716 SCEVToPreds[Key].push_back(N); 12717 Preds.push_back(N); 12718 } 12719 12720 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 12721 Loop &L) 12722 : SE(SE), L(L) {} 12723 12724 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 12725 const SCEV *Expr = SE.getSCEV(V); 12726 RewriteEntry &Entry = RewriteMap[Expr]; 12727 12728 // If we already have an entry and the version matches, return it. 12729 if (Entry.second && Generation == Entry.first) 12730 return Entry.second; 12731 12732 // We found an entry but it's stale. Rewrite the stale entry 12733 // according to the current predicate. 
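  // Editorial sketch of the versioning scheme: Generation is bumped by every
  // addPredicate() call, and rewriting the previously rewritten SCEV (rather
  // than restarting from the original) is sound because predicates are only
  // ever added, so each rewrite can only refine the previous one. E.g.
  // (hypothetical sequence):
  //
  //   PSE.getSCEV(V);      // Generation 1: caches %n --> %n
  //   PSE.addPredicate(P); // Generation 2: the cached entry is now stale
  //   PSE.getSCEV(V);      // rewrites the cached %n under P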
12734 if (Entry.second) 12735 Expr = Entry.second; 12736 12737 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 12738 Entry = {Generation, NewSCEV}; 12739 12740 return NewSCEV; 12741 } 12742 12743 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 12744 if (!BackedgeCount) { 12745 SCEVUnionPredicate BackedgePred; 12746 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 12747 addPredicate(BackedgePred); 12748 } 12749 return BackedgeCount; 12750 } 12751 12752 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 12753 if (Preds.implies(&Pred)) 12754 return; 12755 Preds.add(&Pred); 12756 updateGeneration(); 12757 } 12758 12759 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 12760 return Preds; 12761 } 12762 12763 void PredicatedScalarEvolution::updateGeneration() { 12764 // If the generation number wrapped recompute everything. 12765 if (++Generation == 0) { 12766 for (auto &II : RewriteMap) { 12767 const SCEV *Rewritten = II.second.second; 12768 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 12769 } 12770 } 12771 } 12772 12773 void PredicatedScalarEvolution::setNoOverflow( 12774 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 12775 const SCEV *Expr = getSCEV(V); 12776 const auto *AR = cast<SCEVAddRecExpr>(Expr); 12777 12778 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 12779 12780 // Clear the statically implied flags. 12781 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 12782 addPredicate(*SE.getWrapPredicate(AR, Flags)); 12783 12784 auto II = FlagsMap.insert({V, Flags}); 12785 if (!II.second) 12786 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 12787 } 12788 12789 bool PredicatedScalarEvolution::hasNoOverflow( 12790 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 12791 const SCEV *Expr = getSCEV(V); 12792 const auto *AR = cast<SCEVAddRecExpr>(Expr); 12793 12794 Flags = SCEVWrapPredicate::clearFlags( 12795 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 12796 12797 auto II = FlagsMap.find(V); 12798 12799 if (II != FlagsMap.end()) 12800 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 12801 12802 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 12803 } 12804 12805 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 12806 const SCEV *Expr = this->getSCEV(V); 12807 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 12808 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 12809 12810 if (!New) 12811 return nullptr; 12812 12813 for (auto *P : NewPreds) 12814 Preds.add(P); 12815 12816 updateGeneration(); 12817 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 12818 return New; 12819 } 12820 12821 PredicatedScalarEvolution::PredicatedScalarEvolution( 12822 const PredicatedScalarEvolution &Init) 12823 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 12824 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 12825 for (auto I : Init.FlagsMap) 12826 FlagsMap.insert(I); 12827 } 12828 12829 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 12830 // For each block. 12831 for (auto *BB : L.getBlocks()) 12832 for (auto &I : *BB) { 12833 if (!SE.isSCEVable(I.getType())) 12834 continue; 12835 12836 auto *Expr = SE.getSCEV(&I); 12837 auto II = RewriteMap.find(Expr); 12838 12839 if (II == RewriteMap.end()) 12840 continue; 12841 12842 // Don't print things that are not interesting. 
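      // Editorial note: that is, only entries whose predicated form differs
      // from the plain SCEV are printed; an identity rewrite such as
      // "%x --> %x" carries no information.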
12843       if (II->second.second == Expr)
12844         continue;
12845
12846       OS.indent(Depth) << "[PSE]" << I << ":\n";
12847       OS.indent(Depth + 2) << *Expr << "\n";
12848       OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
12849     }
12850 }
12851
12852 // Match the mathematical pattern A - (A / B) * B, where A and B can be
12853 // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
12854 // for URem with constant power-of-2 second operands.
12855 // It's not always easy, as A and B can be folded (e.g. if A is X / 2 and B is
12856 // 4, A / B folds to X / 8).
12857 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
12858                                 const SCEV *&RHS) {
12859   // Try to match 'zext (trunc A to iB) to iY', which is used
12860   // for URem with constant power-of-2 second operands. Make sure the size of
12861   // the operand A matches the size of the whole expression.
12862   if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
12863     if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
12864       LHS = Trunc->getOperand();
12865       if (LHS->getType() != Expr->getType())
12866         LHS = getZeroExtendExpr(LHS, Expr->getType());
12867       RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
12868                         << getTypeSizeInBits(Trunc->getType()));
12869       return true;
12870     }
12871   const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
12872   if (Add == nullptr || Add->getNumOperands() != 2)
12873     return false;
12874
12875   const SCEV *A = Add->getOperand(1);
12876   const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));
12877
12878   if (Mul == nullptr)
12879     return false;
12880
12881   const auto MatchURemWithDivisor = [&](const SCEV *B) {
12882     // (SomeExpr + (-(SomeExpr / B) * B)).
12883     if (Expr == getURemExpr(A, B)) {
12884       LHS = A;
12885       RHS = B;
12886       return true;
12887     }
12888     return false;
12889   };
12890
12891   // (SomeExpr + (-1 * (SomeExpr / B) * B)).
12892   if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
12893     return MatchURemWithDivisor(Mul->getOperand(1)) ||
12894            MatchURemWithDivisor(Mul->getOperand(2));
12895
12896   // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
12897   if (Mul->getNumOperands() == 2)
12898     return MatchURemWithDivisor(Mul->getOperand(1)) ||
12899            MatchURemWithDivisor(Mul->getOperand(0)) ||
12900            MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
12901            MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
12902   return false;
12903 }
12904
12905 const SCEV *
12906 ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
12907   SmallVector<BasicBlock*, 16> ExitingBlocks;
12908   L->getExitingBlocks(ExitingBlocks);
12909
12910   // Form an expression for the maximum exit count possible for this loop. We
12911   // merge the max and exact information to approximate a version of
12912   // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
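  // Editorial example: for a loop with two exiting blocks, one taken after
  // exactly %n iterations and one for which only a constant max of 100 is
  // known, the merge below yields the symbolic max umin(%n, 100), assuming
  // both exiting blocks dominate the latch.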
12913 SmallVector<const SCEV*, 4> ExitCounts; 12914 for (BasicBlock *ExitingBB : ExitingBlocks) { 12915 const SCEV *ExitCount = getExitCount(L, ExitingBB); 12916 if (isa<SCEVCouldNotCompute>(ExitCount)) 12917 ExitCount = getExitCount(L, ExitingBB, 12918 ScalarEvolution::ConstantMaximum); 12919 if (!isa<SCEVCouldNotCompute>(ExitCount)) { 12920 assert(DT.dominates(ExitingBB, L->getLoopLatch()) && 12921 "We should only have known counts for exiting blocks that " 12922 "dominate latch!"); 12923 ExitCounts.push_back(ExitCount); 12924 } 12925 } 12926 if (ExitCounts.empty()) 12927 return getCouldNotCompute(); 12928 return getUMinFromMismatchedTypes(ExitCounts); 12929 } 12930 12931 /// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown 12932 /// components following the Map (Value -> SCEV)), but skips AddRecExpr because 12933 /// we cannot guarantee that the replacement is loop invariant in the loop of 12934 /// the AddRec. 12935 class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> { 12936 ValueToSCEVMapTy ⤅ 12937 12938 public: 12939 SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M) 12940 : SCEVRewriteVisitor(SE), Map(M) {} 12941 12942 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } 12943 12944 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12945 auto I = Map.find(Expr->getValue()); 12946 if (I == Map.end()) 12947 return Expr; 12948 return I->second; 12949 } 12950 }; 12951 12952 const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) { 12953 auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS, 12954 const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) { 12955 if (!isa<SCEVUnknown>(LHS)) { 12956 std::swap(LHS, RHS); 12957 Predicate = CmpInst::getSwappedPredicate(Predicate); 12958 } 12959 12960 // For now, limit to conditions that provide information about unknown 12961 // expressions. 12962 auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS); 12963 if (!LHSUnknown) 12964 return; 12965 12966 // TODO: use information from more predicates. 12967 switch (Predicate) { 12968 case CmpInst::ICMP_ULT: { 12969 if (!containsAddRecurrence(RHS)) { 12970 const SCEV *Base = LHS; 12971 auto I = RewriteMap.find(LHSUnknown->getValue()); 12972 if (I != RewriteMap.end()) 12973 Base = I->second; 12974 12975 RewriteMap[LHSUnknown->getValue()] = 12976 getUMinExpr(Base, getMinusSCEV(RHS, getOne(RHS->getType()))); 12977 } 12978 break; 12979 } 12980 case CmpInst::ICMP_ULE: { 12981 if (!containsAddRecurrence(RHS)) { 12982 const SCEV *Base = LHS; 12983 auto I = RewriteMap.find(LHSUnknown->getValue()); 12984 if (I != RewriteMap.end()) 12985 Base = I->second; 12986 RewriteMap[LHSUnknown->getValue()] = getUMinExpr(Base, RHS); 12987 } 12988 break; 12989 } 12990 case CmpInst::ICMP_EQ: 12991 if (isa<SCEVConstant>(RHS)) 12992 RewriteMap[LHSUnknown->getValue()] = RHS; 12993 break; 12994 case CmpInst::ICMP_NE: 12995 if (isa<SCEVConstant>(RHS) && 12996 cast<SCEVConstant>(RHS)->getValue()->isNullValue()) 12997 RewriteMap[LHSUnknown->getValue()] = 12998 getUMaxExpr(LHS, getOne(RHS->getType())); 12999 break; 13000 default: 13001 break; 13002 } 13003 }; 13004 // Starting at the loop predecessor, climb up the predecessor chain, as long 13005 // as there are predecessors that can be found that have unique successors 13006 // leading to the original header. 13007 // TODO: share this logic with isLoopEntryGuardedByCond. 
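  // Editorial end-to-end example (names illustrative): given
  //
  //   if (n u< 16)                      // guard dominating the loop entry
  //     for (i = 0; i != n; ++i) ...    // backedge-taken count is %n
  //
  // applyLoopGuards rewrites %n to umin(%n, 15) via the ICMP_ULT case above,
  // handing later passes a tighter bound than %n alone.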
13008 ValueToSCEVMapTy RewriteMap; 13009 for (std::pair<const BasicBlock *, const BasicBlock *> Pair( 13010 L->getLoopPredecessor(), L->getHeader()); 13011 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 13012 13013 const BranchInst *LoopEntryPredicate = 13014 dyn_cast<BranchInst>(Pair.first->getTerminator()); 13015 if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional()) 13016 continue; 13017 13018 // TODO: use information from more complex conditions, e.g. AND expressions. 13019 auto *Cmp = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition()); 13020 if (!Cmp) 13021 continue; 13022 13023 auto Predicate = Cmp->getPredicate(); 13024 if (LoopEntryPredicate->getSuccessor(1) == Pair.second) 13025 Predicate = CmpInst::getInversePredicate(Predicate); 13026 CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)), 13027 getSCEV(Cmp->getOperand(1)), RewriteMap); 13028 } 13029 13030 // Also collect information from assumptions dominating the loop. 13031 for (auto &AssumeVH : AC.assumptions()) { 13032 if (!AssumeVH) 13033 continue; 13034 auto *AssumeI = cast<CallInst>(AssumeVH); 13035 auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0)); 13036 if (!Cmp || !DT.dominates(AssumeI, L->getHeader())) 13037 continue; 13038 CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)), 13039 getSCEV(Cmp->getOperand(1)), RewriteMap); 13040 } 13041 13042 if (RewriteMap.empty()) 13043 return Expr; 13044 SCEVLoopGuardRewriter Rewriter(*this, RewriteMap); 13045 return Rewriter.visit(Expr); 13046 } 13047