1 //===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file contains the implementation of the scalar evolution analysis 10 // engine, which is used primarily to analyze expressions involving induction 11 // variables in loops. 12 // 13 // There are several aspects to this library. First is the representation of 14 // scalar expressions, which are represented as subclasses of the SCEV class. 15 // These classes are used to represent certain types of subexpressions that we 16 // can handle. We only create one SCEV of a particular shape, so 17 // pointer-comparisons for equality are legal. 18 // 19 // One important aspect of the SCEV objects is that they are never cyclic, even 20 // if there is a cycle in the dataflow for an expression (ie, a PHI node). If 21 // the PHI node is one of the idioms that we can represent (e.g., a polynomial 22 // recurrence) then we represent it directly as a recurrence node, otherwise we 23 // represent it as a SCEVUnknown node. 24 // 25 // In addition to being able to represent expressions of various types, we also 26 // have folders that are used to build the *canonical* representation for a 27 // particular expression. These folders are capable of using a variety of 28 // rewrite rules to simplify the expressions. 29 // 30 // Once the folders are defined, we can implement the more interesting 31 // higher-level code, such as the code that recognizes PHI nodes of various 32 // types, computes the execution count of a loop, etc. 33 // 34 // TODO: We should use these routines and value representations to implement 35 // dependence analysis! 36 // 37 //===----------------------------------------------------------------------===// 38 // 39 // There are several good references for the techniques used in this analysis. 40 // 41 // Chains of recurrences -- a method to expedite the evaluation 42 // of closed-form functions 43 // Olaf Bachmann, Paul S. Wang, Eugene V. Zima 44 // 45 // On computational properties of chains of recurrences 46 // Eugene V. Zima 47 // 48 // Symbolic Evaluation of Chains of Recurrences for Loop Optimization 49 // Robert A. van Engelen 50 // 51 // Efficient Symbolic Analysis for Optimizing Compilers 52 // Robert A. van Engelen 53 // 54 // Using the chains of recurrences algebra for data dependence testing and 55 // induction variable substitution 56 // MS Thesis, Johnie Birch 57 // 58 //===----------------------------------------------------------------------===// 59 60 #include "llvm/Analysis/ScalarEvolution.h" 61 #include "llvm/ADT/APInt.h" 62 #include "llvm/ADT/ArrayRef.h" 63 #include "llvm/ADT/DenseMap.h" 64 #include "llvm/ADT/DepthFirstIterator.h" 65 #include "llvm/ADT/EquivalenceClasses.h" 66 #include "llvm/ADT/FoldingSet.h" 67 #include "llvm/ADT/None.h" 68 #include "llvm/ADT/Optional.h" 69 #include "llvm/ADT/STLExtras.h" 70 #include "llvm/ADT/ScopeExit.h" 71 #include "llvm/ADT/Sequence.h" 72 #include "llvm/ADT/SetVector.h" 73 #include "llvm/ADT/SmallPtrSet.h" 74 #include "llvm/ADT/SmallSet.h" 75 #include "llvm/ADT/SmallVector.h" 76 #include "llvm/ADT/Statistic.h" 77 #include "llvm/ADT/StringRef.h" 78 #include "llvm/Analysis/AssumptionCache.h" 79 #include "llvm/Analysis/ConstantFolding.h" 80 #include "llvm/Analysis/InstructionSimplify.h" 81 #include "llvm/Analysis/LoopInfo.h" 82 #include "llvm/Analysis/ScalarEvolutionDivision.h" 83 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 84 #include "llvm/Analysis/TargetLibraryInfo.h" 85 #include "llvm/Analysis/ValueTracking.h" 86 #include "llvm/Config/llvm-config.h" 87 #include "llvm/IR/Argument.h" 88 #include "llvm/IR/BasicBlock.h" 89 #include "llvm/IR/CFG.h" 90 #include "llvm/IR/Constant.h" 91 #include "llvm/IR/ConstantRange.h" 92 #include "llvm/IR/Constants.h" 93 #include "llvm/IR/DataLayout.h" 94 #include "llvm/IR/DerivedTypes.h" 95 #include "llvm/IR/Dominators.h" 96 #include "llvm/IR/Function.h" 97 #include "llvm/IR/GlobalAlias.h" 98 #include "llvm/IR/GlobalValue.h" 99 #include "llvm/IR/GlobalVariable.h" 100 #include "llvm/IR/InstIterator.h" 101 #include "llvm/IR/InstrTypes.h" 102 #include "llvm/IR/Instruction.h" 103 #include "llvm/IR/Instructions.h" 104 #include "llvm/IR/IntrinsicInst.h" 105 #include "llvm/IR/Intrinsics.h" 106 #include "llvm/IR/LLVMContext.h" 107 #include "llvm/IR/Metadata.h" 108 #include "llvm/IR/Operator.h" 109 #include "llvm/IR/PatternMatch.h" 110 #include "llvm/IR/Type.h" 111 #include "llvm/IR/Use.h" 112 #include "llvm/IR/User.h" 113 #include "llvm/IR/Value.h" 114 #include "llvm/IR/Verifier.h" 115 #include "llvm/InitializePasses.h" 116 #include "llvm/Pass.h" 117 #include "llvm/Support/Casting.h" 118 #include "llvm/Support/CommandLine.h" 119 #include "llvm/Support/Compiler.h" 120 #include "llvm/Support/Debug.h" 121 #include "llvm/Support/ErrorHandling.h" 122 #include "llvm/Support/KnownBits.h" 123 #include "llvm/Support/SaveAndRestore.h" 124 #include "llvm/Support/raw_ostream.h" 125 #include <algorithm> 126 #include <cassert> 127 #include <climits> 128 #include <cstddef> 129 #include <cstdint> 130 #include <cstdlib> 131 #include <map> 132 #include <memory> 133 #include <tuple> 134 #include <utility> 135 #include <vector> 136 137 using namespace llvm; 138 139 #define DEBUG_TYPE "scalar-evolution" 140 141 STATISTIC(NumArrayLenItCounts, 142 "Number of trip counts computed with array length"); 143 STATISTIC(NumTripCountsComputed, 144 "Number of loops with predictable loop counts"); 145 STATISTIC(NumTripCountsNotComputed, 146 "Number of loops without predictable loop counts"); 147 STATISTIC(NumBruteForceTripCountsComputed, 148 "Number of loops with trip counts computed by force"); 149 150 static cl::opt<unsigned> 151 MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, 152 cl::ZeroOrMore, 153 cl::desc("Maximum number of iterations SCEV will " 154 "symbolically execute a constant " 155 "derived loop"), 156 cl::init(100)); 157 158 // FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean. 159 static cl::opt<bool> VerifySCEV( 160 "verify-scev", cl::Hidden, 161 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); 162 static cl::opt<bool> VerifySCEVStrict( 163 "verify-scev-strict", cl::Hidden, 164 cl::desc("Enable stricter verification with -verify-scev is passed")); 165 static cl::opt<bool> 166 VerifySCEVMap("verify-scev-maps", cl::Hidden, 167 cl::desc("Verify no dangling value in ScalarEvolution's " 168 "ExprValueMap (slow)")); 169 170 static cl::opt<bool> VerifyIR( 171 "scev-verify-ir", cl::Hidden, 172 cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), 173 cl::init(false)); 174 175 static cl::opt<unsigned> MulOpsInlineThreshold( 176 "scev-mulops-inline-threshold", cl::Hidden, 177 cl::desc("Threshold for inlining multiplication operands into a SCEV"), 178 cl::init(32)); 179 180 static cl::opt<unsigned> AddOpsInlineThreshold( 181 "scev-addops-inline-threshold", cl::Hidden, 182 cl::desc("Threshold for inlining addition operands into a SCEV"), 183 cl::init(500)); 184 185 static cl::opt<unsigned> MaxSCEVCompareDepth( 186 "scalar-evolution-max-scev-compare-depth", cl::Hidden, 187 cl::desc("Maximum depth of recursive SCEV complexity comparisons"), 188 cl::init(32)); 189 190 static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth( 191 "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, 192 cl::desc("Maximum depth of recursive SCEV operations implication analysis"), 193 cl::init(2)); 194 195 static cl::opt<unsigned> MaxValueCompareDepth( 196 "scalar-evolution-max-value-compare-depth", cl::Hidden, 197 cl::desc("Maximum depth of recursive value complexity comparisons"), 198 cl::init(2)); 199 200 static cl::opt<unsigned> 201 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, 202 cl::desc("Maximum depth of recursive arithmetics"), 203 cl::init(32)); 204 205 static cl::opt<unsigned> MaxConstantEvolvingDepth( 206 "scalar-evolution-max-constant-evolving-depth", cl::Hidden, 207 cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); 208 209 static cl::opt<unsigned> 210 MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, 211 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), 212 cl::init(8)); 213 214 static cl::opt<unsigned> 215 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, 216 cl::desc("Max coefficients in AddRec during evolving"), 217 cl::init(8)); 218 219 static cl::opt<unsigned> 220 HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, 221 cl::desc("Size of the expression which is considered huge"), 222 cl::init(4096)); 223 224 static cl::opt<bool> 225 ClassifyExpressions("scalar-evolution-classify-expressions", 226 cl::Hidden, cl::init(true), 227 cl::desc("When printing analysis, include information on every instruction")); 228 229 230 //===----------------------------------------------------------------------===// 231 // SCEV class definitions 232 //===----------------------------------------------------------------------===// 233 234 //===----------------------------------------------------------------------===// 235 // Implementation of the SCEV class. 236 // 237 238 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 239 LLVM_DUMP_METHOD void SCEV::dump() const { 240 print(dbgs()); 241 dbgs() << '\n'; 242 } 243 #endif 244 245 void SCEV::print(raw_ostream &OS) const { 246 switch (static_cast<SCEVTypes>(getSCEVType())) { 247 case scConstant: 248 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false); 249 return; 250 case scTruncate: { 251 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this); 252 const SCEV *Op = Trunc->getOperand(); 253 OS << "(trunc " << *Op->getType() << " " << *Op << " to " 254 << *Trunc->getType() << ")"; 255 return; 256 } 257 case scZeroExtend: { 258 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this); 259 const SCEV *Op = ZExt->getOperand(); 260 OS << "(zext " << *Op->getType() << " " << *Op << " to " 261 << *ZExt->getType() << ")"; 262 return; 263 } 264 case scSignExtend: { 265 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this); 266 const SCEV *Op = SExt->getOperand(); 267 OS << "(sext " << *Op->getType() << " " << *Op << " to " 268 << *SExt->getType() << ")"; 269 return; 270 } 271 case scAddRecExpr: { 272 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this); 273 OS << "{" << *AR->getOperand(0); 274 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) 275 OS << ",+," << *AR->getOperand(i); 276 OS << "}<"; 277 if (AR->hasNoUnsignedWrap()) 278 OS << "nuw><"; 279 if (AR->hasNoSignedWrap()) 280 OS << "nsw><"; 281 if (AR->hasNoSelfWrap() && 282 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW))) 283 OS << "nw><"; 284 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false); 285 OS << ">"; 286 return; 287 } 288 case scAddExpr: 289 case scMulExpr: 290 case scUMaxExpr: 291 case scSMaxExpr: 292 case scUMinExpr: 293 case scSMinExpr: { 294 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this); 295 const char *OpStr = nullptr; 296 switch (NAry->getSCEVType()) { 297 case scAddExpr: OpStr = " + "; break; 298 case scMulExpr: OpStr = " * "; break; 299 case scUMaxExpr: OpStr = " umax "; break; 300 case scSMaxExpr: OpStr = " smax "; break; 301 case scUMinExpr: 302 OpStr = " umin "; 303 break; 304 case scSMinExpr: 305 OpStr = " smin "; 306 break; 307 } 308 OS << "("; 309 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); 310 I != E; ++I) { 311 OS << **I; 312 if (std::next(I) != E) 313 OS << OpStr; 314 } 315 OS << ")"; 316 switch (NAry->getSCEVType()) { 317 case scAddExpr: 318 case scMulExpr: 319 if (NAry->hasNoUnsignedWrap()) 320 OS << "<nuw>"; 321 if (NAry->hasNoSignedWrap()) 322 OS << "<nsw>"; 323 } 324 return; 325 } 326 case scUDivExpr: { 327 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this); 328 OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")"; 329 return; 330 } 331 case scUnknown: { 332 const SCEVUnknown *U = cast<SCEVUnknown>(this); 333 Type *AllocTy; 334 if (U->isSizeOf(AllocTy)) { 335 OS << "sizeof(" << *AllocTy << ")"; 336 return; 337 } 338 if (U->isAlignOf(AllocTy)) { 339 OS << "alignof(" << *AllocTy << ")"; 340 return; 341 } 342 343 Type *CTy; 344 Constant *FieldNo; 345 if (U->isOffsetOf(CTy, FieldNo)) { 346 OS << "offsetof(" << *CTy << ", "; 347 FieldNo->printAsOperand(OS, false); 348 OS << ")"; 349 return; 350 } 351 352 // Otherwise just print it normally. 353 U->getValue()->printAsOperand(OS, false); 354 return; 355 } 356 case scCouldNotCompute: 357 OS << "***COULDNOTCOMPUTE***"; 358 return; 359 } 360 llvm_unreachable("Unknown SCEV kind!"); 361 } 362 363 Type *SCEV::getType() const { 364 switch (static_cast<SCEVTypes>(getSCEVType())) { 365 case scConstant: 366 return cast<SCEVConstant>(this)->getType(); 367 case scTruncate: 368 case scZeroExtend: 369 case scSignExtend: 370 return cast<SCEVCastExpr>(this)->getType(); 371 case scAddRecExpr: 372 case scMulExpr: 373 case scUMaxExpr: 374 case scSMaxExpr: 375 case scUMinExpr: 376 case scSMinExpr: 377 return cast<SCEVNAryExpr>(this)->getType(); 378 case scAddExpr: 379 return cast<SCEVAddExpr>(this)->getType(); 380 case scUDivExpr: 381 return cast<SCEVUDivExpr>(this)->getType(); 382 case scUnknown: 383 return cast<SCEVUnknown>(this)->getType(); 384 case scCouldNotCompute: 385 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 386 } 387 llvm_unreachable("Unknown SCEV kind!"); 388 } 389 390 bool SCEV::isZero() const { 391 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 392 return SC->getValue()->isZero(); 393 return false; 394 } 395 396 bool SCEV::isOne() const { 397 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 398 return SC->getValue()->isOne(); 399 return false; 400 } 401 402 bool SCEV::isAllOnesValue() const { 403 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 404 return SC->getValue()->isMinusOne(); 405 return false; 406 } 407 408 bool SCEV::isNonConstantNegative() const { 409 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this); 410 if (!Mul) return false; 411 412 // If there is a constant factor, it will be first. 413 const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0)); 414 if (!SC) return false; 415 416 // Return true if the value is negative, this matches things like (-42 * V). 417 return SC->getAPInt().isNegative(); 418 } 419 420 SCEVCouldNotCompute::SCEVCouldNotCompute() : 421 SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {} 422 423 bool SCEVCouldNotCompute::classof(const SCEV *S) { 424 return S->getSCEVType() == scCouldNotCompute; 425 } 426 427 const SCEV *ScalarEvolution::getConstant(ConstantInt *V) { 428 FoldingSetNodeID ID; 429 ID.AddInteger(scConstant); 430 ID.AddPointer(V); 431 void *IP = nullptr; 432 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 433 SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V); 434 UniqueSCEVs.InsertNode(S, IP); 435 return S; 436 } 437 438 const SCEV *ScalarEvolution::getConstant(const APInt &Val) { 439 return getConstant(ConstantInt::get(getContext(), Val)); 440 } 441 442 const SCEV * 443 ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) { 444 IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); 445 return getConstant(ConstantInt::get(ITy, V, isSigned)); 446 } 447 448 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, 449 unsigned SCEVTy, const SCEV *op, Type *ty) 450 : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {} 451 452 SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, 453 const SCEV *op, Type *ty) 454 : SCEVCastExpr(ID, scTruncate, op, ty) { 455 assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 456 "Cannot truncate non-integer value!"); 457 } 458 459 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, 460 const SCEV *op, Type *ty) 461 : SCEVCastExpr(ID, scZeroExtend, op, ty) { 462 assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 463 "Cannot zero extend non-integer value!"); 464 } 465 466 SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, 467 const SCEV *op, Type *ty) 468 : SCEVCastExpr(ID, scSignExtend, op, ty) { 469 assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 470 "Cannot sign extend non-integer value!"); 471 } 472 473 void SCEVUnknown::deleted() { 474 // Clear this SCEVUnknown from various maps. 475 SE->forgetMemoizedResults(this); 476 477 // Remove this SCEVUnknown from the uniquing map. 478 SE->UniqueSCEVs.RemoveNode(this); 479 480 // Release the value. 481 setValPtr(nullptr); 482 } 483 484 void SCEVUnknown::allUsesReplacedWith(Value *New) { 485 // Remove this SCEVUnknown from the uniquing map. 486 SE->UniqueSCEVs.RemoveNode(this); 487 488 // Update this SCEVUnknown to point to the new value. This is needed 489 // because there may still be outstanding SCEVs which still point to 490 // this SCEVUnknown. 491 setValPtr(New); 492 } 493 494 bool SCEVUnknown::isSizeOf(Type *&AllocTy) const { 495 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 496 if (VCE->getOpcode() == Instruction::PtrToInt) 497 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 498 if (CE->getOpcode() == Instruction::GetElementPtr && 499 CE->getOperand(0)->isNullValue() && 500 CE->getNumOperands() == 2) 501 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1))) 502 if (CI->isOne()) { 503 AllocTy = cast<PointerType>(CE->getOperand(0)->getType()) 504 ->getElementType(); 505 return true; 506 } 507 508 return false; 509 } 510 511 bool SCEVUnknown::isAlignOf(Type *&AllocTy) const { 512 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 513 if (VCE->getOpcode() == Instruction::PtrToInt) 514 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 515 if (CE->getOpcode() == Instruction::GetElementPtr && 516 CE->getOperand(0)->isNullValue()) { 517 Type *Ty = 518 cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); 519 if (StructType *STy = dyn_cast<StructType>(Ty)) 520 if (!STy->isPacked() && 521 CE->getNumOperands() == 3 && 522 CE->getOperand(1)->isNullValue()) { 523 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2))) 524 if (CI->isOne() && 525 STy->getNumElements() == 2 && 526 STy->getElementType(0)->isIntegerTy(1)) { 527 AllocTy = STy->getElementType(1); 528 return true; 529 } 530 } 531 } 532 533 return false; 534 } 535 536 bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const { 537 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 538 if (VCE->getOpcode() == Instruction::PtrToInt) 539 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 540 if (CE->getOpcode() == Instruction::GetElementPtr && 541 CE->getNumOperands() == 3 && 542 CE->getOperand(0)->isNullValue() && 543 CE->getOperand(1)->isNullValue()) { 544 Type *Ty = 545 cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); 546 // Ignore vector types here so that ScalarEvolutionExpander doesn't 547 // emit getelementptrs that index into vectors. 548 if (Ty->isStructTy() || Ty->isArrayTy()) { 549 CTy = Ty; 550 FieldNo = CE->getOperand(2); 551 return true; 552 } 553 } 554 555 return false; 556 } 557 558 //===----------------------------------------------------------------------===// 559 // SCEV Utilities 560 //===----------------------------------------------------------------------===// 561 562 /// Compare the two values \p LV and \p RV in terms of their "complexity" where 563 /// "complexity" is a partial (and somewhat ad-hoc) relation used to order 564 /// operands in SCEV expressions. \p EqCache is a set of pairs of values that 565 /// have been previously deemed to be "equally complex" by this routine. It is 566 /// intended to avoid exponential time complexity in cases like: 567 /// 568 /// %a = f(%x, %y) 569 /// %b = f(%a, %a) 570 /// %c = f(%b, %b) 571 /// 572 /// %d = f(%x, %y) 573 /// %e = f(%d, %d) 574 /// %f = f(%e, %e) 575 /// 576 /// CompareValueComplexity(%f, %c) 577 /// 578 /// Since we do not continue running this routine on expression trees once we 579 /// have seen unequal values, there is no need to track them in the cache. 580 static int 581 CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue, 582 const LoopInfo *const LI, Value *LV, Value *RV, 583 unsigned Depth) { 584 if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV)) 585 return 0; 586 587 // Order pointer values after integer values. This helps SCEVExpander form 588 // GEPs. 589 bool LIsPointer = LV->getType()->isPointerTy(), 590 RIsPointer = RV->getType()->isPointerTy(); 591 if (LIsPointer != RIsPointer) 592 return (int)LIsPointer - (int)RIsPointer; 593 594 // Compare getValueID values. 595 unsigned LID = LV->getValueID(), RID = RV->getValueID(); 596 if (LID != RID) 597 return (int)LID - (int)RID; 598 599 // Sort arguments by their position. 600 if (const auto *LA = dyn_cast<Argument>(LV)) { 601 const auto *RA = cast<Argument>(RV); 602 unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo(); 603 return (int)LArgNo - (int)RArgNo; 604 } 605 606 if (const auto *LGV = dyn_cast<GlobalValue>(LV)) { 607 const auto *RGV = cast<GlobalValue>(RV); 608 609 const auto IsGVNameSemantic = [&](const GlobalValue *GV) { 610 auto LT = GV->getLinkage(); 611 return !(GlobalValue::isPrivateLinkage(LT) || 612 GlobalValue::isInternalLinkage(LT)); 613 }; 614 615 // Use the names to distinguish the two values, but only if the 616 // names are semantically important. 617 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV)) 618 return LGV->getName().compare(RGV->getName()); 619 } 620 621 // For instructions, compare their loop depth, and their operand count. This 622 // is pretty loose. 623 if (const auto *LInst = dyn_cast<Instruction>(LV)) { 624 const auto *RInst = cast<Instruction>(RV); 625 626 // Compare loop depths. 627 const BasicBlock *LParent = LInst->getParent(), 628 *RParent = RInst->getParent(); 629 if (LParent != RParent) { 630 unsigned LDepth = LI->getLoopDepth(LParent), 631 RDepth = LI->getLoopDepth(RParent); 632 if (LDepth != RDepth) 633 return (int)LDepth - (int)RDepth; 634 } 635 636 // Compare the number of operands. 637 unsigned LNumOps = LInst->getNumOperands(), 638 RNumOps = RInst->getNumOperands(); 639 if (LNumOps != RNumOps) 640 return (int)LNumOps - (int)RNumOps; 641 642 for (unsigned Idx : seq(0u, LNumOps)) { 643 int Result = 644 CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx), 645 RInst->getOperand(Idx), Depth + 1); 646 if (Result != 0) 647 return Result; 648 } 649 } 650 651 EqCacheValue.unionSets(LV, RV); 652 return 0; 653 } 654 655 // Return negative, zero, or positive, if LHS is less than, equal to, or greater 656 // than RHS, respectively. A three-way result allows recursive comparisons to be 657 // more efficient. 658 static int CompareSCEVComplexity( 659 EquivalenceClasses<const SCEV *> &EqCacheSCEV, 660 EquivalenceClasses<const Value *> &EqCacheValue, 661 const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, 662 DominatorTree &DT, unsigned Depth = 0) { 663 // Fast-path: SCEVs are uniqued so we can do a quick equality check. 664 if (LHS == RHS) 665 return 0; 666 667 // Primarily, sort the SCEVs by their getSCEVType(). 668 unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType(); 669 if (LType != RType) 670 return (int)LType - (int)RType; 671 672 if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS)) 673 return 0; 674 // Aside from the getSCEVType() ordering, the particular ordering 675 // isn't very important except that it's beneficial to be consistent, 676 // so that (a + b) and (b + a) don't end up as different expressions. 677 switch (static_cast<SCEVTypes>(LType)) { 678 case scUnknown: { 679 const SCEVUnknown *LU = cast<SCEVUnknown>(LHS); 680 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS); 681 682 int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(), 683 RU->getValue(), Depth + 1); 684 if (X == 0) 685 EqCacheSCEV.unionSets(LHS, RHS); 686 return X; 687 } 688 689 case scConstant: { 690 const SCEVConstant *LC = cast<SCEVConstant>(LHS); 691 const SCEVConstant *RC = cast<SCEVConstant>(RHS); 692 693 // Compare constant values. 694 const APInt &LA = LC->getAPInt(); 695 const APInt &RA = RC->getAPInt(); 696 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth(); 697 if (LBitWidth != RBitWidth) 698 return (int)LBitWidth - (int)RBitWidth; 699 return LA.ult(RA) ? -1 : 1; 700 } 701 702 case scAddRecExpr: { 703 const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS); 704 const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS); 705 706 // There is always a dominance between two recs that are used by one SCEV, 707 // so we can safely sort recs by loop header dominance. We require such 708 // order in getAddExpr. 709 const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop(); 710 if (LLoop != RLoop) { 711 const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader(); 712 assert(LHead != RHead && "Two loops share the same header?"); 713 if (DT.dominates(LHead, RHead)) 714 return 1; 715 else 716 assert(DT.dominates(RHead, LHead) && 717 "No dominance between recurrences used by one SCEV?"); 718 return -1; 719 } 720 721 // Addrec complexity grows with operand count. 722 unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands(); 723 if (LNumOps != RNumOps) 724 return (int)LNumOps - (int)RNumOps; 725 726 // Lexicographically compare. 727 for (unsigned i = 0; i != LNumOps; ++i) { 728 int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, 729 LA->getOperand(i), RA->getOperand(i), DT, 730 Depth + 1); 731 if (X != 0) 732 return X; 733 } 734 EqCacheSCEV.unionSets(LHS, RHS); 735 return 0; 736 } 737 738 case scAddExpr: 739 case scMulExpr: 740 case scSMaxExpr: 741 case scUMaxExpr: 742 case scSMinExpr: 743 case scUMinExpr: { 744 const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS); 745 const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS); 746 747 // Lexicographically compare n-ary expressions. 748 unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands(); 749 if (LNumOps != RNumOps) 750 return (int)LNumOps - (int)RNumOps; 751 752 for (unsigned i = 0; i != LNumOps; ++i) { 753 int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, 754 LC->getOperand(i), RC->getOperand(i), DT, 755 Depth + 1); 756 if (X != 0) 757 return X; 758 } 759 EqCacheSCEV.unionSets(LHS, RHS); 760 return 0; 761 } 762 763 case scUDivExpr: { 764 const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS); 765 const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS); 766 767 // Lexicographically compare udiv expressions. 768 int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(), 769 RC->getLHS(), DT, Depth + 1); 770 if (X != 0) 771 return X; 772 X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(), 773 RC->getRHS(), DT, Depth + 1); 774 if (X == 0) 775 EqCacheSCEV.unionSets(LHS, RHS); 776 return X; 777 } 778 779 case scTruncate: 780 case scZeroExtend: 781 case scSignExtend: { 782 const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS); 783 const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS); 784 785 // Compare cast expressions by operand. 786 int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, 787 LC->getOperand(), RC->getOperand(), DT, 788 Depth + 1); 789 if (X == 0) 790 EqCacheSCEV.unionSets(LHS, RHS); 791 return X; 792 } 793 794 case scCouldNotCompute: 795 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 796 } 797 llvm_unreachable("Unknown SCEV kind!"); 798 } 799 800 /// Given a list of SCEV objects, order them by their complexity, and group 801 /// objects of the same complexity together by value. When this routine is 802 /// finished, we know that any duplicates in the vector are consecutive and that 803 /// complexity is monotonically increasing. 804 /// 805 /// Note that we go take special precautions to ensure that we get deterministic 806 /// results from this routine. In other words, we don't want the results of 807 /// this to depend on where the addresses of various SCEV objects happened to 808 /// land in memory. 809 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops, 810 LoopInfo *LI, DominatorTree &DT) { 811 if (Ops.size() < 2) return; // Noop 812 813 EquivalenceClasses<const SCEV *> EqCacheSCEV; 814 EquivalenceClasses<const Value *> EqCacheValue; 815 if (Ops.size() == 2) { 816 // This is the common case, which also happens to be trivially simple. 817 // Special case it. 818 const SCEV *&LHS = Ops[0], *&RHS = Ops[1]; 819 if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0) 820 std::swap(LHS, RHS); 821 return; 822 } 823 824 // Do the rough sort by complexity. 825 llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) { 826 return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) < 827 0; 828 }); 829 830 // Now that we are sorted by complexity, group elements of the same 831 // complexity. Note that this is, at worst, N^2, but the vector is likely to 832 // be extremely short in practice. Note that we take this approach because we 833 // do not want to depend on the addresses of the objects we are grouping. 834 for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) { 835 const SCEV *S = Ops[i]; 836 unsigned Complexity = S->getSCEVType(); 837 838 // If there are any objects of the same complexity and same value as this 839 // one, group them. 840 for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) { 841 if (Ops[j] == S) { // Found a duplicate. 842 // Move it to immediately after i'th element. 843 std::swap(Ops[i+1], Ops[j]); 844 ++i; // no need to rescan it. 845 if (i == e-2) return; // Done! 846 } 847 } 848 } 849 } 850 851 /// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at 852 /// least HugeExprThreshold nodes). 853 static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) { 854 return any_of(Ops, [](const SCEV *S) { 855 return S->getExpressionSize() >= HugeExprThreshold; 856 }); 857 } 858 859 //===----------------------------------------------------------------------===// 860 // Simple SCEV method implementations 861 //===----------------------------------------------------------------------===// 862 863 /// Compute BC(It, K). The result has width W. Assume, K > 0. 864 static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K, 865 ScalarEvolution &SE, 866 Type *ResultTy) { 867 // Handle the simplest case efficiently. 868 if (K == 1) 869 return SE.getTruncateOrZeroExtend(It, ResultTy); 870 871 // We are using the following formula for BC(It, K): 872 // 873 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K! 874 // 875 // Suppose, W is the bitwidth of the return value. We must be prepared for 876 // overflow. Hence, we must assure that the result of our computation is 877 // equal to the accurate one modulo 2^W. Unfortunately, division isn't 878 // safe in modular arithmetic. 879 // 880 // However, this code doesn't use exactly that formula; the formula it uses 881 // is something like the following, where T is the number of factors of 2 in 882 // K! (i.e. trailing zeros in the binary representation of K!), and ^ is 883 // exponentiation: 884 // 885 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T) 886 // 887 // This formula is trivially equivalent to the previous formula. However, 888 // this formula can be implemented much more efficiently. The trick is that 889 // K! / 2^T is odd, and exact division by an odd number *is* safe in modular 890 // arithmetic. To do exact division in modular arithmetic, all we have 891 // to do is multiply by the inverse. Therefore, this step can be done at 892 // width W. 893 // 894 // The next issue is how to safely do the division by 2^T. The way this 895 // is done is by doing the multiplication step at a width of at least W + T 896 // bits. This way, the bottom W+T bits of the product are accurate. Then, 897 // when we perform the division by 2^T (which is equivalent to a right shift 898 // by T), the bottom W bits are accurate. Extra bits are okay; they'll get 899 // truncated out after the division by 2^T. 900 // 901 // In comparison to just directly using the first formula, this technique 902 // is much more efficient; using the first formula requires W * K bits, 903 // but this formula less than W + K bits. Also, the first formula requires 904 // a division step, whereas this formula only requires multiplies and shifts. 905 // 906 // It doesn't matter whether the subtraction step is done in the calculation 907 // width or the input iteration count's width; if the subtraction overflows, 908 // the result must be zero anyway. We prefer here to do it in the width of 909 // the induction variable because it helps a lot for certain cases; CodeGen 910 // isn't smart enough to ignore the overflow, which leads to much less 911 // efficient code if the width of the subtraction is wider than the native 912 // register width. 913 // 914 // (It's possible to not widen at all by pulling out factors of 2 before 915 // the multiplication; for example, K=2 can be calculated as 916 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires 917 // extra arithmetic, so it's not an obvious win, and it gets 918 // much more complicated for K > 3.) 919 920 // Protection from insane SCEVs; this bound is conservative, 921 // but it probably doesn't matter. 922 if (K > 1000) 923 return SE.getCouldNotCompute(); 924 925 unsigned W = SE.getTypeSizeInBits(ResultTy); 926 927 // Calculate K! / 2^T and T; we divide out the factors of two before 928 // multiplying for calculating K! / 2^T to avoid overflow. 929 // Other overflow doesn't matter because we only care about the bottom 930 // W bits of the result. 931 APInt OddFactorial(W, 1); 932 unsigned T = 1; 933 for (unsigned i = 3; i <= K; ++i) { 934 APInt Mult(W, i); 935 unsigned TwoFactors = Mult.countTrailingZeros(); 936 T += TwoFactors; 937 Mult.lshrInPlace(TwoFactors); 938 OddFactorial *= Mult; 939 } 940 941 // We need at least W + T bits for the multiplication step 942 unsigned CalculationBits = W + T; 943 944 // Calculate 2^T, at width T+W. 945 APInt DivFactor = APInt::getOneBitSet(CalculationBits, T); 946 947 // Calculate the multiplicative inverse of K! / 2^T; 948 // this multiplication factor will perform the exact division by 949 // K! / 2^T. 950 APInt Mod = APInt::getSignedMinValue(W+1); 951 APInt MultiplyFactor = OddFactorial.zext(W+1); 952 MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod); 953 MultiplyFactor = MultiplyFactor.trunc(W); 954 955 // Calculate the product, at width T+W 956 IntegerType *CalculationTy = IntegerType::get(SE.getContext(), 957 CalculationBits); 958 const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); 959 for (unsigned i = 1; i != K; ++i) { 960 const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i)); 961 Dividend = SE.getMulExpr(Dividend, 962 SE.getTruncateOrZeroExtend(S, CalculationTy)); 963 } 964 965 // Divide by 2^T 966 const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); 967 968 // Truncate the result, and divide by K! / 2^T. 969 970 return SE.getMulExpr(SE.getConstant(MultiplyFactor), 971 SE.getTruncateOrZeroExtend(DivResult, ResultTy)); 972 } 973 974 /// Return the value of this chain of recurrences at the specified iteration 975 /// number. We can evaluate this recurrence by multiplying each element in the 976 /// chain by the binomial coefficient corresponding to it. In other words, we 977 /// can evaluate {A,+,B,+,C,+,D} as: 978 /// 979 /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3) 980 /// 981 /// where BC(It, k) stands for binomial coefficient. 982 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It, 983 ScalarEvolution &SE) const { 984 const SCEV *Result = getStart(); 985 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 986 // The computation is correct in the face of overflow provided that the 987 // multiplication is performed _after_ the evaluation of the binomial 988 // coefficient. 989 const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType()); 990 if (isa<SCEVCouldNotCompute>(Coeff)) 991 return Coeff; 992 993 Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff)); 994 } 995 return Result; 996 } 997 998 //===----------------------------------------------------------------------===// 999 // SCEV Expression folder implementations 1000 //===----------------------------------------------------------------------===// 1001 1002 const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty, 1003 unsigned Depth) { 1004 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) && 1005 "This is not a truncating conversion!"); 1006 assert(isSCEVable(Ty) && 1007 "This is not a conversion to a SCEVable type!"); 1008 Ty = getEffectiveSCEVType(Ty); 1009 1010 FoldingSetNodeID ID; 1011 ID.AddInteger(scTruncate); 1012 ID.AddPointer(Op); 1013 ID.AddPointer(Ty); 1014 void *IP = nullptr; 1015 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1016 1017 // Fold if the operand is constant. 1018 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1019 return getConstant( 1020 cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty))); 1021 1022 // trunc(trunc(x)) --> trunc(x) 1023 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) 1024 return getTruncateExpr(ST->getOperand(), Ty, Depth + 1); 1025 1026 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing 1027 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1028 return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1); 1029 1030 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing 1031 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1032 return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1); 1033 1034 if (Depth > MaxCastDepth) { 1035 SCEV *S = 1036 new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty); 1037 UniqueSCEVs.InsertNode(S, IP); 1038 addToLoopUseLists(S); 1039 return S; 1040 } 1041 1042 // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and 1043 // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN), 1044 // if after transforming we have at most one truncate, not counting truncates 1045 // that replace other casts. 1046 if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) { 1047 auto *CommOp = cast<SCEVCommutativeExpr>(Op); 1048 SmallVector<const SCEV *, 4> Operands; 1049 unsigned numTruncs = 0; 1050 for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2; 1051 ++i) { 1052 const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1); 1053 if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S)) 1054 numTruncs++; 1055 Operands.push_back(S); 1056 } 1057 if (numTruncs < 2) { 1058 if (isa<SCEVAddExpr>(Op)) 1059 return getAddExpr(Operands); 1060 else if (isa<SCEVMulExpr>(Op)) 1061 return getMulExpr(Operands); 1062 else 1063 llvm_unreachable("Unexpected SCEV type for Op."); 1064 } 1065 // Although we checked in the beginning that ID is not in the cache, it is 1066 // possible that during recursion and different modification ID was inserted 1067 // into the cache. So if we find it, just return it. 1068 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) 1069 return S; 1070 } 1071 1072 // If the input value is a chrec scev, truncate the chrec's operands. 1073 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 1074 SmallVector<const SCEV *, 4> Operands; 1075 for (const SCEV *Op : AddRec->operands()) 1076 Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1)); 1077 return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap); 1078 } 1079 1080 // The cast wasn't folded; create an explicit cast node. We can reuse 1081 // the existing insert position since if we get here, we won't have 1082 // made any changes which would invalidate it. 1083 SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), 1084 Op, Ty); 1085 UniqueSCEVs.InsertNode(S, IP); 1086 addToLoopUseLists(S); 1087 return S; 1088 } 1089 1090 // Get the limit of a recurrence such that incrementing by Step cannot cause 1091 // signed overflow as long as the value of the recurrence within the 1092 // loop does not exceed this limit before incrementing. 1093 static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step, 1094 ICmpInst::Predicate *Pred, 1095 ScalarEvolution *SE) { 1096 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); 1097 if (SE->isKnownPositive(Step)) { 1098 *Pred = ICmpInst::ICMP_SLT; 1099 return SE->getConstant(APInt::getSignedMinValue(BitWidth) - 1100 SE->getSignedRangeMax(Step)); 1101 } 1102 if (SE->isKnownNegative(Step)) { 1103 *Pred = ICmpInst::ICMP_SGT; 1104 return SE->getConstant(APInt::getSignedMaxValue(BitWidth) - 1105 SE->getSignedRangeMin(Step)); 1106 } 1107 return nullptr; 1108 } 1109 1110 // Get the limit of a recurrence such that incrementing by Step cannot cause 1111 // unsigned overflow as long as the value of the recurrence within the loop does 1112 // not exceed this limit before incrementing. 1113 static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step, 1114 ICmpInst::Predicate *Pred, 1115 ScalarEvolution *SE) { 1116 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); 1117 *Pred = ICmpInst::ICMP_ULT; 1118 1119 return SE->getConstant(APInt::getMinValue(BitWidth) - 1120 SE->getUnsignedRangeMax(Step)); 1121 } 1122 1123 namespace { 1124 1125 struct ExtendOpTraitsBase { 1126 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *, 1127 unsigned); 1128 }; 1129 1130 // Used to make code generic over signed and unsigned overflow. 1131 template <typename ExtendOp> struct ExtendOpTraits { 1132 // Members present: 1133 // 1134 // static const SCEV::NoWrapFlags WrapType; 1135 // 1136 // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr; 1137 // 1138 // static const SCEV *getOverflowLimitForStep(const SCEV *Step, 1139 // ICmpInst::Predicate *Pred, 1140 // ScalarEvolution *SE); 1141 }; 1142 1143 template <> 1144 struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase { 1145 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW; 1146 1147 static const GetExtendExprTy GetExtendExpr; 1148 1149 static const SCEV *getOverflowLimitForStep(const SCEV *Step, 1150 ICmpInst::Predicate *Pred, 1151 ScalarEvolution *SE) { 1152 return getSignedOverflowLimitForStep(Step, Pred, SE); 1153 } 1154 }; 1155 1156 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< 1157 SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr; 1158 1159 template <> 1160 struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase { 1161 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW; 1162 1163 static const GetExtendExprTy GetExtendExpr; 1164 1165 static const SCEV *getOverflowLimitForStep(const SCEV *Step, 1166 ICmpInst::Predicate *Pred, 1167 ScalarEvolution *SE) { 1168 return getUnsignedOverflowLimitForStep(Step, Pred, SE); 1169 } 1170 }; 1171 1172 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< 1173 SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr; 1174 1175 } // end anonymous namespace 1176 1177 // The recurrence AR has been shown to have no signed/unsigned wrap or something 1178 // close to it. Typically, if we can prove NSW/NUW for AR, then we can just as 1179 // easily prove NSW/NUW for its preincrement or postincrement sibling. This 1180 // allows normalizing a sign/zero extended AddRec as such: {sext/zext(Step + 1181 // Start),+,Step} => {(Step + sext/zext(Start),+,Step} As a result, the 1182 // expression "Step + sext/zext(PreIncAR)" is congruent with 1183 // "sext/zext(PostIncAR)" 1184 template <typename ExtendOpTy> 1185 static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, 1186 ScalarEvolution *SE, unsigned Depth) { 1187 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; 1188 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; 1189 1190 const Loop *L = AR->getLoop(); 1191 const SCEV *Start = AR->getStart(); 1192 const SCEV *Step = AR->getStepRecurrence(*SE); 1193 1194 // Check for a simple looking step prior to loop entry. 1195 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start); 1196 if (!SA) 1197 return nullptr; 1198 1199 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV 1200 // subtraction is expensive. For this purpose, perform a quick and dirty 1201 // difference, by checking for Step in the operand list. 1202 SmallVector<const SCEV *, 4> DiffOps; 1203 for (const SCEV *Op : SA->operands()) 1204 if (Op != Step) 1205 DiffOps.push_back(Op); 1206 1207 if (DiffOps.size() == SA->getNumOperands()) 1208 return nullptr; 1209 1210 // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` + 1211 // `Step`: 1212 1213 // 1. NSW/NUW flags on the step increment. 1214 auto PreStartFlags = 1215 ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW); 1216 const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags); 1217 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>( 1218 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap)); 1219 1220 // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies 1221 // "S+X does not sign/unsign-overflow". 1222 // 1223 1224 const SCEV *BECount = SE->getBackedgeTakenCount(L); 1225 if (PreAR && PreAR->getNoWrapFlags(WrapType) && 1226 !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount)) 1227 return PreStart; 1228 1229 // 2. Direct overflow check on the step operation's expression. 1230 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType()); 1231 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2); 1232 const SCEV *OperandExtendedStart = 1233 SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth), 1234 (SE->*GetExtendExpr)(Step, WideTy, Depth)); 1235 if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) { 1236 if (PreAR && AR->getNoWrapFlags(WrapType)) { 1237 // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW 1238 // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then 1239 // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact. 1240 const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType); 1241 } 1242 return PreStart; 1243 } 1244 1245 // 3. Loop precondition. 1246 ICmpInst::Predicate Pred; 1247 const SCEV *OverflowLimit = 1248 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE); 1249 1250 if (OverflowLimit && 1251 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) 1252 return PreStart; 1253 1254 return nullptr; 1255 } 1256 1257 // Get the normalized zero or sign extended expression for this AddRec's Start. 1258 template <typename ExtendOpTy> 1259 static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, 1260 ScalarEvolution *SE, 1261 unsigned Depth) { 1262 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; 1263 1264 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth); 1265 if (!PreStart) 1266 return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth); 1267 1268 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty, 1269 Depth), 1270 (SE->*GetExtendExpr)(PreStart, Ty, Depth)); 1271 } 1272 1273 // Try to prove away overflow by looking at "nearby" add recurrences. A 1274 // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it 1275 // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`. 1276 // 1277 // Formally: 1278 // 1279 // {S,+,X} == {S-T,+,X} + T 1280 // => Ext({S,+,X}) == Ext({S-T,+,X} + T) 1281 // 1282 // If ({S-T,+,X} + T) does not overflow ... (1) 1283 // 1284 // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T) 1285 // 1286 // If {S-T,+,X} does not overflow ... (2) 1287 // 1288 // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T) 1289 // == {Ext(S-T)+Ext(T),+,Ext(X)} 1290 // 1291 // If (S-T)+T does not overflow ... (3) 1292 // 1293 // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)} 1294 // == {Ext(S),+,Ext(X)} == LHS 1295 // 1296 // Thus, if (1), (2) and (3) are true for some T, then 1297 // Ext({S,+,X}) == {Ext(S),+,Ext(X)} 1298 // 1299 // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T) 1300 // does not overflow" restricted to the 0th iteration. Therefore we only need 1301 // to check for (1) and (2). 1302 // 1303 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T 1304 // is `Delta` (defined below). 1305 template <typename ExtendOpTy> 1306 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start, 1307 const SCEV *Step, 1308 const Loop *L) { 1309 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; 1310 1311 // We restrict `Start` to a constant to prevent SCEV from spending too much 1312 // time here. It is correct (but more expensive) to continue with a 1313 // non-constant `Start` and do a general SCEV subtraction to compute 1314 // `PreStart` below. 1315 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start); 1316 if (!StartC) 1317 return false; 1318 1319 APInt StartAI = StartC->getAPInt(); 1320 1321 for (unsigned Delta : {-2, -1, 1, 2}) { 1322 const SCEV *PreStart = getConstant(StartAI - Delta); 1323 1324 FoldingSetNodeID ID; 1325 ID.AddInteger(scAddRecExpr); 1326 ID.AddPointer(PreStart); 1327 ID.AddPointer(Step); 1328 ID.AddPointer(L); 1329 void *IP = nullptr; 1330 const auto *PreAR = 1331 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1332 1333 // Give up if we don't already have the add recurrence we need because 1334 // actually constructing an add recurrence is relatively expensive. 1335 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2) 1336 const SCEV *DeltaS = getConstant(StartC->getType(), Delta); 1337 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 1338 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep( 1339 DeltaS, &Pred, this); 1340 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1) 1341 return true; 1342 } 1343 } 1344 1345 return false; 1346 } 1347 1348 // Finds an integer D for an expression (C + x + y + ...) such that the top 1349 // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or 1350 // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is 1351 // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and 1352 // the (C + x + y + ...) expression is \p WholeAddExpr. 1353 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1354 const SCEVConstant *ConstantTerm, 1355 const SCEVAddExpr *WholeAddExpr) { 1356 const APInt &C = ConstantTerm->getAPInt(); 1357 const unsigned BitWidth = C.getBitWidth(); 1358 // Find number of trailing zeros of (x + y + ...) w/o the C first: 1359 uint32_t TZ = BitWidth; 1360 for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I) 1361 TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I))); 1362 if (TZ) { 1363 // Set D to be as many least significant bits of C as possible while still 1364 // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap: 1365 return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C; 1366 } 1367 return APInt(BitWidth, 0); 1368 } 1369 1370 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top 1371 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the 1372 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p 1373 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. 1374 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1375 const APInt &ConstantStart, 1376 const SCEV *Step) { 1377 const unsigned BitWidth = ConstantStart.getBitWidth(); 1378 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1379 if (TZ) 1380 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1381 : ConstantStart; 1382 return APInt(BitWidth, 0); 1383 } 1384 1385 const SCEV * 1386 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1387 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1388 "This is not an extending conversion!"); 1389 assert(isSCEVable(Ty) && 1390 "This is not a conversion to a SCEVable type!"); 1391 Ty = getEffectiveSCEVType(Ty); 1392 1393 // Fold if the operand is constant. 1394 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1395 return getConstant( 1396 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1397 1398 // zext(zext(x)) --> zext(x) 1399 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1400 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1401 1402 // Before doing any expensive analysis, check to see if we've already 1403 // computed a SCEV for this Op and Ty. 1404 FoldingSetNodeID ID; 1405 ID.AddInteger(scZeroExtend); 1406 ID.AddPointer(Op); 1407 ID.AddPointer(Ty); 1408 void *IP = nullptr; 1409 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1410 if (Depth > MaxCastDepth) { 1411 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1412 Op, Ty); 1413 UniqueSCEVs.InsertNode(S, IP); 1414 addToLoopUseLists(S); 1415 return S; 1416 } 1417 1418 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1419 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1420 // It's possible the bits taken off by the truncate were all zero bits. If 1421 // so, we should be able to simplify this further. 1422 const SCEV *X = ST->getOperand(); 1423 ConstantRange CR = getUnsignedRange(X); 1424 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1425 unsigned NewBits = getTypeSizeInBits(Ty); 1426 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1427 CR.zextOrTrunc(NewBits))) 1428 return getTruncateOrZeroExtend(X, Ty, Depth); 1429 } 1430 1431 // If the input value is a chrec scev, and we can prove that the value 1432 // did not overflow the old, smaller, value, we can zero extend all of the 1433 // operands (often constants). This allows analysis of something like 1434 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } 1435 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1436 if (AR->isAffine()) { 1437 const SCEV *Start = AR->getStart(); 1438 const SCEV *Step = AR->getStepRecurrence(*this); 1439 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1440 const Loop *L = AR->getLoop(); 1441 1442 if (!AR->hasNoUnsignedWrap()) { 1443 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1444 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1445 } 1446 1447 // If we have special knowledge that this addrec won't overflow, 1448 // we don't need to do any further analysis. 1449 if (AR->hasNoUnsignedWrap()) 1450 return getAddRecExpr( 1451 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1452 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1453 1454 // Check whether the backedge-taken count is SCEVCouldNotCompute. 1455 // Note that this serves two purposes: It filters out loops that are 1456 // simply not analyzable, and it covers the case where this code is 1457 // being called from within backedge-taken count analysis, such that 1458 // attempting to ask for the backedge-taken count would likely result 1459 // in infinite recursion. In the later case, the analysis code will 1460 // cope with a conservative value, and it will take care to purge 1461 // that value once it has finished. 1462 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 1463 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1464 // Manually compute the final value for AR, checking for 1465 // overflow. 1466 1467 // Check whether the backedge-taken count can be losslessly casted to 1468 // the addrec's type. The count is always unsigned. 1469 const SCEV *CastedMaxBECount = 1470 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); 1471 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( 1472 CastedMaxBECount, MaxBECount->getType(), Depth); 1473 if (MaxBECount == RecastedMaxBECount) { 1474 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1475 // Check whether Start+Step*MaxBECount has no unsigned overflow. 1476 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, 1477 SCEV::FlagAnyWrap, Depth + 1); 1478 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, 1479 SCEV::FlagAnyWrap, 1480 Depth + 1), 1481 WideTy, Depth + 1); 1482 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); 1483 const SCEV *WideMaxBECount = 1484 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1485 const SCEV *OperandExtendedAdd = 1486 getAddExpr(WideStart, 1487 getMulExpr(WideMaxBECount, 1488 getZeroExtendExpr(Step, WideTy, Depth + 1), 1489 SCEV::FlagAnyWrap, Depth + 1), 1490 SCEV::FlagAnyWrap, Depth + 1); 1491 if (ZAdd == OperandExtendedAdd) { 1492 // Cache knowledge of AR NUW, which is propagated to this AddRec. 1493 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1494 // Return the expression with the addrec on the outside. 1495 return getAddRecExpr( 1496 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1497 Depth + 1), 1498 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1499 AR->getNoWrapFlags()); 1500 } 1501 // Similar to above, only this time treat the step value as signed. 1502 // This covers loops that count down. 1503 OperandExtendedAdd = 1504 getAddExpr(WideStart, 1505 getMulExpr(WideMaxBECount, 1506 getSignExtendExpr(Step, WideTy, Depth + 1), 1507 SCEV::FlagAnyWrap, Depth + 1), 1508 SCEV::FlagAnyWrap, Depth + 1); 1509 if (ZAdd == OperandExtendedAdd) { 1510 // Cache knowledge of AR NW, which is propagated to this AddRec. 1511 // Negative step causes unsigned wrap, but it still can't self-wrap. 1512 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1513 // Return the expression with the addrec on the outside. 1514 return getAddRecExpr( 1515 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1516 Depth + 1), 1517 getSignExtendExpr(Step, Ty, Depth + 1), L, 1518 AR->getNoWrapFlags()); 1519 } 1520 } 1521 } 1522 1523 // Normally, in the cases we can prove no-overflow via a 1524 // backedge guarding condition, we can also compute a backedge 1525 // taken count for the loop. The exceptions are assumptions and 1526 // guards present in the loop -- SCEV is not great at exploiting 1527 // these to compute max backedge taken counts, but can still use 1528 // these to prove lack of overflow. Use this fact to avoid 1529 // doing extra work that may not pay off. 1530 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1531 !AC.assumptions().empty()) { 1532 // If the backedge is guarded by a comparison with the pre-inc 1533 // value the addrec is safe. Also, if the entry is guarded by 1534 // a comparison with the start value and the backedge is 1535 // guarded by a comparison with the post-inc value, the addrec 1536 // is safe. 1537 if (isKnownPositive(Step)) { 1538 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1539 getUnsignedRangeMax(Step)); 1540 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1541 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 1542 // Cache knowledge of AR NUW, which is propagated to this 1543 // AddRec. 1544 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1545 // Return the expression with the addrec on the outside. 1546 return getAddRecExpr( 1547 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1548 Depth + 1), 1549 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1550 AR->getNoWrapFlags()); 1551 } 1552 } else if (isKnownNegative(Step)) { 1553 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1554 getSignedRangeMin(Step)); 1555 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1556 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1557 // Cache knowledge of AR NW, which is propagated to this 1558 // AddRec. Negative step causes unsigned wrap, but it 1559 // still can't self-wrap. 1560 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1561 // Return the expression with the addrec on the outside. 1562 return getAddRecExpr( 1563 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1564 Depth + 1), 1565 getSignExtendExpr(Step, Ty, Depth + 1), L, 1566 AR->getNoWrapFlags()); 1567 } 1568 } 1569 } 1570 1571 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw> 1572 // if D + (C - D + Step * n) could be proven to not unsigned wrap 1573 // where D maximizes the number of trailing zeros of (C - D + Step * n) 1574 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 1575 const APInt &C = SC->getAPInt(); 1576 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 1577 if (D != 0) { 1578 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1579 const SCEV *SResidual = 1580 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 1581 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1582 return getAddExpr(SZExtD, SZExtR, 1583 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1584 Depth + 1); 1585 } 1586 } 1587 1588 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1589 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1590 return getAddRecExpr( 1591 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1592 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1593 } 1594 } 1595 1596 // zext(A % B) --> zext(A) % zext(B) 1597 { 1598 const SCEV *LHS; 1599 const SCEV *RHS; 1600 if (matchURem(Op, LHS, RHS)) 1601 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), 1602 getZeroExtendExpr(RHS, Ty, Depth + 1)); 1603 } 1604 1605 // zext(A / B) --> zext(A) / zext(B). 1606 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op)) 1607 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1), 1608 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1)); 1609 1610 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1611 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1612 if (SA->hasNoUnsignedWrap()) { 1613 // If the addition does not unsign overflow then we can, by definition, 1614 // commute the zero extension with the addition operation. 1615 SmallVector<const SCEV *, 4> Ops; 1616 for (const auto *Op : SA->operands()) 1617 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1618 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1619 } 1620 1621 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...)) 1622 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap 1623 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1624 // 1625 // Often address arithmetics contain expressions like 1626 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). 1627 // This transformation is useful while proving that such expressions are 1628 // equal or differ by a small constant amount, see LoadStoreVectorizer pass. 1629 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1630 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1631 if (D != 0) { 1632 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1633 const SCEV *SResidual = 1634 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1635 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1636 return getAddExpr(SZExtD, SZExtR, 1637 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1638 Depth + 1); 1639 } 1640 } 1641 } 1642 1643 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1644 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1645 if (SM->hasNoUnsignedWrap()) { 1646 // If the multiply does not unsign overflow then we can, by definition, 1647 // commute the zero extension with the multiply operation. 1648 SmallVector<const SCEV *, 4> Ops; 1649 for (const auto *Op : SM->operands()) 1650 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1651 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1652 } 1653 1654 // zext(2^K * (trunc X to iN)) to iM -> 1655 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1656 // 1657 // Proof: 1658 // 1659 // zext(2^K * (trunc X to iN)) to iM 1660 // = zext((trunc X to iN) << K) to iM 1661 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1662 // (because shl removes the top K bits) 1663 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1664 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1665 // 1666 if (SM->getNumOperands() == 2) 1667 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1668 if (MulLHS->getAPInt().isPowerOf2()) 1669 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1670 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1671 MulLHS->getAPInt().logBase2(); 1672 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1673 return getMulExpr( 1674 getZeroExtendExpr(MulLHS, Ty), 1675 getZeroExtendExpr( 1676 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1677 SCEV::FlagNUW, Depth + 1); 1678 } 1679 } 1680 1681 // The cast wasn't folded; create an explicit cast node. 1682 // Recompute the insert position, as it may have been invalidated. 1683 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1684 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1685 Op, Ty); 1686 UniqueSCEVs.InsertNode(S, IP); 1687 addToLoopUseLists(S); 1688 return S; 1689 } 1690 1691 const SCEV * 1692 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1693 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1694 "This is not an extending conversion!"); 1695 assert(isSCEVable(Ty) && 1696 "This is not a conversion to a SCEVable type!"); 1697 Ty = getEffectiveSCEVType(Ty); 1698 1699 // Fold if the operand is constant. 1700 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1701 return getConstant( 1702 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1703 1704 // sext(sext(x)) --> sext(x) 1705 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1706 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1707 1708 // sext(zext(x)) --> zext(x) 1709 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1710 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1711 1712 // Before doing any expensive analysis, check to see if we've already 1713 // computed a SCEV for this Op and Ty. 1714 FoldingSetNodeID ID; 1715 ID.AddInteger(scSignExtend); 1716 ID.AddPointer(Op); 1717 ID.AddPointer(Ty); 1718 void *IP = nullptr; 1719 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1720 // Limit recursion depth. 1721 if (Depth > MaxCastDepth) { 1722 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1723 Op, Ty); 1724 UniqueSCEVs.InsertNode(S, IP); 1725 addToLoopUseLists(S); 1726 return S; 1727 } 1728 1729 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1730 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1731 // It's possible the bits taken off by the truncate were all sign bits. If 1732 // so, we should be able to simplify this further. 1733 const SCEV *X = ST->getOperand(); 1734 ConstantRange CR = getSignedRange(X); 1735 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1736 unsigned NewBits = getTypeSizeInBits(Ty); 1737 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1738 CR.sextOrTrunc(NewBits))) 1739 return getTruncateOrSignExtend(X, Ty, Depth); 1740 } 1741 1742 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1743 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1744 if (SA->hasNoSignedWrap()) { 1745 // If the addition does not sign overflow then we can, by definition, 1746 // commute the sign extension with the addition operation. 1747 SmallVector<const SCEV *, 4> Ops; 1748 for (const auto *Op : SA->operands()) 1749 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1750 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1751 } 1752 1753 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1754 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1755 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1756 // 1757 // For instance, this will bring two seemingly different expressions: 1758 // 1 + sext(5 + 20 * %x + 24 * %y) and 1759 // sext(6 + 20 * %x + 24 * %y) 1760 // to the same form: 1761 // 2 + sext(4 + 20 * %x + 24 * %y) 1762 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1763 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1764 if (D != 0) { 1765 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1766 const SCEV *SResidual = 1767 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1768 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1769 return getAddExpr(SSExtD, SSExtR, 1770 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1771 Depth + 1); 1772 } 1773 } 1774 } 1775 // If the input value is a chrec scev, and we can prove that the value 1776 // did not overflow the old, smaller, value, we can sign extend all of the 1777 // operands (often constants). This allows analysis of something like 1778 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1779 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1780 if (AR->isAffine()) { 1781 const SCEV *Start = AR->getStart(); 1782 const SCEV *Step = AR->getStepRecurrence(*this); 1783 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1784 const Loop *L = AR->getLoop(); 1785 1786 if (!AR->hasNoSignedWrap()) { 1787 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1788 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1789 } 1790 1791 // If we have special knowledge that this addrec won't overflow, 1792 // we don't need to do any further analysis. 1793 if (AR->hasNoSignedWrap()) 1794 return getAddRecExpr( 1795 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1796 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW); 1797 1798 // Check whether the backedge-taken count is SCEVCouldNotCompute. 1799 // Note that this serves two purposes: It filters out loops that are 1800 // simply not analyzable, and it covers the case where this code is 1801 // being called from within backedge-taken count analysis, such that 1802 // attempting to ask for the backedge-taken count would likely result 1803 // in infinite recursion. In the later case, the analysis code will 1804 // cope with a conservative value, and it will take care to purge 1805 // that value once it has finished. 1806 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 1807 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1808 // Manually compute the final value for AR, checking for 1809 // overflow. 1810 1811 // Check whether the backedge-taken count can be losslessly casted to 1812 // the addrec's type. The count is always unsigned. 1813 const SCEV *CastedMaxBECount = 1814 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); 1815 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( 1816 CastedMaxBECount, MaxBECount->getType(), Depth); 1817 if (MaxBECount == RecastedMaxBECount) { 1818 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1819 // Check whether Start+Step*MaxBECount has no signed overflow. 1820 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, 1821 SCEV::FlagAnyWrap, Depth + 1); 1822 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, 1823 SCEV::FlagAnyWrap, 1824 Depth + 1), 1825 WideTy, Depth + 1); 1826 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); 1827 const SCEV *WideMaxBECount = 1828 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1829 const SCEV *OperandExtendedAdd = 1830 getAddExpr(WideStart, 1831 getMulExpr(WideMaxBECount, 1832 getSignExtendExpr(Step, WideTy, Depth + 1), 1833 SCEV::FlagAnyWrap, Depth + 1), 1834 SCEV::FlagAnyWrap, Depth + 1); 1835 if (SAdd == OperandExtendedAdd) { 1836 // Cache knowledge of AR NSW, which is propagated to this AddRec. 1837 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1838 // Return the expression with the addrec on the outside. 1839 return getAddRecExpr( 1840 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1841 Depth + 1), 1842 getSignExtendExpr(Step, Ty, Depth + 1), L, 1843 AR->getNoWrapFlags()); 1844 } 1845 // Similar to above, only this time treat the step value as unsigned. 1846 // This covers loops that count up with an unsigned step. 1847 OperandExtendedAdd = 1848 getAddExpr(WideStart, 1849 getMulExpr(WideMaxBECount, 1850 getZeroExtendExpr(Step, WideTy, Depth + 1), 1851 SCEV::FlagAnyWrap, Depth + 1), 1852 SCEV::FlagAnyWrap, Depth + 1); 1853 if (SAdd == OperandExtendedAdd) { 1854 // If AR wraps around then 1855 // 1856 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 1857 // => SAdd != OperandExtendedAdd 1858 // 1859 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 1860 // (SAdd == OperandExtendedAdd => AR is NW) 1861 1862 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1863 1864 // Return the expression with the addrec on the outside. 1865 return getAddRecExpr( 1866 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1867 Depth + 1), 1868 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1869 AR->getNoWrapFlags()); 1870 } 1871 } 1872 } 1873 1874 // Normally, in the cases we can prove no-overflow via a 1875 // backedge guarding condition, we can also compute a backedge 1876 // taken count for the loop. The exceptions are assumptions and 1877 // guards present in the loop -- SCEV is not great at exploiting 1878 // these to compute max backedge taken counts, but can still use 1879 // these to prove lack of overflow. Use this fact to avoid 1880 // doing extra work that may not pay off. 1881 1882 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1883 !AC.assumptions().empty()) { 1884 // If the backedge is guarded by a comparison with the pre-inc 1885 // value the addrec is safe. Also, if the entry is guarded by 1886 // a comparison with the start value and the backedge is 1887 // guarded by a comparison with the post-inc value, the addrec 1888 // is safe. 1889 ICmpInst::Predicate Pred; 1890 const SCEV *OverflowLimit = 1891 getSignedOverflowLimitForStep(Step, &Pred, this); 1892 if (OverflowLimit && 1893 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 1894 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 1895 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 1896 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1897 return getAddRecExpr( 1898 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1899 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1900 } 1901 } 1902 1903 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw> 1904 // if D + (C - D + Step * n) could be proven to not signed wrap 1905 // where D maximizes the number of trailing zeros of (C - D + Step * n) 1906 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 1907 const APInt &C = SC->getAPInt(); 1908 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 1909 if (D != 0) { 1910 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1911 const SCEV *SResidual = 1912 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 1913 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1914 return getAddExpr(SSExtD, SSExtR, 1915 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1916 Depth + 1); 1917 } 1918 } 1919 1920 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 1921 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1922 return getAddRecExpr( 1923 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1924 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1925 } 1926 } 1927 1928 // If the input value is provably positive and we could not simplify 1929 // away the sext build a zext instead. 1930 if (isKnownNonNegative(Op)) 1931 return getZeroExtendExpr(Op, Ty, Depth + 1); 1932 1933 // The cast wasn't folded; create an explicit cast node. 1934 // Recompute the insert position, as it may have been invalidated. 1935 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1936 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1937 Op, Ty); 1938 UniqueSCEVs.InsertNode(S, IP); 1939 addToLoopUseLists(S); 1940 return S; 1941 } 1942 1943 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 1944 /// unspecified bits out to the given type. 1945 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 1946 Type *Ty) { 1947 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1948 "This is not an extending conversion!"); 1949 assert(isSCEVable(Ty) && 1950 "This is not a conversion to a SCEVable type!"); 1951 Ty = getEffectiveSCEVType(Ty); 1952 1953 // Sign-extend negative constants. 1954 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1955 if (SC->getAPInt().isNegative()) 1956 return getSignExtendExpr(Op, Ty); 1957 1958 // Peel off a truncate cast. 1959 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 1960 const SCEV *NewOp = T->getOperand(); 1961 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 1962 return getAnyExtendExpr(NewOp, Ty); 1963 return getTruncateOrNoop(NewOp, Ty); 1964 } 1965 1966 // Next try a zext cast. If the cast is folded, use it. 1967 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 1968 if (!isa<SCEVZeroExtendExpr>(ZExt)) 1969 return ZExt; 1970 1971 // Next try a sext cast. If the cast is folded, use it. 1972 const SCEV *SExt = getSignExtendExpr(Op, Ty); 1973 if (!isa<SCEVSignExtendExpr>(SExt)) 1974 return SExt; 1975 1976 // Force the cast to be folded into the operands of an addrec. 1977 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 1978 SmallVector<const SCEV *, 4> Ops; 1979 for (const SCEV *Op : AR->operands()) 1980 Ops.push_back(getAnyExtendExpr(Op, Ty)); 1981 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 1982 } 1983 1984 // If the expression is obviously signed, use the sext cast value. 1985 if (isa<SCEVSMaxExpr>(Op)) 1986 return SExt; 1987 1988 // Absent any other information, use the zext cast value. 1989 return ZExt; 1990 } 1991 1992 /// Process the given Ops list, which is a list of operands to be added under 1993 /// the given scale, update the given map. This is a helper function for 1994 /// getAddRecExpr. As an example of what it does, given a sequence of operands 1995 /// that would form an add expression like this: 1996 /// 1997 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 1998 /// 1999 /// where A and B are constants, update the map with these values: 2000 /// 2001 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2002 /// 2003 /// and add 13 + A*B*29 to AccumulatedConstant. 2004 /// This will allow getAddRecExpr to produce this: 2005 /// 2006 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2007 /// 2008 /// This form often exposes folding opportunities that are hidden in 2009 /// the original operand list. 2010 /// 2011 /// Return true iff it appears that any interesting folding opportunities 2012 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 2013 /// the common case where no interesting opportunities are present, and 2014 /// is also used as a check to avoid infinite recursion. 2015 static bool 2016 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2017 SmallVectorImpl<const SCEV *> &NewOps, 2018 APInt &AccumulatedConstant, 2019 const SCEV *const *Ops, size_t NumOperands, 2020 const APInt &Scale, 2021 ScalarEvolution &SE) { 2022 bool Interesting = false; 2023 2024 // Iterate over the add operands. They are sorted, with constants first. 2025 unsigned i = 0; 2026 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2027 ++i; 2028 // Pull a buried constant out to the outside. 2029 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2030 Interesting = true; 2031 AccumulatedConstant += Scale * C->getAPInt(); 2032 } 2033 2034 // Next comes everything else. We're especially interested in multiplies 2035 // here, but they're in the middle, so just visit the rest with one loop. 2036 for (; i != NumOperands; ++i) { 2037 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2038 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2039 APInt NewScale = 2040 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2041 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2042 // A multiplication of a constant with another add; recurse. 2043 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2044 Interesting |= 2045 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2046 Add->op_begin(), Add->getNumOperands(), 2047 NewScale, SE); 2048 } else { 2049 // A multiplication of a constant with some other value. Update 2050 // the map. 2051 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2052 const SCEV *Key = SE.getMulExpr(MulOps); 2053 auto Pair = M.insert({Key, NewScale}); 2054 if (Pair.second) { 2055 NewOps.push_back(Pair.first->first); 2056 } else { 2057 Pair.first->second += NewScale; 2058 // The map already had an entry for this value, which may indicate 2059 // a folding opportunity. 2060 Interesting = true; 2061 } 2062 } 2063 } else { 2064 // An ordinary operand. Update the map. 2065 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2066 M.insert({Ops[i], Scale}); 2067 if (Pair.second) { 2068 NewOps.push_back(Pair.first->first); 2069 } else { 2070 Pair.first->second += Scale; 2071 // The map already had an entry for this value, which may indicate 2072 // a folding opportunity. 2073 Interesting = true; 2074 } 2075 } 2076 } 2077 2078 return Interesting; 2079 } 2080 2081 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2082 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2083 // can't-overflow flags for the operation if possible. 2084 static SCEV::NoWrapFlags 2085 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2086 const ArrayRef<const SCEV *> Ops, 2087 SCEV::NoWrapFlags Flags) { 2088 using namespace std::placeholders; 2089 2090 using OBO = OverflowingBinaryOperator; 2091 2092 bool CanAnalyze = 2093 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2094 (void)CanAnalyze; 2095 assert(CanAnalyze && "don't call from other places!"); 2096 2097 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2098 SCEV::NoWrapFlags SignOrUnsignWrap = 2099 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2100 2101 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2102 auto IsKnownNonNegative = [&](const SCEV *S) { 2103 return SE->isKnownNonNegative(S); 2104 }; 2105 2106 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2107 Flags = 2108 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2109 2110 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2111 2112 if (SignOrUnsignWrap != SignOrUnsignMask && 2113 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2114 isa<SCEVConstant>(Ops[0])) { 2115 2116 auto Opcode = [&] { 2117 switch (Type) { 2118 case scAddExpr: 2119 return Instruction::Add; 2120 case scMulExpr: 2121 return Instruction::Mul; 2122 default: 2123 llvm_unreachable("Unexpected SCEV op."); 2124 } 2125 }(); 2126 2127 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2128 2129 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2130 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2131 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2132 Opcode, C, OBO::NoSignedWrap); 2133 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2134 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2135 } 2136 2137 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 2138 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2139 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2140 Opcode, C, OBO::NoUnsignedWrap); 2141 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2142 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2143 } 2144 } 2145 2146 return Flags; 2147 } 2148 2149 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2150 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2151 } 2152 2153 /// Get a canonical add expression, or something simpler if possible. 2154 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2155 SCEV::NoWrapFlags Flags, 2156 unsigned Depth) { 2157 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2158 "only nuw or nsw allowed"); 2159 assert(!Ops.empty() && "Cannot get empty add!"); 2160 if (Ops.size() == 1) return Ops[0]; 2161 #ifndef NDEBUG 2162 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2163 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2164 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2165 "SCEVAddExpr operand types don't match!"); 2166 #endif 2167 2168 // Sort by complexity, this groups all similar expression types together. 2169 GroupByComplexity(Ops, &LI, DT); 2170 2171 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags); 2172 2173 // If there are any constants, fold them together. 2174 unsigned Idx = 0; 2175 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2176 ++Idx; 2177 assert(Idx < Ops.size()); 2178 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2179 // We found two constants, fold them together! 2180 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2181 if (Ops.size() == 2) return Ops[0]; 2182 Ops.erase(Ops.begin()+1); // Erase the folded element 2183 LHSC = cast<SCEVConstant>(Ops[0]); 2184 } 2185 2186 // If we are left with a constant zero being added, strip it off. 2187 if (LHSC->getValue()->isZero()) { 2188 Ops.erase(Ops.begin()); 2189 --Idx; 2190 } 2191 2192 if (Ops.size() == 1) return Ops[0]; 2193 } 2194 2195 // Limit recursion calls depth. 2196 if (Depth > MaxArithDepth || hasHugeExpression(Ops)) 2197 return getOrCreateAddExpr(Ops, Flags); 2198 2199 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) { 2200 static_cast<SCEVAddExpr *>(S)->setNoWrapFlags(Flags); 2201 return S; 2202 } 2203 2204 // Okay, check to see if the same value occurs in the operand list more than 2205 // once. If so, merge them together into an multiply expression. Since we 2206 // sorted the list, these values are required to be adjacent. 2207 Type *Ty = Ops[0]->getType(); 2208 bool FoundMatch = false; 2209 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 2210 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 2211 // Scan ahead to count how many equal operands there are. 2212 unsigned Count = 2; 2213 while (i+Count != e && Ops[i+Count] == Ops[i]) 2214 ++Count; 2215 // Merge the values into a multiply. 2216 const SCEV *Scale = getConstant(Ty, Count); 2217 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); 2218 if (Ops.size() == Count) 2219 return Mul; 2220 Ops[i] = Mul; 2221 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 2222 --i; e -= Count - 1; 2223 FoundMatch = true; 2224 } 2225 if (FoundMatch) 2226 return getAddExpr(Ops, Flags, Depth + 1); 2227 2228 // Check for truncates. If all the operands are truncated from the same 2229 // type, see if factoring out the truncate would permit the result to be 2230 // folded. eg., n*trunc(x) + m*trunc(y) --> trunc(trunc(m)*x + trunc(n)*y) 2231 // if the contents of the resulting outer trunc fold to something simple. 2232 auto FindTruncSrcType = [&]() -> Type * { 2233 // We're ultimately looking to fold an addrec of truncs and muls of only 2234 // constants and truncs, so if we find any other types of SCEV 2235 // as operands of the addrec then we bail and return nullptr here. 2236 // Otherwise, we return the type of the operand of a trunc that we find. 2237 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) 2238 return T->getOperand()->getType(); 2239 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2240 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); 2241 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) 2242 return T->getOperand()->getType(); 2243 } 2244 return nullptr; 2245 }; 2246 if (auto *SrcType = FindTruncSrcType()) { 2247 SmallVector<const SCEV *, 8> LargeOps; 2248 bool Ok = true; 2249 // Check all the operands to see if they can be represented in the 2250 // source type of the truncate. 2251 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2252 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2253 if (T->getOperand()->getType() != SrcType) { 2254 Ok = false; 2255 break; 2256 } 2257 LargeOps.push_back(T->getOperand()); 2258 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2259 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2260 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2261 SmallVector<const SCEV *, 8> LargeMulOps; 2262 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2263 if (const SCEVTruncateExpr *T = 2264 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2265 if (T->getOperand()->getType() != SrcType) { 2266 Ok = false; 2267 break; 2268 } 2269 LargeMulOps.push_back(T->getOperand()); 2270 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2271 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2272 } else { 2273 Ok = false; 2274 break; 2275 } 2276 } 2277 if (Ok) 2278 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2279 } else { 2280 Ok = false; 2281 break; 2282 } 2283 } 2284 if (Ok) { 2285 // Evaluate the expression in the larger type. 2286 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); 2287 // If it folds to something simple, use it. Otherwise, don't. 2288 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2289 return getTruncateExpr(Fold, Ty); 2290 } 2291 } 2292 2293 // Skip past any other cast SCEVs. 2294 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2295 ++Idx; 2296 2297 // If there are add operands they would be next. 2298 if (Idx < Ops.size()) { 2299 bool DeletedAdd = false; 2300 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2301 if (Ops.size() > AddOpsInlineThreshold || 2302 Add->getNumOperands() > AddOpsInlineThreshold) 2303 break; 2304 // If we have an add, expand the add operands onto the end of the operands 2305 // list. 2306 Ops.erase(Ops.begin()+Idx); 2307 Ops.append(Add->op_begin(), Add->op_end()); 2308 DeletedAdd = true; 2309 } 2310 2311 // If we deleted at least one add, we added operands to the end of the list, 2312 // and they are not necessarily sorted. Recurse to resort and resimplify 2313 // any operands we just acquired. 2314 if (DeletedAdd) 2315 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2316 } 2317 2318 // Skip over the add expression until we get to a multiply. 2319 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2320 ++Idx; 2321 2322 // Check to see if there are any folding opportunities present with 2323 // operands multiplied by constant values. 2324 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { 2325 uint64_t BitWidth = getTypeSizeInBits(Ty); 2326 DenseMap<const SCEV *, APInt> M; 2327 SmallVector<const SCEV *, 8> NewOps; 2328 APInt AccumulatedConstant(BitWidth, 0); 2329 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2330 Ops.data(), Ops.size(), 2331 APInt(BitWidth, 1), *this)) { 2332 struct APIntCompare { 2333 bool operator()(const APInt &LHS, const APInt &RHS) const { 2334 return LHS.ult(RHS); 2335 } 2336 }; 2337 2338 // Some interesting folding opportunity is present, so its worthwhile to 2339 // re-generate the operands list. Group the operands by constant scale, 2340 // to avoid multiplying by the same constant scale multiple times. 2341 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2342 for (const SCEV *NewOp : NewOps) 2343 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2344 // Re-generate the operands list. 2345 Ops.clear(); 2346 if (AccumulatedConstant != 0) 2347 Ops.push_back(getConstant(AccumulatedConstant)); 2348 for (auto &MulOp : MulOpLists) 2349 if (MulOp.first != 0) 2350 Ops.push_back(getMulExpr( 2351 getConstant(MulOp.first), 2352 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2353 SCEV::FlagAnyWrap, Depth + 1)); 2354 if (Ops.empty()) 2355 return getZero(Ty); 2356 if (Ops.size() == 1) 2357 return Ops[0]; 2358 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2359 } 2360 } 2361 2362 // If we are adding something to a multiply expression, make sure the 2363 // something is not already an operand of the multiply. If so, merge it into 2364 // the multiply. 2365 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2366 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2367 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2368 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2369 if (isa<SCEVConstant>(MulOpSCEV)) 2370 continue; 2371 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2372 if (MulOpSCEV == Ops[AddOp]) { 2373 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2374 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2375 if (Mul->getNumOperands() != 2) { 2376 // If the multiply has more than two operands, we must get the 2377 // Y*Z term. 2378 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2379 Mul->op_begin()+MulOp); 2380 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2381 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2382 } 2383 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2384 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2385 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2386 SCEV::FlagAnyWrap, Depth + 1); 2387 if (Ops.size() == 2) return OuterMul; 2388 if (AddOp < Idx) { 2389 Ops.erase(Ops.begin()+AddOp); 2390 Ops.erase(Ops.begin()+Idx-1); 2391 } else { 2392 Ops.erase(Ops.begin()+Idx); 2393 Ops.erase(Ops.begin()+AddOp-1); 2394 } 2395 Ops.push_back(OuterMul); 2396 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2397 } 2398 2399 // Check this multiply against other multiplies being added together. 2400 for (unsigned OtherMulIdx = Idx+1; 2401 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2402 ++OtherMulIdx) { 2403 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2404 // If MulOp occurs in OtherMul, we can fold the two multiplies 2405 // together. 2406 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2407 OMulOp != e; ++OMulOp) 2408 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2409 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2410 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2411 if (Mul->getNumOperands() != 2) { 2412 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2413 Mul->op_begin()+MulOp); 2414 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2415 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2416 } 2417 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2418 if (OtherMul->getNumOperands() != 2) { 2419 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2420 OtherMul->op_begin()+OMulOp); 2421 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2422 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2423 } 2424 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2425 const SCEV *InnerMulSum = 2426 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2427 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2428 SCEV::FlagAnyWrap, Depth + 1); 2429 if (Ops.size() == 2) return OuterMul; 2430 Ops.erase(Ops.begin()+Idx); 2431 Ops.erase(Ops.begin()+OtherMulIdx-1); 2432 Ops.push_back(OuterMul); 2433 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2434 } 2435 } 2436 } 2437 } 2438 2439 // If there are any add recurrences in the operands list, see if any other 2440 // added values are loop invariant. If so, we can fold them into the 2441 // recurrence. 2442 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2443 ++Idx; 2444 2445 // Scan over all recurrences, trying to fold loop invariants into them. 2446 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2447 // Scan all of the other operands to this add and add them to the vector if 2448 // they are loop invariant w.r.t. the recurrence. 2449 SmallVector<const SCEV *, 8> LIOps; 2450 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2451 const Loop *AddRecLoop = AddRec->getLoop(); 2452 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2453 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2454 LIOps.push_back(Ops[i]); 2455 Ops.erase(Ops.begin()+i); 2456 --i; --e; 2457 } 2458 2459 // If we found some loop invariants, fold them into the recurrence. 2460 if (!LIOps.empty()) { 2461 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2462 LIOps.push_back(AddRec->getStart()); 2463 2464 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2465 AddRec->op_end()); 2466 // This follows from the fact that the no-wrap flags on the outer add 2467 // expression are applicable on the 0th iteration, when the add recurrence 2468 // will be equal to its start value. 2469 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2470 2471 // Build the new addrec. Propagate the NUW and NSW flags if both the 2472 // outer add and the inner addrec are guaranteed to have no overflow. 2473 // Always propagate NW. 2474 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2475 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2476 2477 // If all of the other operands were loop invariant, we are done. 2478 if (Ops.size() == 1) return NewRec; 2479 2480 // Otherwise, add the folded AddRec by the non-invariant parts. 2481 for (unsigned i = 0;; ++i) 2482 if (Ops[i] == AddRec) { 2483 Ops[i] = NewRec; 2484 break; 2485 } 2486 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2487 } 2488 2489 // Okay, if there weren't any loop invariants to be folded, check to see if 2490 // there are multiple AddRec's with the same loop induction variable being 2491 // added together. If so, we can fold them. 2492 for (unsigned OtherIdx = Idx+1; 2493 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2494 ++OtherIdx) { 2495 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2496 // so that the 1st found AddRecExpr is dominated by all others. 2497 assert(DT.dominates( 2498 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2499 AddRec->getLoop()->getHeader()) && 2500 "AddRecExprs are not sorted in reverse dominance order?"); 2501 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2502 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2503 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2504 AddRec->op_end()); 2505 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2506 ++OtherIdx) { 2507 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2508 if (OtherAddRec->getLoop() == AddRecLoop) { 2509 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2510 i != e; ++i) { 2511 if (i >= AddRecOps.size()) { 2512 AddRecOps.append(OtherAddRec->op_begin()+i, 2513 OtherAddRec->op_end()); 2514 break; 2515 } 2516 SmallVector<const SCEV *, 2> TwoOps = { 2517 AddRecOps[i], OtherAddRec->getOperand(i)}; 2518 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2519 } 2520 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2521 } 2522 } 2523 // Step size has changed, so we cannot guarantee no self-wraparound. 2524 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2525 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2526 } 2527 } 2528 2529 // Otherwise couldn't fold anything into this recurrence. Move onto the 2530 // next one. 2531 } 2532 2533 // Okay, it looks like we really DO need an add expr. Check to see if we 2534 // already have one, otherwise create a new one. 2535 return getOrCreateAddExpr(Ops, Flags); 2536 } 2537 2538 const SCEV * 2539 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops, 2540 SCEV::NoWrapFlags Flags) { 2541 FoldingSetNodeID ID; 2542 ID.AddInteger(scAddExpr); 2543 for (const SCEV *Op : Ops) 2544 ID.AddPointer(Op); 2545 void *IP = nullptr; 2546 SCEVAddExpr *S = 2547 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2548 if (!S) { 2549 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2550 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2551 S = new (SCEVAllocator) 2552 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); 2553 UniqueSCEVs.InsertNode(S, IP); 2554 addToLoopUseLists(S); 2555 } 2556 S->setNoWrapFlags(Flags); 2557 return S; 2558 } 2559 2560 const SCEV * 2561 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops, 2562 const Loop *L, SCEV::NoWrapFlags Flags) { 2563 FoldingSetNodeID ID; 2564 ID.AddInteger(scAddRecExpr); 2565 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2566 ID.AddPointer(Ops[i]); 2567 ID.AddPointer(L); 2568 void *IP = nullptr; 2569 SCEVAddRecExpr *S = 2570 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2571 if (!S) { 2572 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2573 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2574 S = new (SCEVAllocator) 2575 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); 2576 UniqueSCEVs.InsertNode(S, IP); 2577 addToLoopUseLists(S); 2578 } 2579 S->setNoWrapFlags(Flags); 2580 return S; 2581 } 2582 2583 const SCEV * 2584 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops, 2585 SCEV::NoWrapFlags Flags) { 2586 FoldingSetNodeID ID; 2587 ID.AddInteger(scMulExpr); 2588 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2589 ID.AddPointer(Ops[i]); 2590 void *IP = nullptr; 2591 SCEVMulExpr *S = 2592 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2593 if (!S) { 2594 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2595 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2596 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), 2597 O, Ops.size()); 2598 UniqueSCEVs.InsertNode(S, IP); 2599 addToLoopUseLists(S); 2600 } 2601 S->setNoWrapFlags(Flags); 2602 return S; 2603 } 2604 2605 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { 2606 uint64_t k = i*j; 2607 if (j > 1 && k / j != i) Overflow = true; 2608 return k; 2609 } 2610 2611 /// Compute the result of "n choose k", the binomial coefficient. If an 2612 /// intermediate computation overflows, Overflow will be set and the return will 2613 /// be garbage. Overflow is not cleared on absence of overflow. 2614 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { 2615 // We use the multiplicative formula: 2616 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . 2617 // At each iteration, we take the n-th term of the numeral and divide by the 2618 // (k-n)th term of the denominator. This division will always produce an 2619 // integral result, and helps reduce the chance of overflow in the 2620 // intermediate computations. However, we can still overflow even when the 2621 // final result would fit. 2622 2623 if (n == 0 || n == k) return 1; 2624 if (k > n) return 0; 2625 2626 if (k > n/2) 2627 k = n-k; 2628 2629 uint64_t r = 1; 2630 for (uint64_t i = 1; i <= k; ++i) { 2631 r = umul_ov(r, n-(i-1), Overflow); 2632 r /= i; 2633 } 2634 return r; 2635 } 2636 2637 /// Determine if any of the operands in this SCEV are a constant or if 2638 /// any of the add or multiply expressions in this SCEV contain a constant. 2639 static bool containsConstantInAddMulChain(const SCEV *StartExpr) { 2640 struct FindConstantInAddMulChain { 2641 bool FoundConstant = false; 2642 2643 bool follow(const SCEV *S) { 2644 FoundConstant |= isa<SCEVConstant>(S); 2645 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); 2646 } 2647 2648 bool isDone() const { 2649 return FoundConstant; 2650 } 2651 }; 2652 2653 FindConstantInAddMulChain F; 2654 SCEVTraversal<FindConstantInAddMulChain> ST(F); 2655 ST.visitAll(StartExpr); 2656 return F.FoundConstant; 2657 } 2658 2659 /// Get a canonical multiply expression, or something simpler if possible. 2660 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2661 SCEV::NoWrapFlags Flags, 2662 unsigned Depth) { 2663 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2664 "only nuw or nsw allowed"); 2665 assert(!Ops.empty() && "Cannot get empty mul!"); 2666 if (Ops.size() == 1) return Ops[0]; 2667 #ifndef NDEBUG 2668 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2669 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2670 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2671 "SCEVMulExpr operand types don't match!"); 2672 #endif 2673 2674 // Sort by complexity, this groups all similar expression types together. 2675 GroupByComplexity(Ops, &LI, DT); 2676 2677 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2678 2679 // Limit recursion calls depth, but fold all-constant expressions. 2680 // `Ops` is sorted, so it's enough to check just last one. 2681 if ((Depth > MaxArithDepth || hasHugeExpression(Ops)) && 2682 !isa<SCEVConstant>(Ops.back())) 2683 return getOrCreateMulExpr(Ops, Flags); 2684 2685 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) { 2686 static_cast<SCEVMulExpr *>(S)->setNoWrapFlags(Flags); 2687 return S; 2688 } 2689 2690 // If there are any constants, fold them together. 2691 unsigned Idx = 0; 2692 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2693 2694 if (Ops.size() == 2) 2695 // C1*(C2+V) -> C1*C2 + C1*V 2696 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2697 // If any of Add's ops are Adds or Muls with a constant, apply this 2698 // transformation as well. 2699 // 2700 // TODO: There are some cases where this transformation is not 2701 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of 2702 // this transformation should be narrowed down. 2703 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) 2704 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2705 SCEV::FlagAnyWrap, Depth + 1), 2706 getMulExpr(LHSC, Add->getOperand(1), 2707 SCEV::FlagAnyWrap, Depth + 1), 2708 SCEV::FlagAnyWrap, Depth + 1); 2709 2710 ++Idx; 2711 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2712 // We found two constants, fold them together! 2713 ConstantInt *Fold = 2714 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2715 Ops[0] = getConstant(Fold); 2716 Ops.erase(Ops.begin()+1); // Erase the folded element 2717 if (Ops.size() == 1) return Ops[0]; 2718 LHSC = cast<SCEVConstant>(Ops[0]); 2719 } 2720 2721 // If we are left with a constant one being multiplied, strip it off. 2722 if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) { 2723 Ops.erase(Ops.begin()); 2724 --Idx; 2725 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 2726 // If we have a multiply of zero, it will always be zero. 2727 return Ops[0]; 2728 } else if (Ops[0]->isAllOnesValue()) { 2729 // If we have a mul by -1 of an add, try distributing the -1 among the 2730 // add operands. 2731 if (Ops.size() == 2) { 2732 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2733 SmallVector<const SCEV *, 4> NewOps; 2734 bool AnyFolded = false; 2735 for (const SCEV *AddOp : Add->operands()) { 2736 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2737 Depth + 1); 2738 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2739 NewOps.push_back(Mul); 2740 } 2741 if (AnyFolded) 2742 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 2743 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2744 // Negation preserves a recurrence's no self-wrap property. 2745 SmallVector<const SCEV *, 4> Operands; 2746 for (const SCEV *AddRecOp : AddRec->operands()) 2747 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, 2748 Depth + 1)); 2749 2750 return getAddRecExpr(Operands, AddRec->getLoop(), 2751 AddRec->getNoWrapFlags(SCEV::FlagNW)); 2752 } 2753 } 2754 } 2755 2756 if (Ops.size() == 1) 2757 return Ops[0]; 2758 } 2759 2760 // Skip over the add expression until we get to a multiply. 2761 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2762 ++Idx; 2763 2764 // If there are mul operands inline them all into this expression. 2765 if (Idx < Ops.size()) { 2766 bool DeletedMul = false; 2767 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2768 if (Ops.size() > MulOpsInlineThreshold) 2769 break; 2770 // If we have an mul, expand the mul operands onto the end of the 2771 // operands list. 2772 Ops.erase(Ops.begin()+Idx); 2773 Ops.append(Mul->op_begin(), Mul->op_end()); 2774 DeletedMul = true; 2775 } 2776 2777 // If we deleted at least one mul, we added operands to the end of the 2778 // list, and they are not necessarily sorted. Recurse to resort and 2779 // resimplify any operands we just acquired. 2780 if (DeletedMul) 2781 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2782 } 2783 2784 // If there are any add recurrences in the operands list, see if any other 2785 // added values are loop invariant. If so, we can fold them into the 2786 // recurrence. 2787 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2788 ++Idx; 2789 2790 // Scan over all recurrences, trying to fold loop invariants into them. 2791 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2792 // Scan all of the other operands to this mul and add them to the vector 2793 // if they are loop invariant w.r.t. the recurrence. 2794 SmallVector<const SCEV *, 8> LIOps; 2795 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2796 const Loop *AddRecLoop = AddRec->getLoop(); 2797 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2798 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2799 LIOps.push_back(Ops[i]); 2800 Ops.erase(Ops.begin()+i); 2801 --i; --e; 2802 } 2803 2804 // If we found some loop invariants, fold them into the recurrence. 2805 if (!LIOps.empty()) { 2806 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 2807 SmallVector<const SCEV *, 4> NewOps; 2808 NewOps.reserve(AddRec->getNumOperands()); 2809 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 2810 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 2811 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 2812 SCEV::FlagAnyWrap, Depth + 1)); 2813 2814 // Build the new addrec. Propagate the NUW and NSW flags if both the 2815 // outer mul and the inner addrec are guaranteed to have no overflow. 2816 // 2817 // No self-wrap cannot be guaranteed after changing the step size, but 2818 // will be inferred if either NUW or NSW is true. 2819 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); 2820 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); 2821 2822 // If all of the other operands were loop invariant, we are done. 2823 if (Ops.size() == 1) return NewRec; 2824 2825 // Otherwise, multiply the folded AddRec by the non-invariant parts. 2826 for (unsigned i = 0;; ++i) 2827 if (Ops[i] == AddRec) { 2828 Ops[i] = NewRec; 2829 break; 2830 } 2831 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2832 } 2833 2834 // Okay, if there weren't any loop invariants to be folded, check to see 2835 // if there are multiple AddRec's with the same loop induction variable 2836 // being multiplied together. If so, we can fold them. 2837 2838 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 2839 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 2840 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 2841 // ]]],+,...up to x=2n}. 2842 // Note that the arguments to choose() are always integers with values 2843 // known at compile time, never SCEV objects. 2844 // 2845 // The implementation avoids pointless extra computations when the two 2846 // addrec's are of different length (mathematically, it's equivalent to 2847 // an infinite stream of zeros on the right). 2848 bool OpsModified = false; 2849 for (unsigned OtherIdx = Idx+1; 2850 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2851 ++OtherIdx) { 2852 const SCEVAddRecExpr *OtherAddRec = 2853 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2854 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 2855 continue; 2856 2857 // Limit max number of arguments to avoid creation of unreasonably big 2858 // SCEVAddRecs with very complex operands. 2859 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > 2860 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec})) 2861 continue; 2862 2863 bool Overflow = false; 2864 Type *Ty = AddRec->getType(); 2865 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; 2866 SmallVector<const SCEV*, 7> AddRecOps; 2867 for (int x = 0, xe = AddRec->getNumOperands() + 2868 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { 2869 SmallVector <const SCEV *, 7> SumOps; 2870 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { 2871 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); 2872 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), 2873 ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); 2874 z < ze && !Overflow; ++z) { 2875 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); 2876 uint64_t Coeff; 2877 if (LargerThan64Bits) 2878 Coeff = umul_ov(Coeff1, Coeff2, Overflow); 2879 else 2880 Coeff = Coeff1*Coeff2; 2881 const SCEV *CoeffTerm = getConstant(Ty, Coeff); 2882 const SCEV *Term1 = AddRec->getOperand(y-z); 2883 const SCEV *Term2 = OtherAddRec->getOperand(z); 2884 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2, 2885 SCEV::FlagAnyWrap, Depth + 1)); 2886 } 2887 } 2888 if (SumOps.empty()) 2889 SumOps.push_back(getZero(Ty)); 2890 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1)); 2891 } 2892 if (!Overflow) { 2893 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop, 2894 SCEV::FlagAnyWrap); 2895 if (Ops.size() == 2) return NewAddRec; 2896 Ops[Idx] = NewAddRec; 2897 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2898 OpsModified = true; 2899 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); 2900 if (!AddRec) 2901 break; 2902 } 2903 } 2904 if (OpsModified) 2905 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2906 2907 // Otherwise couldn't fold anything into this recurrence. Move onto the 2908 // next one. 2909 } 2910 2911 // Okay, it looks like we really DO need an mul expr. Check to see if we 2912 // already have one, otherwise create a new one. 2913 return getOrCreateMulExpr(Ops, Flags); 2914 } 2915 2916 /// Represents an unsigned remainder expression based on unsigned division. 2917 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS, 2918 const SCEV *RHS) { 2919 assert(getEffectiveSCEVType(LHS->getType()) == 2920 getEffectiveSCEVType(RHS->getType()) && 2921 "SCEVURemExpr operand types don't match!"); 2922 2923 // Short-circuit easy cases 2924 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 2925 // If constant is one, the result is trivial 2926 if (RHSC->getValue()->isOne()) 2927 return getZero(LHS->getType()); // X urem 1 --> 0 2928 2929 // If constant is a power of two, fold into a zext(trunc(LHS)). 2930 if (RHSC->getAPInt().isPowerOf2()) { 2931 Type *FullTy = LHS->getType(); 2932 Type *TruncTy = 2933 IntegerType::get(getContext(), RHSC->getAPInt().logBase2()); 2934 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy); 2935 } 2936 } 2937 2938 // Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y) 2939 const SCEV *UDiv = getUDivExpr(LHS, RHS); 2940 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW); 2941 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW); 2942 } 2943 2944 /// Get a canonical unsigned division expression, or something simpler if 2945 /// possible. 2946 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 2947 const SCEV *RHS) { 2948 assert(getEffectiveSCEVType(LHS->getType()) == 2949 getEffectiveSCEVType(RHS->getType()) && 2950 "SCEVUDivExpr operand types don't match!"); 2951 2952 FoldingSetNodeID ID; 2953 ID.AddInteger(scUDivExpr); 2954 ID.AddPointer(LHS); 2955 ID.AddPointer(RHS); 2956 void *IP = nullptr; 2957 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) 2958 return S; 2959 2960 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 2961 if (RHSC->getValue()->isOne()) 2962 return LHS; // X udiv 1 --> x 2963 // If the denominator is zero, the result of the udiv is undefined. Don't 2964 // try to analyze it, because the resolution chosen here may differ from 2965 // the resolution chosen in other parts of the compiler. 2966 if (!RHSC->getValue()->isZero()) { 2967 // Determine if the division can be folded into the operands of 2968 // its operands. 2969 // TODO: Generalize this to non-constants by using known-bits information. 2970 Type *Ty = LHS->getType(); 2971 unsigned LZ = RHSC->getAPInt().countLeadingZeros(); 2972 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; 2973 // For non-power-of-two values, effectively round the value up to the 2974 // nearest power of two. 2975 if (!RHSC->getAPInt().isPowerOf2()) 2976 ++MaxShiftAmt; 2977 IntegerType *ExtTy = 2978 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 2979 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 2980 if (const SCEVConstant *Step = 2981 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { 2982 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 2983 const APInt &StepInt = Step->getAPInt(); 2984 const APInt &DivInt = RHSC->getAPInt(); 2985 if (!StepInt.urem(DivInt) && 2986 getZeroExtendExpr(AR, ExtTy) == 2987 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 2988 getZeroExtendExpr(Step, ExtTy), 2989 AR->getLoop(), SCEV::FlagAnyWrap)) { 2990 SmallVector<const SCEV *, 4> Operands; 2991 for (const SCEV *Op : AR->operands()) 2992 Operands.push_back(getUDivExpr(Op, RHS)); 2993 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); 2994 } 2995 /// Get a canonical UDivExpr for a recurrence. 2996 /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. 2997 // We can currently only fold X%N if X is constant. 2998 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 2999 if (StartC && !DivInt.urem(StepInt) && 3000 getZeroExtendExpr(AR, ExtTy) == 3001 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3002 getZeroExtendExpr(Step, ExtTy), 3003 AR->getLoop(), SCEV::FlagAnyWrap)) { 3004 const APInt &StartInt = StartC->getAPInt(); 3005 const APInt &StartRem = StartInt.urem(StepInt); 3006 if (StartRem != 0) { 3007 const SCEV *NewLHS = 3008 getAddRecExpr(getConstant(StartInt - StartRem), Step, 3009 AR->getLoop(), SCEV::FlagNW); 3010 if (LHS != NewLHS) { 3011 LHS = NewLHS; 3012 3013 // Reset the ID to include the new LHS, and check if it is 3014 // already cached. 3015 ID.clear(); 3016 ID.AddInteger(scUDivExpr); 3017 ID.AddPointer(LHS); 3018 ID.AddPointer(RHS); 3019 IP = nullptr; 3020 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) 3021 return S; 3022 } 3023 } 3024 } 3025 } 3026 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 3027 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3028 SmallVector<const SCEV *, 4> Operands; 3029 for (const SCEV *Op : M->operands()) 3030 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3031 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3032 // Find an operand that's safely divisible. 3033 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3034 const SCEV *Op = M->getOperand(i); 3035 const SCEV *Div = getUDivExpr(Op, RHSC); 3036 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3037 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3038 M->op_end()); 3039 Operands[i] = Div; 3040 return getMulExpr(Operands); 3041 } 3042 } 3043 } 3044 3045 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3046 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3047 if (auto *DivisorConstant = 3048 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3049 bool Overflow = false; 3050 APInt NewRHS = 3051 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3052 if (Overflow) { 3053 return getConstant(RHSC->getType(), 0, false); 3054 } 3055 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3056 } 3057 } 3058 3059 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3060 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3061 SmallVector<const SCEV *, 4> Operands; 3062 for (const SCEV *Op : A->operands()) 3063 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3064 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3065 Operands.clear(); 3066 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3067 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3068 if (isa<SCEVUDivExpr>(Op) || 3069 getMulExpr(Op, RHS) != A->getOperand(i)) 3070 break; 3071 Operands.push_back(Op); 3072 } 3073 if (Operands.size() == A->getNumOperands()) 3074 return getAddExpr(Operands); 3075 } 3076 } 3077 3078 // Fold if both operands are constant. 3079 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3080 Constant *LHSCV = LHSC->getValue(); 3081 Constant *RHSCV = RHSC->getValue(); 3082 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3083 RHSCV))); 3084 } 3085 } 3086 } 3087 3088 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3089 // changes). Make sure we get a new one. 3090 IP = nullptr; 3091 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3092 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3093 LHS, RHS); 3094 UniqueSCEVs.InsertNode(S, IP); 3095 addToLoopUseLists(S); 3096 return S; 3097 } 3098 3099 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3100 APInt A = C1->getAPInt().abs(); 3101 APInt B = C2->getAPInt().abs(); 3102 uint32_t ABW = A.getBitWidth(); 3103 uint32_t BBW = B.getBitWidth(); 3104 3105 if (ABW > BBW) 3106 B = B.zext(ABW); 3107 else if (ABW < BBW) 3108 A = A.zext(BBW); 3109 3110 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3111 } 3112 3113 /// Get a canonical unsigned division expression, or something simpler if 3114 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3115 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3116 /// it's not exact because the udiv may be clearing bits. 3117 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3118 const SCEV *RHS) { 3119 // TODO: we could try to find factors in all sorts of things, but for now we 3120 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3121 // end of this file for inspiration. 3122 3123 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3124 if (!Mul || !Mul->hasNoUnsignedWrap()) 3125 return getUDivExpr(LHS, RHS); 3126 3127 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3128 // If the mulexpr multiplies by a constant, then that constant must be the 3129 // first element of the mulexpr. 3130 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3131 if (LHSCst == RHSCst) { 3132 SmallVector<const SCEV *, 2> Operands; 3133 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3134 return getMulExpr(Operands); 3135 } 3136 3137 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3138 // that there's a factor provided by one of the other terms. We need to 3139 // check. 3140 APInt Factor = gcd(LHSCst, RHSCst); 3141 if (!Factor.isIntN(1)) { 3142 LHSCst = 3143 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3144 RHSCst = 3145 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3146 SmallVector<const SCEV *, 2> Operands; 3147 Operands.push_back(LHSCst); 3148 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3149 LHS = getMulExpr(Operands); 3150 RHS = RHSCst; 3151 Mul = dyn_cast<SCEVMulExpr>(LHS); 3152 if (!Mul) 3153 return getUDivExactExpr(LHS, RHS); 3154 } 3155 } 3156 } 3157 3158 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3159 if (Mul->getOperand(i) == RHS) { 3160 SmallVector<const SCEV *, 2> Operands; 3161 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3162 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3163 return getMulExpr(Operands); 3164 } 3165 } 3166 3167 return getUDivExpr(LHS, RHS); 3168 } 3169 3170 /// Get an add recurrence expression for the specified loop. Simplify the 3171 /// expression as much as possible. 3172 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 3173 const Loop *L, 3174 SCEV::NoWrapFlags Flags) { 3175 SmallVector<const SCEV *, 4> Operands; 3176 Operands.push_back(Start); 3177 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 3178 if (StepChrec->getLoop() == L) { 3179 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 3180 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 3181 } 3182 3183 Operands.push_back(Step); 3184 return getAddRecExpr(Operands, L, Flags); 3185 } 3186 3187 /// Get an add recurrence expression for the specified loop. Simplify the 3188 /// expression as much as possible. 3189 const SCEV * 3190 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, 3191 const Loop *L, SCEV::NoWrapFlags Flags) { 3192 if (Operands.size() == 1) return Operands[0]; 3193 #ifndef NDEBUG 3194 Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); 3195 for (unsigned i = 1, e = Operands.size(); i != e; ++i) 3196 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && 3197 "SCEVAddRecExpr operand types don't match!"); 3198 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3199 assert(isLoopInvariant(Operands[i], L) && 3200 "SCEVAddRecExpr operand is not loop-invariant!"); 3201 #endif 3202 3203 if (Operands.back()->isZero()) { 3204 Operands.pop_back(); 3205 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X 3206 } 3207 3208 // It's tempting to want to call getConstantMaxBackedgeTakenCount count here and 3209 // use that information to infer NUW and NSW flags. However, computing a 3210 // BE count requires calling getAddRecExpr, so we may not yet have a 3211 // meaningful BE count at this point (and if we don't, we'd be stuck 3212 // with a SCEVCouldNotCompute as the cached BE count). 3213 3214 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); 3215 3216 // Canonicalize nested AddRecs in by nesting them in order of loop depth. 3217 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 3218 const Loop *NestedLoop = NestedAR->getLoop(); 3219 if (L->contains(NestedLoop) 3220 ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) 3221 : (!NestedLoop->contains(L) && 3222 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { 3223 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 3224 NestedAR->op_end()); 3225 Operands[0] = NestedAR->getStart(); 3226 // AddRecs require their operands be loop-invariant with respect to their 3227 // loops. Don't perform this transformation if it would break this 3228 // requirement. 3229 bool AllInvariant = all_of( 3230 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); 3231 3232 if (AllInvariant) { 3233 // Create a recurrence for the outer loop with the same step size. 3234 // 3235 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the 3236 // inner recurrence has the same property. 3237 SCEV::NoWrapFlags OuterFlags = 3238 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); 3239 3240 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); 3241 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { 3242 return isLoopInvariant(Op, NestedLoop); 3243 }); 3244 3245 if (AllInvariant) { 3246 // Ok, both add recurrences are valid after the transformation. 3247 // 3248 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if 3249 // the outer recurrence has the same property. 3250 SCEV::NoWrapFlags InnerFlags = 3251 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3252 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3253 } 3254 } 3255 // Reset Operands to its original state. 3256 Operands[0] = NestedAR; 3257 } 3258 } 3259 3260 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3261 // already have one, otherwise create a new one. 3262 return getOrCreateAddRecExpr(Operands, L, Flags); 3263 } 3264 3265 const SCEV * 3266 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3267 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3268 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3269 // getSCEV(Base)->getType() has the same address space as Base->getType() 3270 // because SCEV::getType() preserves the address space. 3271 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType()); 3272 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3273 // instruction to its SCEV, because the Instruction may be guarded by control 3274 // flow and the no-overflow bits may not be valid for the expression in any 3275 // context. This can be fixed similarly to how these flags are handled for 3276 // adds. 3277 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3278 : SCEV::FlagAnyWrap; 3279 3280 const SCEV *TotalOffset = getZero(IntIdxTy); 3281 Type *CurTy = GEP->getType(); 3282 bool FirstIter = true; 3283 for (const SCEV *IndexExpr : IndexExprs) { 3284 // Compute the (potentially symbolic) offset in bytes for this index. 3285 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3286 // For a struct, add the member offset. 3287 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3288 unsigned FieldNo = Index->getZExtValue(); 3289 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3290 3291 // Add the field offset to the running total offset. 3292 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3293 3294 // Update CurTy to the type of the field at Index. 3295 CurTy = STy->getTypeAtIndex(Index); 3296 } else { 3297 // Update CurTy to its element type. 3298 if (FirstIter) { 3299 assert(isa<PointerType>(CurTy) && 3300 "The first index of a GEP indexes a pointer"); 3301 CurTy = GEP->getSourceElementType(); 3302 FirstIter = false; 3303 } else { 3304 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3305 } 3306 // For an array, add the element offset, explicitly scaled. 3307 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3308 // Getelementptr indices are signed. 3309 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3310 3311 // Multiply the index by the element size to compute the element offset. 3312 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3313 3314 // Add the element offset to the running total offset. 3315 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3316 } 3317 } 3318 3319 // Add the total offset from all the GEP indices to the base. 3320 auto *GEPExpr = getAddExpr(BaseExpr, TotalOffset, Wrap); 3321 assert(BaseExpr->getType() == GEPExpr->getType() && 3322 "GEP should not change type mid-flight."); 3323 return GEPExpr; 3324 } 3325 3326 std::tuple<SCEV *, FoldingSetNodeID, void *> 3327 ScalarEvolution::findExistingSCEVInCache(int SCEVType, 3328 ArrayRef<const SCEV *> Ops) { 3329 FoldingSetNodeID ID; 3330 void *IP = nullptr; 3331 ID.AddInteger(SCEVType); 3332 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3333 ID.AddPointer(Ops[i]); 3334 return std::tuple<SCEV *, FoldingSetNodeID, void *>( 3335 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); 3336 } 3337 3338 const SCEV *ScalarEvolution::getMinMaxExpr(unsigned Kind, 3339 SmallVectorImpl<const SCEV *> &Ops) { 3340 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3341 if (Ops.size() == 1) return Ops[0]; 3342 #ifndef NDEBUG 3343 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3344 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3345 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3346 "Operand types don't match!"); 3347 #endif 3348 3349 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3350 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3351 3352 // Sort by complexity, this groups all similar expression types together. 3353 GroupByComplexity(Ops, &LI, DT); 3354 3355 // Check if we have created the same expression before. 3356 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) { 3357 return S; 3358 } 3359 3360 // If there are any constants, fold them together. 3361 unsigned Idx = 0; 3362 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3363 ++Idx; 3364 assert(Idx < Ops.size()); 3365 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3366 if (Kind == scSMaxExpr) 3367 return APIntOps::smax(LHS, RHS); 3368 else if (Kind == scSMinExpr) 3369 return APIntOps::smin(LHS, RHS); 3370 else if (Kind == scUMaxExpr) 3371 return APIntOps::umax(LHS, RHS); 3372 else if (Kind == scUMinExpr) 3373 return APIntOps::umin(LHS, RHS); 3374 llvm_unreachable("Unknown SCEV min/max opcode"); 3375 }; 3376 3377 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3378 // We found two constants, fold them together! 3379 ConstantInt *Fold = ConstantInt::get( 3380 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3381 Ops[0] = getConstant(Fold); 3382 Ops.erase(Ops.begin()+1); // Erase the folded element 3383 if (Ops.size() == 1) return Ops[0]; 3384 LHSC = cast<SCEVConstant>(Ops[0]); 3385 } 3386 3387 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3388 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3389 3390 if (IsMax ? IsMinV : IsMaxV) { 3391 // If we are left with a constant minimum(/maximum)-int, strip it off. 3392 Ops.erase(Ops.begin()); 3393 --Idx; 3394 } else if (IsMax ? IsMaxV : IsMinV) { 3395 // If we have a max(/min) with a constant maximum(/minimum)-int, 3396 // it will always be the extremum. 3397 return LHSC; 3398 } 3399 3400 if (Ops.size() == 1) return Ops[0]; 3401 } 3402 3403 // Find the first operation of the same kind 3404 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3405 ++Idx; 3406 3407 // Check to see if one of the operands is of the same kind. If so, expand its 3408 // operands onto our operand list, and recurse to simplify. 3409 if (Idx < Ops.size()) { 3410 bool DeletedAny = false; 3411 while (Ops[Idx]->getSCEVType() == Kind) { 3412 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3413 Ops.erase(Ops.begin()+Idx); 3414 Ops.append(SMME->op_begin(), SMME->op_end()); 3415 DeletedAny = true; 3416 } 3417 3418 if (DeletedAny) 3419 return getMinMaxExpr(Kind, Ops); 3420 } 3421 3422 // Okay, check to see if the same value occurs in the operand list twice. If 3423 // so, delete one. Since we sorted the list, these values are required to 3424 // be adjacent. 3425 llvm::CmpInst::Predicate GEPred = 3426 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3427 llvm::CmpInst::Predicate LEPred = 3428 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3429 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3430 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3431 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3432 if (Ops[i] == Ops[i + 1] || 3433 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3434 // X op Y op Y --> X op Y 3435 // X op Y --> X, if we know X, Y are ordered appropriately 3436 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3437 --i; 3438 --e; 3439 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3440 Ops[i + 1])) { 3441 // X op Y --> Y, if we know X, Y are ordered appropriately 3442 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3443 --i; 3444 --e; 3445 } 3446 } 3447 3448 if (Ops.size() == 1) return Ops[0]; 3449 3450 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3451 3452 // Okay, it looks like we really DO need an expr. Check to see if we 3453 // already have one, otherwise create a new one. 3454 const SCEV *ExistingSCEV; 3455 FoldingSetNodeID ID; 3456 void *IP; 3457 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops); 3458 if (ExistingSCEV) 3459 return ExistingSCEV; 3460 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3461 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3462 SCEV *S = new (SCEVAllocator) SCEVMinMaxExpr( 3463 ID.Intern(SCEVAllocator), static_cast<SCEVTypes>(Kind), O, Ops.size()); 3464 3465 UniqueSCEVs.InsertNode(S, IP); 3466 addToLoopUseLists(S); 3467 return S; 3468 } 3469 3470 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { 3471 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3472 return getSMaxExpr(Ops); 3473 } 3474 3475 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3476 return getMinMaxExpr(scSMaxExpr, Ops); 3477 } 3478 3479 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { 3480 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3481 return getUMaxExpr(Ops); 3482 } 3483 3484 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3485 return getMinMaxExpr(scUMaxExpr, Ops); 3486 } 3487 3488 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3489 const SCEV *RHS) { 3490 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3491 return getSMinExpr(Ops); 3492 } 3493 3494 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3495 return getMinMaxExpr(scSMinExpr, Ops); 3496 } 3497 3498 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3499 const SCEV *RHS) { 3500 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3501 return getUMinExpr(Ops); 3502 } 3503 3504 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3505 return getMinMaxExpr(scUMinExpr, Ops); 3506 } 3507 3508 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 3509 // We can bypass creating a target-independent 3510 // constant expression and then folding it back into a ConstantInt. 3511 // This is just a compile-time optimization. 3512 if (isa<ScalableVectorType>(AllocTy)) { 3513 Constant *NullPtr = Constant::getNullValue(AllocTy->getPointerTo()); 3514 Constant *One = ConstantInt::get(IntTy, 1); 3515 Constant *GEP = ConstantExpr::getGetElementPtr(AllocTy, NullPtr, One); 3516 return getSCEV(ConstantExpr::getPtrToInt(GEP, IntTy)); 3517 } 3518 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 3519 } 3520 3521 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3522 StructType *STy, 3523 unsigned FieldNo) { 3524 // We can bypass creating a target-independent 3525 // constant expression and then folding it back into a ConstantInt. 3526 // This is just a compile-time optimization. 3527 return getConstant( 3528 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3529 } 3530 3531 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3532 // Don't attempt to do anything other than create a SCEVUnknown object 3533 // here. createSCEV only calls getUnknown after checking for all other 3534 // interesting possibilities, and any other code that calls getUnknown 3535 // is doing so in order to hide a value from SCEV canonicalization. 3536 3537 FoldingSetNodeID ID; 3538 ID.AddInteger(scUnknown); 3539 ID.AddPointer(V); 3540 void *IP = nullptr; 3541 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 3542 assert(cast<SCEVUnknown>(S)->getValue() == V && 3543 "Stale SCEVUnknown in uniquing map!"); 3544 return S; 3545 } 3546 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 3547 FirstUnknown); 3548 FirstUnknown = cast<SCEVUnknown>(S); 3549 UniqueSCEVs.InsertNode(S, IP); 3550 return S; 3551 } 3552 3553 //===----------------------------------------------------------------------===// 3554 // Basic SCEV Analysis and PHI Idiom Recognition Code 3555 // 3556 3557 /// Test if values of the given type are analyzable within the SCEV 3558 /// framework. This primarily includes integer types, and it can optionally 3559 /// include pointer types if the ScalarEvolution class has access to 3560 /// target-specific information. 3561 bool ScalarEvolution::isSCEVable(Type *Ty) const { 3562 // Integers and pointers are always SCEVable. 3563 return Ty->isIntOrPtrTy(); 3564 } 3565 3566 /// Return the size in bits of the specified type, for which isSCEVable must 3567 /// return true. 3568 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { 3569 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3570 if (Ty->isPointerTy()) 3571 return getDataLayout().getIndexTypeSizeInBits(Ty); 3572 return getDataLayout().getTypeSizeInBits(Ty); 3573 } 3574 3575 /// Return a type with the same bitwidth as the given type and which represents 3576 /// how SCEV will treat the given type, for which isSCEVable must return 3577 /// true. For pointer types, this is the pointer index sized integer type. 3578 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { 3579 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3580 3581 if (Ty->isIntegerTy()) 3582 return Ty; 3583 3584 // The only other support type is pointer. 3585 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); 3586 return getDataLayout().getIndexType(Ty); 3587 } 3588 3589 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { 3590 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2; 3591 } 3592 3593 const SCEV *ScalarEvolution::getCouldNotCompute() { 3594 return CouldNotCompute.get(); 3595 } 3596 3597 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3598 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3599 auto *SU = dyn_cast<SCEVUnknown>(S); 3600 return SU && SU->getValue() == nullptr; 3601 }); 3602 3603 return !ContainsNulls; 3604 } 3605 3606 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3607 HasRecMapType::iterator I = HasRecMap.find(S); 3608 if (I != HasRecMap.end()) 3609 return I->second; 3610 3611 bool FoundAddRec = 3612 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); 3613 HasRecMap.insert({S, FoundAddRec}); 3614 return FoundAddRec; 3615 } 3616 3617 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3618 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3619 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3620 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3621 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3622 if (!Add) 3623 return {S, nullptr}; 3624 3625 if (Add->getNumOperands() != 2) 3626 return {S, nullptr}; 3627 3628 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3629 if (!ConstOp) 3630 return {S, nullptr}; 3631 3632 return {Add->getOperand(1), ConstOp->getValue()}; 3633 } 3634 3635 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3636 /// by the value and offset from any ValueOffsetPair in the set. 3637 SetVector<ScalarEvolution::ValueOffsetPair> * 3638 ScalarEvolution::getSCEVValues(const SCEV *S) { 3639 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3640 if (SI == ExprValueMap.end()) 3641 return nullptr; 3642 #ifndef NDEBUG 3643 if (VerifySCEVMap) { 3644 // Check there is no dangling Value in the set returned. 3645 for (const auto &VE : SI->second) 3646 assert(ValueExprMap.count(VE.first)); 3647 } 3648 #endif 3649 return &SI->second; 3650 } 3651 3652 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3653 /// cannot be used separately. eraseValueFromMap should be used to remove 3654 /// V from ValueExprMap and ExprValueMap at the same time. 3655 void ScalarEvolution::eraseValueFromMap(Value *V) { 3656 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3657 if (I != ValueExprMap.end()) { 3658 const SCEV *S = I->second; 3659 // Remove {V, 0} from the set of ExprValueMap[S] 3660 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S)) 3661 SV->remove({V, nullptr}); 3662 3663 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3664 const SCEV *Stripped; 3665 ConstantInt *Offset; 3666 std::tie(Stripped, Offset) = splitAddExpr(S); 3667 if (Offset != nullptr) { 3668 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped)) 3669 SV->remove({V, Offset}); 3670 } 3671 ValueExprMap.erase(V); 3672 } 3673 } 3674 3675 /// Check whether value has nuw/nsw/exact set but SCEV does not. 3676 /// TODO: In reality it is better to check the poison recursively 3677 /// but this is better than nothing. 3678 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) { 3679 if (auto *I = dyn_cast<Instruction>(V)) { 3680 if (isa<OverflowingBinaryOperator>(I)) { 3681 if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) { 3682 if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap()) 3683 return true; 3684 if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap()) 3685 return true; 3686 } 3687 } else if (isa<PossiblyExactOperator>(I) && I->isExact()) 3688 return true; 3689 } 3690 return false; 3691 } 3692 3693 /// Return an existing SCEV if it exists, otherwise analyze the expression and 3694 /// create a new one. 3695 const SCEV *ScalarEvolution::getSCEV(Value *V) { 3696 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3697 3698 const SCEV *S = getExistingSCEV(V); 3699 if (S == nullptr) { 3700 S = createSCEV(V); 3701 // During PHI resolution, it is possible to create two SCEVs for the same 3702 // V, so it is needed to double check whether V->S is inserted into 3703 // ValueExprMap before insert S->{V, 0} into ExprValueMap. 3704 std::pair<ValueExprMapType::iterator, bool> Pair = 3705 ValueExprMap.insert({SCEVCallbackVH(V, this), S}); 3706 if (Pair.second && !SCEVLostPoisonFlags(S, V)) { 3707 ExprValueMap[S].insert({V, nullptr}); 3708 3709 // If S == Stripped + Offset, add Stripped -> {V, Offset} into 3710 // ExprValueMap. 3711 const SCEV *Stripped = S; 3712 ConstantInt *Offset = nullptr; 3713 std::tie(Stripped, Offset) = splitAddExpr(S); 3714 // If stripped is SCEVUnknown, don't bother to save 3715 // Stripped -> {V, offset}. It doesn't simplify and sometimes even 3716 // increase the complexity of the expansion code. 3717 // If V is GetElementPtrInst, don't save Stripped -> {V, offset} 3718 // because it may generate add/sub instead of GEP in SCEV expansion. 3719 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3720 !isa<GetElementPtrInst>(V)) 3721 ExprValueMap[Stripped].insert({V, Offset}); 3722 } 3723 } 3724 return S; 3725 } 3726 3727 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3728 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3729 3730 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3731 if (I != ValueExprMap.end()) { 3732 const SCEV *S = I->second; 3733 if (checkValidity(S)) 3734 return S; 3735 eraseValueFromMap(V); 3736 forgetMemoizedResults(S); 3737 } 3738 return nullptr; 3739 } 3740 3741 /// Return a SCEV corresponding to -V = -1*V 3742 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3743 SCEV::NoWrapFlags Flags) { 3744 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3745 return getConstant( 3746 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3747 3748 Type *Ty = V->getType(); 3749 Ty = getEffectiveSCEVType(Ty); 3750 return getMulExpr( 3751 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 3752 } 3753 3754 /// If Expr computes ~A, return A else return nullptr 3755 static const SCEV *MatchNotExpr(const SCEV *Expr) { 3756 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 3757 if (!Add || Add->getNumOperands() != 2 || 3758 !Add->getOperand(0)->isAllOnesValue()) 3759 return nullptr; 3760 3761 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 3762 if (!AddRHS || AddRHS->getNumOperands() != 2 || 3763 !AddRHS->getOperand(0)->isAllOnesValue()) 3764 return nullptr; 3765 3766 return AddRHS->getOperand(1); 3767 } 3768 3769 /// Return a SCEV corresponding to ~V = -1-V 3770 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3771 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3772 return getConstant( 3773 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3774 3775 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 3776 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 3777 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 3778 SmallVector<const SCEV *, 2> MatchedOperands; 3779 for (const SCEV *Operand : MME->operands()) { 3780 const SCEV *Matched = MatchNotExpr(Operand); 3781 if (!Matched) 3782 return (const SCEV *)nullptr; 3783 MatchedOperands.push_back(Matched); 3784 } 3785 return getMinMaxExpr( 3786 SCEVMinMaxExpr::negate(static_cast<SCEVTypes>(MME->getSCEVType())), 3787 MatchedOperands); 3788 }; 3789 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 3790 return Replaced; 3791 } 3792 3793 Type *Ty = V->getType(); 3794 Ty = getEffectiveSCEVType(Ty); 3795 const SCEV *AllOnes = 3796 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 3797 return getMinusSCEV(AllOnes, V); 3798 } 3799 3800 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3801 SCEV::NoWrapFlags Flags, 3802 unsigned Depth) { 3803 // Fast path: X - X --> 0. 3804 if (LHS == RHS) 3805 return getZero(LHS->getType()); 3806 3807 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3808 // makes it so that we cannot make much use of NUW. 3809 auto AddFlags = SCEV::FlagAnyWrap; 3810 const bool RHSIsNotMinSigned = 3811 !getSignedRangeMin(RHS).isMinSignedValue(); 3812 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3813 // Let M be the minimum representable signed value. Then (-1)*RHS 3814 // signed-wraps if and only if RHS is M. That can happen even for 3815 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3816 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3817 // (-1)*RHS, we need to prove that RHS != M. 3818 // 3819 // If LHS is non-negative and we know that LHS - RHS does not 3820 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 3821 // either by proving that RHS > M or that LHS >= 0. 3822 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 3823 AddFlags = SCEV::FlagNSW; 3824 } 3825 } 3826 3827 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 3828 // RHS is NSW and LHS >= 0. 3829 // 3830 // The difficulty here is that the NSW flag may have been proven 3831 // relative to a loop that is to be found in a recurrence in LHS and 3832 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 3833 // larger scope than intended. 3834 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 3835 3836 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth); 3837 } 3838 3839 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty, 3840 unsigned Depth) { 3841 Type *SrcTy = V->getType(); 3842 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3843 "Cannot truncate or zero extend with non-integer arguments!"); 3844 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3845 return V; // No conversion 3846 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 3847 return getTruncateExpr(V, Ty, Depth); 3848 return getZeroExtendExpr(V, Ty, Depth); 3849 } 3850 3851 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty, 3852 unsigned Depth) { 3853 Type *SrcTy = V->getType(); 3854 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3855 "Cannot truncate or zero extend with non-integer arguments!"); 3856 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3857 return V; // No conversion 3858 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 3859 return getTruncateExpr(V, Ty, Depth); 3860 return getSignExtendExpr(V, Ty, Depth); 3861 } 3862 3863 const SCEV * 3864 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { 3865 Type *SrcTy = V->getType(); 3866 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3867 "Cannot noop or zero extend with non-integer arguments!"); 3868 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3869 "getNoopOrZeroExtend cannot truncate!"); 3870 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3871 return V; // No conversion 3872 return getZeroExtendExpr(V, Ty); 3873 } 3874 3875 const SCEV * 3876 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { 3877 Type *SrcTy = V->getType(); 3878 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3879 "Cannot noop or sign extend with non-integer arguments!"); 3880 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3881 "getNoopOrSignExtend cannot truncate!"); 3882 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3883 return V; // No conversion 3884 return getSignExtendExpr(V, Ty); 3885 } 3886 3887 const SCEV * 3888 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { 3889 Type *SrcTy = V->getType(); 3890 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3891 "Cannot noop or any extend with non-integer arguments!"); 3892 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3893 "getNoopOrAnyExtend cannot truncate!"); 3894 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3895 return V; // No conversion 3896 return getAnyExtendExpr(V, Ty); 3897 } 3898 3899 const SCEV * 3900 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { 3901 Type *SrcTy = V->getType(); 3902 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3903 "Cannot truncate or noop with non-integer arguments!"); 3904 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 3905 "getTruncateOrNoop cannot extend!"); 3906 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3907 return V; // No conversion 3908 return getTruncateExpr(V, Ty); 3909 } 3910 3911 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 3912 const SCEV *RHS) { 3913 const SCEV *PromotedLHS = LHS; 3914 const SCEV *PromotedRHS = RHS; 3915 3916 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 3917 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 3918 else 3919 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 3920 3921 return getUMaxExpr(PromotedLHS, PromotedRHS); 3922 } 3923 3924 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 3925 const SCEV *RHS) { 3926 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3927 return getUMinFromMismatchedTypes(Ops); 3928 } 3929 3930 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 3931 SmallVectorImpl<const SCEV *> &Ops) { 3932 assert(!Ops.empty() && "At least one operand must be!"); 3933 // Trivial case. 3934 if (Ops.size() == 1) 3935 return Ops[0]; 3936 3937 // Find the max type first. 3938 Type *MaxType = nullptr; 3939 for (auto *S : Ops) 3940 if (MaxType) 3941 MaxType = getWiderType(MaxType, S->getType()); 3942 else 3943 MaxType = S->getType(); 3944 3945 // Extend all ops to max type. 3946 SmallVector<const SCEV *, 2> PromotedOps; 3947 for (auto *S : Ops) 3948 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 3949 3950 // Generate umin. 3951 return getUMinExpr(PromotedOps); 3952 } 3953 3954 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 3955 // A pointer operand may evaluate to a nonpointer expression, such as null. 3956 if (!V->getType()->isPointerTy()) 3957 return V; 3958 3959 while (true) { 3960 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { 3961 V = Cast->getOperand(); 3962 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 3963 const SCEV *PtrOp = nullptr; 3964 for (const SCEV *NAryOp : NAry->operands()) { 3965 if (NAryOp->getType()->isPointerTy()) { 3966 // Cannot find the base of an expression with multiple pointer ops. 3967 if (PtrOp) 3968 return V; 3969 PtrOp = NAryOp; 3970 } 3971 } 3972 if (!PtrOp) // All operands were non-pointer. 3973 return V; 3974 V = PtrOp; 3975 } else // Not something we can look further into. 3976 return V; 3977 } 3978 } 3979 3980 /// Push users of the given Instruction onto the given Worklist. 3981 static void 3982 PushDefUseChildren(Instruction *I, 3983 SmallVectorImpl<Instruction *> &Worklist) { 3984 // Push the def-use children onto the Worklist stack. 3985 for (User *U : I->users()) 3986 Worklist.push_back(cast<Instruction>(U)); 3987 } 3988 3989 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 3990 SmallVector<Instruction *, 16> Worklist; 3991 PushDefUseChildren(PN, Worklist); 3992 3993 SmallPtrSet<Instruction *, 8> Visited; 3994 Visited.insert(PN); 3995 while (!Worklist.empty()) { 3996 Instruction *I = Worklist.pop_back_val(); 3997 if (!Visited.insert(I).second) 3998 continue; 3999 4000 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 4001 if (It != ValueExprMap.end()) { 4002 const SCEV *Old = It->second; 4003 4004 // Short-circuit the def-use traversal if the symbolic name 4005 // ceases to appear in expressions. 4006 if (Old != SymName && !hasOperand(Old, SymName)) 4007 continue; 4008 4009 // SCEVUnknown for a PHI either means that it has an unrecognized 4010 // structure, it's a PHI that's in the progress of being computed 4011 // by createNodeForPHI, or it's a single-value PHI. In the first case, 4012 // additional loop trip count information isn't going to change anything. 4013 // In the second case, createNodeForPHI will perform the necessary 4014 // updates on its own when it gets to that point. In the third, we do 4015 // want to forget the SCEVUnknown. 4016 if (!isa<PHINode>(I) || 4017 !isa<SCEVUnknown>(Old) || 4018 (I != PN && Old == SymName)) { 4019 eraseValueFromMap(It->first); 4020 forgetMemoizedResults(Old); 4021 } 4022 } 4023 4024 PushDefUseChildren(I, Worklist); 4025 } 4026 } 4027 4028 namespace { 4029 4030 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start 4031 /// expression in case its Loop is L. If it is not L then 4032 /// if IgnoreOtherLoops is true then use AddRec itself 4033 /// otherwise rewrite cannot be done. 4034 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 4035 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { 4036 public: 4037 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 4038 bool IgnoreOtherLoops = true) { 4039 SCEVInitRewriter Rewriter(L, SE); 4040 const SCEV *Result = Rewriter.visit(S); 4041 if (Rewriter.hasSeenLoopVariantSCEVUnknown()) 4042 return SE.getCouldNotCompute(); 4043 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops 4044 ? SE.getCouldNotCompute() 4045 : Result; 4046 } 4047 4048 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4049 if (!SE.isLoopInvariant(Expr, L)) 4050 SeenLoopVariantSCEVUnknown = true; 4051 return Expr; 4052 } 4053 4054 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4055 // Only re-write AddRecExprs for this loop. 4056 if (Expr->getLoop() == L) 4057 return Expr->getStart(); 4058 SeenOtherLoops = true; 4059 return Expr; 4060 } 4061 4062 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4063 4064 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4065 4066 private: 4067 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 4068 : SCEVRewriteVisitor(SE), L(L) {} 4069 4070 const Loop *L; 4071 bool SeenLoopVariantSCEVUnknown = false; 4072 bool SeenOtherLoops = false; 4073 }; 4074 4075 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post 4076 /// increment expression in case its Loop is L. If it is not L then 4077 /// use AddRec itself. 4078 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 4079 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { 4080 public: 4081 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { 4082 SCEVPostIncRewriter Rewriter(L, SE); 4083 const SCEV *Result = Rewriter.visit(S); 4084 return Rewriter.hasSeenLoopVariantSCEVUnknown() 4085 ? SE.getCouldNotCompute() 4086 : Result; 4087 } 4088 4089 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4090 if (!SE.isLoopInvariant(Expr, L)) 4091 SeenLoopVariantSCEVUnknown = true; 4092 return Expr; 4093 } 4094 4095 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4096 // Only re-write AddRecExprs for this loop. 4097 if (Expr->getLoop() == L) 4098 return Expr->getPostIncExpr(SE); 4099 SeenOtherLoops = true; 4100 return Expr; 4101 } 4102 4103 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4104 4105 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4106 4107 private: 4108 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4109 : SCEVRewriteVisitor(SE), L(L) {} 4110 4111 const Loop *L; 4112 bool SeenLoopVariantSCEVUnknown = false; 4113 bool SeenOtherLoops = false; 4114 }; 4115 4116 /// This class evaluates the compare condition by matching it against the 4117 /// condition of loop latch. If there is a match we assume a true value 4118 /// for the condition while building SCEV nodes. 4119 class SCEVBackedgeConditionFolder 4120 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4121 public: 4122 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4123 ScalarEvolution &SE) { 4124 bool IsPosBECond = false; 4125 Value *BECond = nullptr; 4126 if (BasicBlock *Latch = L->getLoopLatch()) { 4127 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4128 if (BI && BI->isConditional()) { 4129 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4130 "Both outgoing branches should not target same header!"); 4131 BECond = BI->getCondition(); 4132 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4133 } else { 4134 return S; 4135 } 4136 } 4137 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4138 return Rewriter.visit(S); 4139 } 4140 4141 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4142 const SCEV *Result = Expr; 4143 bool InvariantF = SE.isLoopInvariant(Expr, L); 4144 4145 if (!InvariantF) { 4146 Instruction *I = cast<Instruction>(Expr->getValue()); 4147 switch (I->getOpcode()) { 4148 case Instruction::Select: { 4149 SelectInst *SI = cast<SelectInst>(I); 4150 Optional<const SCEV *> Res = 4151 compareWithBackedgeCondition(SI->getCondition()); 4152 if (Res.hasValue()) { 4153 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4154 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4155 } 4156 break; 4157 } 4158 default: { 4159 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4160 if (Res.hasValue()) 4161 Result = Res.getValue(); 4162 break; 4163 } 4164 } 4165 } 4166 return Result; 4167 } 4168 4169 private: 4170 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4171 bool IsPosBECond, ScalarEvolution &SE) 4172 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4173 IsPositiveBECond(IsPosBECond) {} 4174 4175 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4176 4177 const Loop *L; 4178 /// Loop back condition. 4179 Value *BackedgeCond = nullptr; 4180 /// Set to true if loop back is on positive branch condition. 4181 bool IsPositiveBECond; 4182 }; 4183 4184 Optional<const SCEV *> 4185 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4186 4187 // If value matches the backedge condition for loop latch, 4188 // then return a constant evolution node based on loopback 4189 // branch taken. 4190 if (BackedgeCond == IC) 4191 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) 4192 : SE.getZero(Type::getInt1Ty(SE.getContext())); 4193 return None; 4194 } 4195 4196 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 4197 public: 4198 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4199 ScalarEvolution &SE) { 4200 SCEVShiftRewriter Rewriter(L, SE); 4201 const SCEV *Result = Rewriter.visit(S); 4202 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4203 } 4204 4205 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4206 // Only allow AddRecExprs for this loop. 4207 if (!SE.isLoopInvariant(Expr, L)) 4208 Valid = false; 4209 return Expr; 4210 } 4211 4212 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4213 if (Expr->getLoop() == L && Expr->isAffine()) 4214 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4215 Valid = false; 4216 return Expr; 4217 } 4218 4219 bool isValid() { return Valid; } 4220 4221 private: 4222 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 4223 : SCEVRewriteVisitor(SE), L(L) {} 4224 4225 const Loop *L; 4226 bool Valid = true; 4227 }; 4228 4229 } // end anonymous namespace 4230 4231 SCEV::NoWrapFlags 4232 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4233 if (!AR->isAffine()) 4234 return SCEV::FlagAnyWrap; 4235 4236 using OBO = OverflowingBinaryOperator; 4237 4238 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4239 4240 if (!AR->hasNoSignedWrap()) { 4241 ConstantRange AddRecRange = getSignedRange(AR); 4242 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4243 4244 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4245 Instruction::Add, IncRange, OBO::NoSignedWrap); 4246 if (NSWRegion.contains(AddRecRange)) 4247 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4248 } 4249 4250 if (!AR->hasNoUnsignedWrap()) { 4251 ConstantRange AddRecRange = getUnsignedRange(AR); 4252 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4253 4254 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4255 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4256 if (NUWRegion.contains(AddRecRange)) 4257 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4258 } 4259 4260 return Result; 4261 } 4262 4263 namespace { 4264 4265 /// Represents an abstract binary operation. This may exist as a 4266 /// normal instruction or constant expression, or may have been 4267 /// derived from an expression tree. 4268 struct BinaryOp { 4269 unsigned Opcode; 4270 Value *LHS; 4271 Value *RHS; 4272 bool IsNSW = false; 4273 bool IsNUW = false; 4274 4275 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4276 /// constant expression. 4277 Operator *Op = nullptr; 4278 4279 explicit BinaryOp(Operator *Op) 4280 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), 4281 Op(Op) { 4282 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { 4283 IsNSW = OBO->hasNoSignedWrap(); 4284 IsNUW = OBO->hasNoUnsignedWrap(); 4285 } 4286 } 4287 4288 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, 4289 bool IsNUW = false) 4290 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {} 4291 }; 4292 4293 } // end anonymous namespace 4294 4295 /// Try to map \p V into a BinaryOp, and return \c None on failure. 4296 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { 4297 auto *Op = dyn_cast<Operator>(V); 4298 if (!Op) 4299 return None; 4300 4301 // Implementation detail: all the cleverness here should happen without 4302 // creating new SCEV expressions -- our caller knowns tricks to avoid creating 4303 // SCEV expressions when possible, and we should not break that. 4304 4305 switch (Op->getOpcode()) { 4306 case Instruction::Add: 4307 case Instruction::Sub: 4308 case Instruction::Mul: 4309 case Instruction::UDiv: 4310 case Instruction::URem: 4311 case Instruction::And: 4312 case Instruction::Or: 4313 case Instruction::AShr: 4314 case Instruction::Shl: 4315 return BinaryOp(Op); 4316 4317 case Instruction::Xor: 4318 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) 4319 // If the RHS of the xor is a signmask, then this is just an add. 4320 // Instcombine turns add of signmask into xor as a strength reduction step. 4321 if (RHSC->getValue().isSignMask()) 4322 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); 4323 return BinaryOp(Op); 4324 4325 case Instruction::LShr: 4326 // Turn logical shift right of a constant into a unsigned divide. 4327 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { 4328 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); 4329 4330 // If the shift count is not less than the bitwidth, the result of 4331 // the shift is undefined. Don't try to analyze it, because the 4332 // resolution chosen here may differ from the resolution chosen in 4333 // other parts of the compiler. 4334 if (SA->getValue().ult(BitWidth)) { 4335 Constant *X = 4336 ConstantInt::get(SA->getContext(), 4337 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 4338 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); 4339 } 4340 } 4341 return BinaryOp(Op); 4342 4343 case Instruction::ExtractValue: { 4344 auto *EVI = cast<ExtractValueInst>(Op); 4345 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) 4346 break; 4347 4348 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()); 4349 if (!WO) 4350 break; 4351 4352 Instruction::BinaryOps BinOp = WO->getBinaryOp(); 4353 bool Signed = WO->isSigned(); 4354 // TODO: Should add nuw/nsw flags for mul as well. 4355 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT)) 4356 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS()); 4357 4358 // Now that we know that all uses of the arithmetic-result component of 4359 // CI are guarded by the overflow check, we can go ahead and pretend 4360 // that the arithmetic is non-overflowing. 4361 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), 4362 /* IsNSW = */ Signed, /* IsNUW = */ !Signed); 4363 } 4364 4365 default: 4366 break; 4367 } 4368 4369 // Recognise intrinsic loop.decrement.reg, and as this has exactly the same 4370 // semantics as a Sub, return a binary sub expression. 4371 if (auto *II = dyn_cast<IntrinsicInst>(V)) 4372 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) 4373 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); 4374 4375 return None; 4376 } 4377 4378 /// Helper function to createAddRecFromPHIWithCasts. We have a phi 4379 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4380 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4381 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4382 /// follows one of the following patterns: 4383 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4384 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4385 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4386 /// we return the type of the truncation operation, and indicate whether the 4387 /// truncated type should be treated as signed/unsigned by setting 4388 /// \p Signed to true/false, respectively. 4389 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4390 bool &Signed, ScalarEvolution &SE) { 4391 // The case where Op == SymbolicPHI (that is, with no type conversions on 4392 // the way) is handled by the regular add recurrence creating logic and 4393 // would have already been triggered in createAddRecForPHI. Reaching it here 4394 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4395 // because one of the other operands of the SCEVAddExpr updating this PHI is 4396 // not invariant). 4397 // 4398 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4399 // this case predicates that allow us to prove that Op == SymbolicPHI will 4400 // be added. 4401 if (Op == SymbolicPHI) 4402 return nullptr; 4403 4404 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4405 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4406 if (SourceBits != NewBits) 4407 return nullptr; 4408 4409 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4410 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4411 if (!SExt && !ZExt) 4412 return nullptr; 4413 const SCEVTruncateExpr *Trunc = 4414 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4415 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4416 if (!Trunc) 4417 return nullptr; 4418 const SCEV *X = Trunc->getOperand(); 4419 if (X != SymbolicPHI) 4420 return nullptr; 4421 Signed = SExt != nullptr; 4422 return Trunc->getType(); 4423 } 4424 4425 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4426 if (!PN->getType()->isIntegerTy()) 4427 return nullptr; 4428 const Loop *L = LI.getLoopFor(PN->getParent()); 4429 if (!L || L->getHeader() != PN->getParent()) 4430 return nullptr; 4431 return L; 4432 } 4433 4434 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4435 // computation that updates the phi follows the following pattern: 4436 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4437 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 4438 // If so, try to see if it can be rewritten as an AddRecExpr under some 4439 // Predicates. If successful, return them as a pair. Also cache the results 4440 // of the analysis. 4441 // 4442 // Example usage scenario: 4443 // Say the Rewriter is called for the following SCEV: 4444 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4445 // where: 4446 // %X = phi i64 (%Start, %BEValue) 4447 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4448 // and call this function with %SymbolicPHI = %X. 4449 // 4450 // The analysis will find that the value coming around the backedge has 4451 // the following SCEV: 4452 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4453 // Upon concluding that this matches the desired pattern, the function 4454 // will return the pair {NewAddRec, SmallPredsVec} where: 4455 // NewAddRec = {%Start,+,%Step} 4456 // SmallPredsVec = {P1, P2, P3} as follows: 4457 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> 4458 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) 4459 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) 4460 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec 4461 // under the predicates {P1,P2,P3}. 4462 // This predicated rewrite will be cached in PredicatedSCEVRewrites: 4463 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)} 4464 // 4465 // TODO's: 4466 // 4467 // 1) Extend the Induction descriptor to also support inductions that involve 4468 // casts: When needed (namely, when we are called in the context of the 4469 // vectorizer induction analysis), a Set of cast instructions will be 4470 // populated by this method, and provided back to isInductionPHI. This is 4471 // needed to allow the vectorizer to properly record them to be ignored by 4472 // the cost model and to avoid vectorizing them (otherwise these casts, 4473 // which are redundant under the runtime overflow checks, will be 4474 // vectorized, which can be costly). 4475 // 4476 // 2) Support additional induction/PHISCEV patterns: We also want to support 4477 // inductions where the sext-trunc / zext-trunc operations (partly) occur 4478 // after the induction update operation (the induction increment): 4479 // 4480 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 4481 // which correspond to a phi->add->trunc->sext/zext->phi update chain. 4482 // 4483 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) 4484 // which correspond to a phi->trunc->add->sext/zext->phi update chain. 4485 // 4486 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4487 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4488 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4489 SmallVector<const SCEVPredicate *, 3> Predicates; 4490 4491 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4492 // return an AddRec expression under some predicate. 4493 4494 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4495 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4496 assert(L && "Expecting an integer loop header phi"); 4497 4498 // The loop may have multiple entrances or multiple exits; we can analyze 4499 // this phi as an addrec if it has a unique entry value and a unique 4500 // backedge value. 4501 Value *BEValueV = nullptr, *StartValueV = nullptr; 4502 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4503 Value *V = PN->getIncomingValue(i); 4504 if (L->contains(PN->getIncomingBlock(i))) { 4505 if (!BEValueV) { 4506 BEValueV = V; 4507 } else if (BEValueV != V) { 4508 BEValueV = nullptr; 4509 break; 4510 } 4511 } else if (!StartValueV) { 4512 StartValueV = V; 4513 } else if (StartValueV != V) { 4514 StartValueV = nullptr; 4515 break; 4516 } 4517 } 4518 if (!BEValueV || !StartValueV) 4519 return None; 4520 4521 const SCEV *BEValue = getSCEV(BEValueV); 4522 4523 // If the value coming around the backedge is an add with the symbolic 4524 // value we just inserted, possibly with casts that we can ignore under 4525 // an appropriate runtime guard, then we found a simple induction variable! 4526 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4527 if (!Add) 4528 return None; 4529 4530 // If there is a single occurrence of the symbolic value, possibly 4531 // casted, replace it with a recurrence. 4532 unsigned FoundIndex = Add->getNumOperands(); 4533 Type *TruncTy = nullptr; 4534 bool Signed; 4535 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4536 if ((TruncTy = 4537 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4538 if (FoundIndex == e) { 4539 FoundIndex = i; 4540 break; 4541 } 4542 4543 if (FoundIndex == Add->getNumOperands()) 4544 return None; 4545 4546 // Create an add with everything but the specified operand. 4547 SmallVector<const SCEV *, 8> Ops; 4548 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4549 if (i != FoundIndex) 4550 Ops.push_back(Add->getOperand(i)); 4551 const SCEV *Accum = getAddExpr(Ops); 4552 4553 // The runtime checks will not be valid if the step amount is 4554 // varying inside the loop. 4555 if (!isLoopInvariant(Accum, L)) 4556 return None; 4557 4558 // *** Part2: Create the predicates 4559 4560 // Analysis was successful: we have a phi-with-cast pattern for which we 4561 // can return an AddRec expression under the following predicates: 4562 // 4563 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4564 // fits within the truncated type (does not overflow) for i = 0 to n-1. 4565 // P2: An Equal predicate that guarantees that 4566 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4567 // P3: An Equal predicate that guarantees that 4568 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4569 // 4570 // As we next prove, the above predicates guarantee that: 4571 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4572 // 4573 // 4574 // More formally, we want to prove that: 4575 // Expr(i+1) = Start + (i+1) * Accum 4576 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4577 // 4578 // Given that: 4579 // 1) Expr(0) = Start 4580 // 2) Expr(1) = Start + Accum 4581 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4582 // 3) Induction hypothesis (step i): 4583 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4584 // 4585 // Proof: 4586 // Expr(i+1) = 4587 // = Start + (i+1)*Accum 4588 // = (Start + i*Accum) + Accum 4589 // = Expr(i) + Accum 4590 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4591 // :: from step i 4592 // 4593 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4594 // 4595 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4596 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4597 // + Accum :: from P3 4598 // 4599 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4600 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4601 // 4602 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4603 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4604 // 4605 // By induction, the same applies to all iterations 1<=i<n: 4606 // 4607 4608 // Create a truncated addrec for which we will add a no overflow check (P1). 4609 const SCEV *StartVal = getSCEV(StartValueV); 4610 const SCEV *PHISCEV = 4611 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4612 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4613 4614 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4615 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4616 // will be constant. 4617 // 4618 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4619 // add P1. 4620 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4621 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4622 Signed ? SCEVWrapPredicate::IncrementNSSW 4623 : SCEVWrapPredicate::IncrementNUSW; 4624 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4625 Predicates.push_back(AddRecPred); 4626 } 4627 4628 // Create the Equal Predicates P2,P3: 4629 4630 // It is possible that the predicates P2 and/or P3 are computable at 4631 // compile time due to StartVal and/or Accum being constants. 4632 // If either one is, then we can check that now and escape if either P2 4633 // or P3 is false. 4634 4635 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4636 // for each of StartVal and Accum 4637 auto getExtendedExpr = [&](const SCEV *Expr, 4638 bool CreateSignExtend) -> const SCEV * { 4639 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4640 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4641 const SCEV *ExtendedExpr = 4642 CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType()) 4643 : getZeroExtendExpr(TruncatedExpr, Expr->getType()); 4644 return ExtendedExpr; 4645 }; 4646 4647 // Given: 4648 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy 4649 // = getExtendedExpr(Expr) 4650 // Determine whether the predicate P: Expr == ExtendedExpr 4651 // is known to be false at compile time 4652 auto PredIsKnownFalse = [&](const SCEV *Expr, 4653 const SCEV *ExtendedExpr) -> bool { 4654 return Expr != ExtendedExpr && 4655 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr); 4656 }; 4657 4658 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed); 4659 if (PredIsKnownFalse(StartVal, StartExtended)) { 4660 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";); 4661 return None; 4662 } 4663 4664 // The Step is always Signed (because the overflow checks are either 4665 // NSSW or NUSW) 4666 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true); 4667 if (PredIsKnownFalse(Accum, AccumExtended)) { 4668 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";); 4669 return None; 4670 } 4671 4672 auto AppendPredicate = [&](const SCEV *Expr, 4673 const SCEV *ExtendedExpr) -> void { 4674 if (Expr != ExtendedExpr && 4675 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) { 4676 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr); 4677 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred); 4678 Predicates.push_back(Pred); 4679 } 4680 }; 4681 4682 AppendPredicate(StartVal, StartExtended); 4683 AppendPredicate(Accum, AccumExtended); 4684 4685 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in 4686 // which the casts had been folded away. The caller can rewrite SymbolicPHI 4687 // into NewAR if it will also add the runtime overflow checks specified in 4688 // Predicates. 4689 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap); 4690 4691 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite = 4692 std::make_pair(NewAR, Predicates); 4693 // Remember the result of the analysis for this SCEV at this locayyytion. 4694 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite; 4695 return PredRewrite; 4696 } 4697 4698 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4699 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) { 4700 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4701 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4702 if (!L) 4703 return None; 4704 4705 // Check to see if we already analyzed this PHI. 4706 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); 4707 if (I != PredicatedSCEVRewrites.end()) { 4708 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = 4709 I->second; 4710 // Analysis was done before and failed to create an AddRec: 4711 if (Rewrite.first == SymbolicPHI) 4712 return None; 4713 // Analysis was done before and succeeded to create an AddRec under 4714 // a predicate: 4715 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); 4716 assert(!(Rewrite.second).empty() && "Expected to find Predicates"); 4717 return Rewrite; 4718 } 4719 4720 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4721 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); 4722 4723 // Record in the cache that the analysis failed 4724 if (!Rewrite) { 4725 SmallVector<const SCEVPredicate *, 3> Predicates; 4726 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; 4727 return None; 4728 } 4729 4730 return Rewrite; 4731 } 4732 4733 // FIXME: This utility is currently required because the Rewriter currently 4734 // does not rewrite this expression: 4735 // {0, +, (sext ix (trunc iy to ix) to iy)} 4736 // into {0, +, %step}, 4737 // even when the following Equal predicate exists: 4738 // "%step == (sext ix (trunc iy to ix) to iy)". 4739 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( 4740 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { 4741 if (AR1 == AR2) 4742 return true; 4743 4744 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { 4745 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) && 4746 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1))) 4747 return false; 4748 return true; 4749 }; 4750 4751 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || 4752 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) 4753 return false; 4754 return true; 4755 } 4756 4757 /// A helper function for createAddRecFromPHI to handle simple cases. 4758 /// 4759 /// This function tries to find an AddRec expression for the simplest (yet most 4760 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 4761 /// If it fails, createAddRecFromPHI will use a more general, but slow, 4762 /// technique for finding the AddRec expression. 4763 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 4764 Value *BEValueV, 4765 Value *StartValueV) { 4766 const Loop *L = LI.getLoopFor(PN->getParent()); 4767 assert(L && L->getHeader() == PN->getParent()); 4768 assert(BEValueV && StartValueV); 4769 4770 auto BO = MatchBinaryOp(BEValueV, DT); 4771 if (!BO) 4772 return nullptr; 4773 4774 if (BO->Opcode != Instruction::Add) 4775 return nullptr; 4776 4777 const SCEV *Accum = nullptr; 4778 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 4779 Accum = getSCEV(BO->RHS); 4780 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 4781 Accum = getSCEV(BO->LHS); 4782 4783 if (!Accum) 4784 return nullptr; 4785 4786 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4787 if (BO->IsNUW) 4788 Flags = setFlags(Flags, SCEV::FlagNUW); 4789 if (BO->IsNSW) 4790 Flags = setFlags(Flags, SCEV::FlagNSW); 4791 4792 const SCEV *StartVal = getSCEV(StartValueV); 4793 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4794 4795 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 4796 4797 // We can add Flags to the post-inc expression only if we 4798 // know that it is *undefined behavior* for BEValueV to 4799 // overflow. 4800 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 4801 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 4802 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 4803 4804 return PHISCEV; 4805 } 4806 4807 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { 4808 const Loop *L = LI.getLoopFor(PN->getParent()); 4809 if (!L || L->getHeader() != PN->getParent()) 4810 return nullptr; 4811 4812 // The loop may have multiple entrances or multiple exits; we can analyze 4813 // this phi as an addrec if it has a unique entry value and a unique 4814 // backedge value. 4815 Value *BEValueV = nullptr, *StartValueV = nullptr; 4816 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4817 Value *V = PN->getIncomingValue(i); 4818 if (L->contains(PN->getIncomingBlock(i))) { 4819 if (!BEValueV) { 4820 BEValueV = V; 4821 } else if (BEValueV != V) { 4822 BEValueV = nullptr; 4823 break; 4824 } 4825 } else if (!StartValueV) { 4826 StartValueV = V; 4827 } else if (StartValueV != V) { 4828 StartValueV = nullptr; 4829 break; 4830 } 4831 } 4832 if (!BEValueV || !StartValueV) 4833 return nullptr; 4834 4835 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && 4836 "PHI node already processed?"); 4837 4838 // First, try to find AddRec expression without creating a fictituos symbolic 4839 // value for PN. 4840 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV)) 4841 return S; 4842 4843 // Handle PHI node value symbolically. 4844 const SCEV *SymbolicName = getUnknown(PN); 4845 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName}); 4846 4847 // Using this symbolic name for the PHI, analyze the value coming around 4848 // the back-edge. 4849 const SCEV *BEValue = getSCEV(BEValueV); 4850 4851 // NOTE: If BEValue is loop invariant, we know that the PHI node just 4852 // has a special value for the first iteration of the loop. 4853 4854 // If the value coming around the backedge is an add with the symbolic 4855 // value we just inserted, then we found a simple induction variable! 4856 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 4857 // If there is a single occurrence of the symbolic value, replace it 4858 // with a recurrence. 4859 unsigned FoundIndex = Add->getNumOperands(); 4860 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4861 if (Add->getOperand(i) == SymbolicName) 4862 if (FoundIndex == e) { 4863 FoundIndex = i; 4864 break; 4865 } 4866 4867 if (FoundIndex != Add->getNumOperands()) { 4868 // Create an add with everything but the specified operand. 4869 SmallVector<const SCEV *, 8> Ops; 4870 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4871 if (i != FoundIndex) 4872 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i), 4873 L, *this)); 4874 const SCEV *Accum = getAddExpr(Ops); 4875 4876 // This is not a valid addrec if the step amount is varying each 4877 // loop iteration, but is not itself an addrec in this loop. 4878 if (isLoopInvariant(Accum, L) || 4879 (isa<SCEVAddRecExpr>(Accum) && 4880 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 4881 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4882 4883 if (auto BO = MatchBinaryOp(BEValueV, DT)) { 4884 if (BO->Opcode == Instruction::Add && BO->LHS == PN) { 4885 if (BO->IsNUW) 4886 Flags = setFlags(Flags, SCEV::FlagNUW); 4887 if (BO->IsNSW) 4888 Flags = setFlags(Flags, SCEV::FlagNSW); 4889 } 4890 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { 4891 // If the increment is an inbounds GEP, then we know the address 4892 // space cannot be wrapped around. We cannot make any guarantee 4893 // about signed or unsigned overflow because pointers are 4894 // unsigned but we may have a negative index from the base 4895 // pointer. We can guarantee that no unsigned wrap occurs if the 4896 // indices form a positive value. 4897 if (GEP->isInBounds() && GEP->getOperand(0) == PN) { 4898 Flags = setFlags(Flags, SCEV::FlagNW); 4899 4900 const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); 4901 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) 4902 Flags = setFlags(Flags, SCEV::FlagNUW); 4903 } 4904 4905 // We cannot transfer nuw and nsw flags from subtraction 4906 // operations -- sub nuw X, Y is not the same as add nuw X, -Y 4907 // for instance. 4908 } 4909 4910 const SCEV *StartVal = getSCEV(StartValueV); 4911 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4912 4913 // Okay, for the entire analysis of this edge we assumed the PHI 4914 // to be symbolic. We now need to go back and purge all of the 4915 // entries for the scalars that use the symbolic expression. 4916 forgetSymbolicName(PN, SymbolicName); 4917 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 4918 4919 // We can add Flags to the post-inc expression only if we 4920 // know that it is *undefined behavior* for BEValueV to 4921 // overflow. 4922 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 4923 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 4924 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 4925 4926 return PHISCEV; 4927 } 4928 } 4929 } else { 4930 // Otherwise, this could be a loop like this: 4931 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 4932 // In this case, j = {1,+,1} and BEValue is j. 4933 // Because the other in-value of i (0) fits the evolution of BEValue 4934 // i really is an addrec evolution. 4935 // 4936 // We can generalize this saying that i is the shifted value of BEValue 4937 // by one iteration: 4938 // PHI(f(0), f({1,+,1})) --> f({0,+,1}) 4939 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this); 4940 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false); 4941 if (Shifted != getCouldNotCompute() && 4942 Start != getCouldNotCompute()) { 4943 const SCEV *StartVal = getSCEV(StartValueV); 4944 if (Start == StartVal) { 4945 // Okay, for the entire analysis of this edge we assumed the PHI 4946 // to be symbolic. We now need to go back and purge all of the 4947 // entries for the scalars that use the symbolic expression. 4948 forgetSymbolicName(PN, SymbolicName); 4949 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted; 4950 return Shifted; 4951 } 4952 } 4953 } 4954 4955 // Remove the temporary PHI node SCEV that has been inserted while intending 4956 // to create an AddRecExpr for this PHI node. We can not keep this temporary 4957 // as it will prevent later (possibly simpler) SCEV expressions to be added 4958 // to the ValueExprMap. 4959 eraseValueFromMap(PN); 4960 4961 return nullptr; 4962 } 4963 4964 // Checks if the SCEV S is available at BB. S is considered available at BB 4965 // if S can be materialized at BB without introducing a fault. 4966 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, 4967 BasicBlock *BB) { 4968 struct CheckAvailable { 4969 bool TraversalDone = false; 4970 bool Available = true; 4971 4972 const Loop *L = nullptr; // The loop BB is in (can be nullptr) 4973 BasicBlock *BB = nullptr; 4974 DominatorTree &DT; 4975 4976 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT) 4977 : L(L), BB(BB), DT(DT) {} 4978 4979 bool setUnavailable() { 4980 TraversalDone = true; 4981 Available = false; 4982 return false; 4983 } 4984 4985 bool follow(const SCEV *S) { 4986 switch (S->getSCEVType()) { 4987 case scConstant: case scTruncate: case scZeroExtend: case scSignExtend: 4988 case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: 4989 case scUMinExpr: 4990 case scSMinExpr: 4991 // These expressions are available if their operand(s) is/are. 4992 return true; 4993 4994 case scAddRecExpr: { 4995 // We allow add recurrences that are on the loop BB is in, or some 4996 // outer loop. This guarantees availability because the value of the 4997 // add recurrence at BB is simply the "current" value of the induction 4998 // variable. We can relax this in the future; for instance an add 4999 // recurrence on a sibling dominating loop is also available at BB. 5000 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop(); 5001 if (L && (ARLoop == L || ARLoop->contains(L))) 5002 return true; 5003 5004 return setUnavailable(); 5005 } 5006 5007 case scUnknown: { 5008 // For SCEVUnknown, we check for simple dominance. 5009 const auto *SU = cast<SCEVUnknown>(S); 5010 Value *V = SU->getValue(); 5011 5012 if (isa<Argument>(V)) 5013 return false; 5014 5015 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB)) 5016 return false; 5017 5018 return setUnavailable(); 5019 } 5020 5021 case scUDivExpr: 5022 case scCouldNotCompute: 5023 // We do not try to smart about these at all. 5024 return setUnavailable(); 5025 } 5026 llvm_unreachable("switch should be fully covered!"); 5027 } 5028 5029 bool isDone() { return TraversalDone; } 5030 }; 5031 5032 CheckAvailable CA(L, BB, DT); 5033 SCEVTraversal<CheckAvailable> ST(CA); 5034 5035 ST.visitAll(S); 5036 return CA.Available; 5037 } 5038 5039 // Try to match a control flow sequence that branches out at BI and merges back 5040 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful 5041 // match. 5042 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5043 Value *&C, Value *&LHS, Value *&RHS) { 5044 C = BI->getCondition(); 5045 5046 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5047 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5048 5049 if (!LeftEdge.isSingleEdge()) 5050 return false; 5051 5052 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5053 5054 Use &LeftUse = Merge->getOperandUse(0); 5055 Use &RightUse = Merge->getOperandUse(1); 5056 5057 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5058 LHS = LeftUse; 5059 RHS = RightUse; 5060 return true; 5061 } 5062 5063 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5064 LHS = RightUse; 5065 RHS = LeftUse; 5066 return true; 5067 } 5068 5069 return false; 5070 } 5071 5072 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5073 auto IsReachable = 5074 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5075 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5076 const Loop *L = LI.getLoopFor(PN->getParent()); 5077 5078 // We don't want to break LCSSA, even in a SCEV expression tree. 5079 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5080 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5081 return nullptr; 5082 5083 // Try to match 5084 // 5085 // br %cond, label %left, label %right 5086 // left: 5087 // br label %merge 5088 // right: 5089 // br label %merge 5090 // merge: 5091 // V = phi [ %x, %left ], [ %y, %right ] 5092 // 5093 // as "select %cond, %x, %y" 5094 5095 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5096 assert(IDom && "At least the entry block should dominate PN"); 5097 5098 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5099 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5100 5101 if (BI && BI->isConditional() && 5102 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5103 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5104 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5105 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5106 } 5107 5108 return nullptr; 5109 } 5110 5111 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5112 if (const SCEV *S = createAddRecFromPHI(PN)) 5113 return S; 5114 5115 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5116 return S; 5117 5118 // If the PHI has a single incoming value, follow that value, unless the 5119 // PHI's incoming blocks are in a different loop, in which case doing so 5120 // risks breaking LCSSA form. Instcombine would normally zap these, but 5121 // it doesn't have DominatorTree information, so it may miss cases. 5122 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5123 if (LI.replacementPreservesLCSSAForm(PN, V)) 5124 return getSCEV(V); 5125 5126 // If it's not a loop phi, we can't handle it yet. 5127 return getUnknown(PN); 5128 } 5129 5130 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5131 Value *Cond, 5132 Value *TrueVal, 5133 Value *FalseVal) { 5134 // Handle "constant" branch or select. This can occur for instance when a 5135 // loop pass transforms an inner loop and moves on to process the outer loop. 5136 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5137 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5138 5139 // Try to match some simple smax or umax patterns. 5140 auto *ICI = dyn_cast<ICmpInst>(Cond); 5141 if (!ICI) 5142 return getUnknown(I); 5143 5144 Value *LHS = ICI->getOperand(0); 5145 Value *RHS = ICI->getOperand(1); 5146 5147 switch (ICI->getPredicate()) { 5148 case ICmpInst::ICMP_SLT: 5149 case ICmpInst::ICMP_SLE: 5150 std::swap(LHS, RHS); 5151 LLVM_FALLTHROUGH; 5152 case ICmpInst::ICMP_SGT: 5153 case ICmpInst::ICMP_SGE: 5154 // a >s b ? a+x : b+x -> smax(a, b)+x 5155 // a >s b ? b+x : a+x -> smin(a, b)+x 5156 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5157 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5158 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5159 const SCEV *LA = getSCEV(TrueVal); 5160 const SCEV *RA = getSCEV(FalseVal); 5161 const SCEV *LDiff = getMinusSCEV(LA, LS); 5162 const SCEV *RDiff = getMinusSCEV(RA, RS); 5163 if (LDiff == RDiff) 5164 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5165 LDiff = getMinusSCEV(LA, RS); 5166 RDiff = getMinusSCEV(RA, LS); 5167 if (LDiff == RDiff) 5168 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5169 } 5170 break; 5171 case ICmpInst::ICMP_ULT: 5172 case ICmpInst::ICMP_ULE: 5173 std::swap(LHS, RHS); 5174 LLVM_FALLTHROUGH; 5175 case ICmpInst::ICMP_UGT: 5176 case ICmpInst::ICMP_UGE: 5177 // a >u b ? a+x : b+x -> umax(a, b)+x 5178 // a >u b ? b+x : a+x -> umin(a, b)+x 5179 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5180 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5181 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5182 const SCEV *LA = getSCEV(TrueVal); 5183 const SCEV *RA = getSCEV(FalseVal); 5184 const SCEV *LDiff = getMinusSCEV(LA, LS); 5185 const SCEV *RDiff = getMinusSCEV(RA, RS); 5186 if (LDiff == RDiff) 5187 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5188 LDiff = getMinusSCEV(LA, RS); 5189 RDiff = getMinusSCEV(RA, LS); 5190 if (LDiff == RDiff) 5191 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5192 } 5193 break; 5194 case ICmpInst::ICMP_NE: 5195 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5196 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5197 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5198 const SCEV *One = getOne(I->getType()); 5199 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5200 const SCEV *LA = getSCEV(TrueVal); 5201 const SCEV *RA = getSCEV(FalseVal); 5202 const SCEV *LDiff = getMinusSCEV(LA, LS); 5203 const SCEV *RDiff = getMinusSCEV(RA, One); 5204 if (LDiff == RDiff) 5205 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5206 } 5207 break; 5208 case ICmpInst::ICMP_EQ: 5209 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5210 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5211 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5212 const SCEV *One = getOne(I->getType()); 5213 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5214 const SCEV *LA = getSCEV(TrueVal); 5215 const SCEV *RA = getSCEV(FalseVal); 5216 const SCEV *LDiff = getMinusSCEV(LA, One); 5217 const SCEV *RDiff = getMinusSCEV(RA, LS); 5218 if (LDiff == RDiff) 5219 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5220 } 5221 break; 5222 default: 5223 break; 5224 } 5225 5226 return getUnknown(I); 5227 } 5228 5229 /// Expand GEP instructions into add and multiply operations. This allows them 5230 /// to be analyzed by regular SCEV code. 5231 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5232 // Don't attempt to analyze GEPs over unsized objects. 5233 if (!GEP->getSourceElementType()->isSized()) 5234 return getUnknown(GEP); 5235 5236 SmallVector<const SCEV *, 4> IndexExprs; 5237 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5238 IndexExprs.push_back(getSCEV(*Index)); 5239 return getGEPExpr(GEP, IndexExprs); 5240 } 5241 5242 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5243 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5244 return C->getAPInt().countTrailingZeros(); 5245 5246 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5247 return std::min(GetMinTrailingZeros(T->getOperand()), 5248 (uint32_t)getTypeSizeInBits(T->getType())); 5249 5250 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5251 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5252 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5253 ? getTypeSizeInBits(E->getType()) 5254 : OpRes; 5255 } 5256 5257 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5258 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5259 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5260 ? getTypeSizeInBits(E->getType()) 5261 : OpRes; 5262 } 5263 5264 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5265 // The result is the min of all operands results. 5266 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5267 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5268 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5269 return MinOpRes; 5270 } 5271 5272 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5273 // The result is the sum of all operands results. 5274 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5275 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5276 for (unsigned i = 1, e = M->getNumOperands(); 5277 SumOpRes != BitWidth && i != e; ++i) 5278 SumOpRes = 5279 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5280 return SumOpRes; 5281 } 5282 5283 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5284 // The result is the min of all operands results. 5285 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5286 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5287 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5288 return MinOpRes; 5289 } 5290 5291 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5292 // The result is the min of all operands results. 5293 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5294 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5295 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5296 return MinOpRes; 5297 } 5298 5299 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5300 // The result is the min of all operands results. 5301 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5302 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5303 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5304 return MinOpRes; 5305 } 5306 5307 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5308 // For a SCEVUnknown, ask ValueTracking. 5309 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5310 return Known.countMinTrailingZeros(); 5311 } 5312 5313 // SCEVUDivExpr 5314 return 0; 5315 } 5316 5317 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5318 auto I = MinTrailingZerosCache.find(S); 5319 if (I != MinTrailingZerosCache.end()) 5320 return I->second; 5321 5322 uint32_t Result = GetMinTrailingZerosImpl(S); 5323 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5324 assert(InsertPair.second && "Should insert a new key"); 5325 return InsertPair.first->second; 5326 } 5327 5328 /// Helper method to assign a range to V from metadata present in the IR. 5329 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5330 if (Instruction *I = dyn_cast<Instruction>(V)) 5331 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5332 return getConstantRangeFromMetadata(*MD); 5333 5334 return None; 5335 } 5336 5337 /// Determine the range for a particular SCEV. If SignHint is 5338 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5339 /// with a "cleaner" unsigned (resp. signed) representation. 5340 const ConstantRange & 5341 ScalarEvolution::getRangeRef(const SCEV *S, 5342 ScalarEvolution::RangeSignHint SignHint) { 5343 DenseMap<const SCEV *, ConstantRange> &Cache = 5344 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5345 : SignedRanges; 5346 ConstantRange::PreferredRangeType RangeType = 5347 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 5348 ? ConstantRange::Unsigned : ConstantRange::Signed; 5349 5350 // See if we've computed this range already. 5351 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5352 if (I != Cache.end()) 5353 return I->second; 5354 5355 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5356 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5357 5358 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5359 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5360 using OBO = OverflowingBinaryOperator; 5361 5362 // If the value has known zeros, the maximum value will have those known zeros 5363 // as well. 5364 uint32_t TZ = GetMinTrailingZeros(S); 5365 if (TZ != 0) { 5366 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5367 ConservativeResult = 5368 ConstantRange(APInt::getMinValue(BitWidth), 5369 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5370 else 5371 ConservativeResult = ConstantRange( 5372 APInt::getSignedMinValue(BitWidth), 5373 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5374 } 5375 5376 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5377 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5378 unsigned WrapType = OBO::AnyWrap; 5379 if (Add->hasNoSignedWrap()) 5380 WrapType |= OBO::NoSignedWrap; 5381 if (Add->hasNoUnsignedWrap()) 5382 WrapType |= OBO::NoUnsignedWrap; 5383 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5384 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 5385 WrapType, RangeType); 5386 return setRange(Add, SignHint, 5387 ConservativeResult.intersectWith(X, RangeType)); 5388 } 5389 5390 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5391 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5392 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5393 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5394 return setRange(Mul, SignHint, 5395 ConservativeResult.intersectWith(X, RangeType)); 5396 } 5397 5398 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5399 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5400 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5401 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5402 return setRange(SMax, SignHint, 5403 ConservativeResult.intersectWith(X, RangeType)); 5404 } 5405 5406 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5407 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5408 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5409 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5410 return setRange(UMax, SignHint, 5411 ConservativeResult.intersectWith(X, RangeType)); 5412 } 5413 5414 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) { 5415 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint); 5416 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i) 5417 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint)); 5418 return setRange(SMin, SignHint, 5419 ConservativeResult.intersectWith(X, RangeType)); 5420 } 5421 5422 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) { 5423 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint); 5424 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i) 5425 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint)); 5426 return setRange(UMin, SignHint, 5427 ConservativeResult.intersectWith(X, RangeType)); 5428 } 5429 5430 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5431 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5432 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5433 return setRange(UDiv, SignHint, 5434 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 5435 } 5436 5437 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5438 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5439 return setRange(ZExt, SignHint, 5440 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), 5441 RangeType)); 5442 } 5443 5444 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5445 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5446 return setRange(SExt, SignHint, 5447 ConservativeResult.intersectWith(X.signExtend(BitWidth), 5448 RangeType)); 5449 } 5450 5451 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5452 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5453 return setRange(Trunc, SignHint, 5454 ConservativeResult.intersectWith(X.truncate(BitWidth), 5455 RangeType)); 5456 } 5457 5458 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5459 // If there's no unsigned wrap, the value will never be less than its 5460 // initial value. 5461 if (AddRec->hasNoUnsignedWrap()) { 5462 APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart()); 5463 if (!UnsignedMinValue.isNullValue()) 5464 ConservativeResult = ConservativeResult.intersectWith( 5465 ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType); 5466 } 5467 5468 // If there's no signed wrap, and all the operands except initial value have 5469 // the same sign or zero, the value won't ever be: 5470 // 1: smaller than initial value if operands are non negative, 5471 // 2: bigger than initial value if operands are non positive. 5472 // For both cases, value can not cross signed min/max boundary. 5473 if (AddRec->hasNoSignedWrap()) { 5474 bool AllNonNeg = true; 5475 bool AllNonPos = true; 5476 for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) { 5477 if (!isKnownNonNegative(AddRec->getOperand(i))) 5478 AllNonNeg = false; 5479 if (!isKnownNonPositive(AddRec->getOperand(i))) 5480 AllNonPos = false; 5481 } 5482 if (AllNonNeg) 5483 ConservativeResult = ConservativeResult.intersectWith( 5484 ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()), 5485 APInt::getSignedMinValue(BitWidth)), 5486 RangeType); 5487 else if (AllNonPos) 5488 ConservativeResult = ConservativeResult.intersectWith( 5489 ConstantRange::getNonEmpty( 5490 APInt::getSignedMinValue(BitWidth), 5491 getSignedRangeMax(AddRec->getStart()) + 1), 5492 RangeType); 5493 } 5494 5495 // TODO: non-affine addrec 5496 if (AddRec->isAffine()) { 5497 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop()); 5498 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5499 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5500 auto RangeFromAffine = getRangeForAffineAR( 5501 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5502 BitWidth); 5503 if (!RangeFromAffine.isFullSet()) 5504 ConservativeResult = 5505 ConservativeResult.intersectWith(RangeFromAffine, RangeType); 5506 5507 auto RangeFromFactoring = getRangeViaFactoring( 5508 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5509 BitWidth); 5510 if (!RangeFromFactoring.isFullSet()) 5511 ConservativeResult = 5512 ConservativeResult.intersectWith(RangeFromFactoring, RangeType); 5513 } 5514 } 5515 5516 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5517 } 5518 5519 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5520 // Check if the IR explicitly contains !range metadata. 5521 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5522 if (MDRange.hasValue()) 5523 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(), 5524 RangeType); 5525 5526 // Split here to avoid paying the compile-time cost of calling both 5527 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5528 // if needed. 5529 const DataLayout &DL = getDataLayout(); 5530 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5531 // For a SCEVUnknown, ask ValueTracking. 5532 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5533 if (Known.getBitWidth() != BitWidth) 5534 Known = Known.zextOrTrunc(BitWidth); 5535 // If Known does not result in full-set, intersect with it. 5536 if (Known.getMinValue() != Known.getMaxValue() + 1) 5537 ConservativeResult = ConservativeResult.intersectWith( 5538 ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1), 5539 RangeType); 5540 } else { 5541 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 5542 "generalize as needed!"); 5543 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5544 // If the pointer size is larger than the index size type, this can cause 5545 // NS to be larger than BitWidth. So compensate for this. 5546 if (U->getType()->isPointerTy()) { 5547 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType()); 5548 int ptrIdxDiff = ptrSize - BitWidth; 5549 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff) 5550 NS -= ptrIdxDiff; 5551 } 5552 5553 if (NS > 1) 5554 ConservativeResult = ConservativeResult.intersectWith( 5555 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 5556 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1), 5557 RangeType); 5558 } 5559 5560 // A range of Phi is a subset of union of all ranges of its input. 5561 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) { 5562 // Make sure that we do not run over cycled Phis. 5563 if (PendingPhiRanges.insert(Phi).second) { 5564 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false); 5565 for (auto &Op : Phi->operands()) { 5566 auto OpRange = getRangeRef(getSCEV(Op), SignHint); 5567 RangeFromOps = RangeFromOps.unionWith(OpRange); 5568 // No point to continue if we already have a full set. 5569 if (RangeFromOps.isFullSet()) 5570 break; 5571 } 5572 ConservativeResult = 5573 ConservativeResult.intersectWith(RangeFromOps, RangeType); 5574 bool Erased = PendingPhiRanges.erase(Phi); 5575 assert(Erased && "Failed to erase Phi properly?"); 5576 (void) Erased; 5577 } 5578 } 5579 5580 return setRange(U, SignHint, std::move(ConservativeResult)); 5581 } 5582 5583 return setRange(S, SignHint, std::move(ConservativeResult)); 5584 } 5585 5586 // Given a StartRange, Step and MaxBECount for an expression compute a range of 5587 // values that the expression can take. Initially, the expression has a value 5588 // from StartRange and then is changed by Step up to MaxBECount times. Signed 5589 // argument defines if we treat Step as signed or unsigned. 5590 static ConstantRange getRangeForAffineARHelper(APInt Step, 5591 const ConstantRange &StartRange, 5592 const APInt &MaxBECount, 5593 unsigned BitWidth, bool Signed) { 5594 // If either Step or MaxBECount is 0, then the expression won't change, and we 5595 // just need to return the initial range. 5596 if (Step == 0 || MaxBECount == 0) 5597 return StartRange; 5598 5599 // If we don't know anything about the initial value (i.e. StartRange is 5600 // FullRange), then we don't know anything about the final range either. 5601 // Return FullRange. 5602 if (StartRange.isFullSet()) 5603 return ConstantRange::getFull(BitWidth); 5604 5605 // If Step is signed and negative, then we use its absolute value, but we also 5606 // note that we're moving in the opposite direction. 5607 bool Descending = Signed && Step.isNegative(); 5608 5609 if (Signed) 5610 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this: 5611 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128. 5612 // This equations hold true due to the well-defined wrap-around behavior of 5613 // APInt. 5614 Step = Step.abs(); 5615 5616 // Check if Offset is more than full span of BitWidth. If it is, the 5617 // expression is guaranteed to overflow. 5618 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount)) 5619 return ConstantRange::getFull(BitWidth); 5620 5621 // Offset is by how much the expression can change. Checks above guarantee no 5622 // overflow here. 5623 APInt Offset = Step * MaxBECount; 5624 5625 // Minimum value of the final range will match the minimal value of StartRange 5626 // if the expression is increasing and will be decreased by Offset otherwise. 5627 // Maximum value of the final range will match the maximal value of StartRange 5628 // if the expression is decreasing and will be increased by Offset otherwise. 5629 APInt StartLower = StartRange.getLower(); 5630 APInt StartUpper = StartRange.getUpper() - 1; 5631 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset)) 5632 : (StartUpper + std::move(Offset)); 5633 5634 // It's possible that the new minimum/maximum value will fall into the initial 5635 // range (due to wrap around). This means that the expression can take any 5636 // value in this bitwidth, and we have to return full range. 5637 if (StartRange.contains(MovedBoundary)) 5638 return ConstantRange::getFull(BitWidth); 5639 5640 APInt NewLower = 5641 Descending ? std::move(MovedBoundary) : std::move(StartLower); 5642 APInt NewUpper = 5643 Descending ? std::move(StartUpper) : std::move(MovedBoundary); 5644 NewUpper += 1; 5645 5646 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range. 5647 return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper)); 5648 } 5649 5650 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 5651 const SCEV *Step, 5652 const SCEV *MaxBECount, 5653 unsigned BitWidth) { 5654 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 5655 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5656 "Precondition!"); 5657 5658 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 5659 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount); 5660 5661 // First, consider step signed. 5662 ConstantRange StartSRange = getSignedRange(Start); 5663 ConstantRange StepSRange = getSignedRange(Step); 5664 5665 // If Step can be both positive and negative, we need to find ranges for the 5666 // maximum absolute step values in both directions and union them. 5667 ConstantRange SR = 5668 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5669 MaxBECountValue, BitWidth, /* Signed = */ true); 5670 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5671 StartSRange, MaxBECountValue, 5672 BitWidth, /* Signed = */ true)); 5673 5674 // Next, consider step unsigned. 5675 ConstantRange UR = getRangeForAffineARHelper( 5676 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5677 MaxBECountValue, BitWidth, /* Signed = */ false); 5678 5679 // Finally, intersect signed and unsigned ranges. 5680 return SR.intersectWith(UR, ConstantRange::Smallest); 5681 } 5682 5683 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5684 const SCEV *Step, 5685 const SCEV *MaxBECount, 5686 unsigned BitWidth) { 5687 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5688 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5689 5690 struct SelectPattern { 5691 Value *Condition = nullptr; 5692 APInt TrueValue; 5693 APInt FalseValue; 5694 5695 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5696 const SCEV *S) { 5697 Optional<unsigned> CastOp; 5698 APInt Offset(BitWidth, 0); 5699 5700 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5701 "Should be!"); 5702 5703 // Peel off a constant offset: 5704 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5705 // In the future we could consider being smarter here and handle 5706 // {Start+Step,+,Step} too. 5707 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5708 return; 5709 5710 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5711 S = SA->getOperand(1); 5712 } 5713 5714 // Peel off a cast operation 5715 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5716 CastOp = SCast->getSCEVType(); 5717 S = SCast->getOperand(); 5718 } 5719 5720 using namespace llvm::PatternMatch; 5721 5722 auto *SU = dyn_cast<SCEVUnknown>(S); 5723 const APInt *TrueVal, *FalseVal; 5724 if (!SU || 5725 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5726 m_APInt(FalseVal)))) { 5727 Condition = nullptr; 5728 return; 5729 } 5730 5731 TrueValue = *TrueVal; 5732 FalseValue = *FalseVal; 5733 5734 // Re-apply the cast we peeled off earlier 5735 if (CastOp.hasValue()) 5736 switch (*CastOp) { 5737 default: 5738 llvm_unreachable("Unknown SCEV cast type!"); 5739 5740 case scTruncate: 5741 TrueValue = TrueValue.trunc(BitWidth); 5742 FalseValue = FalseValue.trunc(BitWidth); 5743 break; 5744 case scZeroExtend: 5745 TrueValue = TrueValue.zext(BitWidth); 5746 FalseValue = FalseValue.zext(BitWidth); 5747 break; 5748 case scSignExtend: 5749 TrueValue = TrueValue.sext(BitWidth); 5750 FalseValue = FalseValue.sext(BitWidth); 5751 break; 5752 } 5753 5754 // Re-apply the constant offset we peeled off earlier 5755 TrueValue += Offset; 5756 FalseValue += Offset; 5757 } 5758 5759 bool isRecognized() { return Condition != nullptr; } 5760 }; 5761 5762 SelectPattern StartPattern(*this, BitWidth, Start); 5763 if (!StartPattern.isRecognized()) 5764 return ConstantRange::getFull(BitWidth); 5765 5766 SelectPattern StepPattern(*this, BitWidth, Step); 5767 if (!StepPattern.isRecognized()) 5768 return ConstantRange::getFull(BitWidth); 5769 5770 if (StartPattern.Condition != StepPattern.Condition) { 5771 // We don't handle this case today; but we could, by considering four 5772 // possibilities below instead of two. I'm not sure if there are cases where 5773 // that will help over what getRange already does, though. 5774 return ConstantRange::getFull(BitWidth); 5775 } 5776 5777 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5778 // construct arbitrary general SCEV expressions here. This function is called 5779 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5780 // say) can end up caching a suboptimal value. 5781 5782 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5783 // C2352 and C2512 (otherwise it isn't needed). 5784 5785 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5786 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5787 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5788 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5789 5790 ConstantRange TrueRange = 5791 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5792 ConstantRange FalseRange = 5793 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5794 5795 return TrueRange.unionWith(FalseRange); 5796 } 5797 5798 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5799 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5800 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5801 5802 // Return early if there are no flags to propagate to the SCEV. 5803 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5804 if (BinOp->hasNoUnsignedWrap()) 5805 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5806 if (BinOp->hasNoSignedWrap()) 5807 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5808 if (Flags == SCEV::FlagAnyWrap) 5809 return SCEV::FlagAnyWrap; 5810 5811 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5812 } 5813 5814 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5815 // Here we check that I is in the header of the innermost loop containing I, 5816 // since we only deal with instructions in the loop header. The actual loop we 5817 // need to check later will come from an add recurrence, but getting that 5818 // requires computing the SCEV of the operands, which can be expensive. This 5819 // check we can do cheaply to rule out some cases early. 5820 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5821 if (InnermostContainingLoop == nullptr || 5822 InnermostContainingLoop->getHeader() != I->getParent()) 5823 return false; 5824 5825 // Only proceed if we can prove that I does not yield poison. 5826 if (!programUndefinedIfPoison(I)) 5827 return false; 5828 5829 // At this point we know that if I is executed, then it does not wrap 5830 // according to at least one of NSW or NUW. If I is not executed, then we do 5831 // not know if the calculation that I represents would wrap. Multiple 5832 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5833 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5834 // derived from other instructions that map to the same SCEV. We cannot make 5835 // that guarantee for cases where I is not executed. So we need to find the 5836 // loop that I is considered in relation to and prove that I is executed for 5837 // every iteration of that loop. That implies that the value that I 5838 // calculates does not wrap anywhere in the loop, so then we can apply the 5839 // flags to the SCEV. 5840 // 5841 // We check isLoopInvariant to disambiguate in case we are adding recurrences 5842 // from different loops, so that we know which loop to prove that I is 5843 // executed in. 5844 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 5845 // I could be an extractvalue from a call to an overflow intrinsic. 5846 // TODO: We can do better here in some cases. 5847 if (!isSCEVable(I->getOperand(OpIndex)->getType())) 5848 return false; 5849 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 5850 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 5851 bool AllOtherOpsLoopInvariant = true; 5852 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 5853 ++OtherOpIndex) { 5854 if (OtherOpIndex != OpIndex) { 5855 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 5856 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 5857 AllOtherOpsLoopInvariant = false; 5858 break; 5859 } 5860 } 5861 } 5862 if (AllOtherOpsLoopInvariant && 5863 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 5864 return true; 5865 } 5866 } 5867 return false; 5868 } 5869 5870 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 5871 // If we know that \c I can never be poison period, then that's enough. 5872 if (isSCEVExprNeverPoison(I)) 5873 return true; 5874 5875 // For an add recurrence specifically, we assume that infinite loops without 5876 // side effects are undefined behavior, and then reason as follows: 5877 // 5878 // If the add recurrence is poison in any iteration, it is poison on all 5879 // future iterations (since incrementing poison yields poison). If the result 5880 // of the add recurrence is fed into the loop latch condition and the loop 5881 // does not contain any throws or exiting blocks other than the latch, we now 5882 // have the ability to "choose" whether the backedge is taken or not (by 5883 // choosing a sufficiently evil value for the poison feeding into the branch) 5884 // for every iteration including and after the one in which \p I first became 5885 // poison. There are two possibilities (let's call the iteration in which \p 5886 // I first became poison as K): 5887 // 5888 // 1. In the set of iterations including and after K, the loop body executes 5889 // no side effects. In this case executing the backege an infinte number 5890 // of times will yield undefined behavior. 5891 // 5892 // 2. In the set of iterations including and after K, the loop body executes 5893 // at least one side effect. In this case, that specific instance of side 5894 // effect is control dependent on poison, which also yields undefined 5895 // behavior. 5896 5897 auto *ExitingBB = L->getExitingBlock(); 5898 auto *LatchBB = L->getLoopLatch(); 5899 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB) 5900 return false; 5901 5902 SmallPtrSet<const Instruction *, 16> Pushed; 5903 SmallVector<const Instruction *, 8> PoisonStack; 5904 5905 // We start by assuming \c I, the post-inc add recurrence, is poison. Only 5906 // things that are known to be poison under that assumption go on the 5907 // PoisonStack. 5908 Pushed.insert(I); 5909 PoisonStack.push_back(I); 5910 5911 bool LatchControlDependentOnPoison = false; 5912 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 5913 const Instruction *Poison = PoisonStack.pop_back_val(); 5914 5915 for (auto *PoisonUser : Poison->users()) { 5916 if (propagatesPoison(cast<Instruction>(PoisonUser))) { 5917 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 5918 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 5919 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 5920 assert(BI->isConditional() && "Only possibility!"); 5921 if (BI->getParent() == LatchBB) { 5922 LatchControlDependentOnPoison = true; 5923 break; 5924 } 5925 } 5926 } 5927 } 5928 5929 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 5930 } 5931 5932 ScalarEvolution::LoopProperties 5933 ScalarEvolution::getLoopProperties(const Loop *L) { 5934 using LoopProperties = ScalarEvolution::LoopProperties; 5935 5936 auto Itr = LoopPropertiesCache.find(L); 5937 if (Itr == LoopPropertiesCache.end()) { 5938 auto HasSideEffects = [](Instruction *I) { 5939 if (auto *SI = dyn_cast<StoreInst>(I)) 5940 return !SI->isSimple(); 5941 5942 return I->mayHaveSideEffects(); 5943 }; 5944 5945 LoopProperties LP = {/* HasNoAbnormalExits */ true, 5946 /*HasNoSideEffects*/ true}; 5947 5948 for (auto *BB : L->getBlocks()) 5949 for (auto &I : *BB) { 5950 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5951 LP.HasNoAbnormalExits = false; 5952 if (HasSideEffects(&I)) 5953 LP.HasNoSideEffects = false; 5954 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 5955 break; // We're already as pessimistic as we can get. 5956 } 5957 5958 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 5959 assert(InsertPair.second && "We just checked!"); 5960 Itr = InsertPair.first; 5961 } 5962 5963 return Itr->second; 5964 } 5965 5966 const SCEV *ScalarEvolution::createSCEV(Value *V) { 5967 if (!isSCEVable(V->getType())) 5968 return getUnknown(V); 5969 5970 if (Instruction *I = dyn_cast<Instruction>(V)) { 5971 // Don't attempt to analyze instructions in blocks that aren't 5972 // reachable. Such instructions don't matter, and they aren't required 5973 // to obey basic rules for definitions dominating uses which this 5974 // analysis depends on. 5975 if (!DT.isReachableFromEntry(I->getParent())) 5976 return getUnknown(UndefValue::get(V->getType())); 5977 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 5978 return getConstant(CI); 5979 else if (isa<ConstantPointerNull>(V)) 5980 return getZero(V->getType()); 5981 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 5982 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 5983 else if (!isa<ConstantExpr>(V)) 5984 return getUnknown(V); 5985 5986 Operator *U = cast<Operator>(V); 5987 if (auto BO = MatchBinaryOp(U, DT)) { 5988 switch (BO->Opcode) { 5989 case Instruction::Add: { 5990 // The simple thing to do would be to just call getSCEV on both operands 5991 // and call getAddExpr with the result. However if we're looking at a 5992 // bunch of things all added together, this can be quite inefficient, 5993 // because it leads to N-1 getAddExpr calls for N ultimate operands. 5994 // Instead, gather up all the operands and make a single getAddExpr call. 5995 // LLVM IR canonical form means we need only traverse the left operands. 5996 SmallVector<const SCEV *, 4> AddOps; 5997 do { 5998 if (BO->Op) { 5999 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6000 AddOps.push_back(OpSCEV); 6001 break; 6002 } 6003 6004 // If a NUW or NSW flag can be applied to the SCEV for this 6005 // addition, then compute the SCEV for this addition by itself 6006 // with a separate call to getAddExpr. We need to do that 6007 // instead of pushing the operands of the addition onto AddOps, 6008 // since the flags are only known to apply to this particular 6009 // addition - they may not apply to other additions that can be 6010 // formed with operands from AddOps. 6011 const SCEV *RHS = getSCEV(BO->RHS); 6012 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6013 if (Flags != SCEV::FlagAnyWrap) { 6014 const SCEV *LHS = getSCEV(BO->LHS); 6015 if (BO->Opcode == Instruction::Sub) 6016 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6017 else 6018 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6019 break; 6020 } 6021 } 6022 6023 if (BO->Opcode == Instruction::Sub) 6024 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6025 else 6026 AddOps.push_back(getSCEV(BO->RHS)); 6027 6028 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6029 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6030 NewBO->Opcode != Instruction::Sub)) { 6031 AddOps.push_back(getSCEV(BO->LHS)); 6032 break; 6033 } 6034 BO = NewBO; 6035 } while (true); 6036 6037 return getAddExpr(AddOps); 6038 } 6039 6040 case Instruction::Mul: { 6041 SmallVector<const SCEV *, 4> MulOps; 6042 do { 6043 if (BO->Op) { 6044 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6045 MulOps.push_back(OpSCEV); 6046 break; 6047 } 6048 6049 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6050 if (Flags != SCEV::FlagAnyWrap) { 6051 MulOps.push_back( 6052 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6053 break; 6054 } 6055 } 6056 6057 MulOps.push_back(getSCEV(BO->RHS)); 6058 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6059 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6060 MulOps.push_back(getSCEV(BO->LHS)); 6061 break; 6062 } 6063 BO = NewBO; 6064 } while (true); 6065 6066 return getMulExpr(MulOps); 6067 } 6068 case Instruction::UDiv: 6069 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6070 case Instruction::URem: 6071 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6072 case Instruction::Sub: { 6073 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6074 if (BO->Op) 6075 Flags = getNoWrapFlagsFromUB(BO->Op); 6076 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6077 } 6078 case Instruction::And: 6079 // For an expression like x&255 that merely masks off the high bits, 6080 // use zext(trunc(x)) as the SCEV expression. 6081 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6082 if (CI->isZero()) 6083 return getSCEV(BO->RHS); 6084 if (CI->isMinusOne()) 6085 return getSCEV(BO->LHS); 6086 const APInt &A = CI->getValue(); 6087 6088 // Instcombine's ShrinkDemandedConstant may strip bits out of 6089 // constants, obscuring what would otherwise be a low-bits mask. 6090 // Use computeKnownBits to compute what ShrinkDemandedConstant 6091 // knew about to reconstruct a low-bits mask value. 6092 unsigned LZ = A.countLeadingZeros(); 6093 unsigned TZ = A.countTrailingZeros(); 6094 unsigned BitWidth = A.getBitWidth(); 6095 KnownBits Known(BitWidth); 6096 computeKnownBits(BO->LHS, Known, getDataLayout(), 6097 0, &AC, nullptr, &DT); 6098 6099 APInt EffectiveMask = 6100 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6101 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6102 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6103 const SCEV *LHS = getSCEV(BO->LHS); 6104 const SCEV *ShiftedLHS = nullptr; 6105 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6106 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6107 // For an expression like (x * 8) & 8, simplify the multiply. 6108 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6109 unsigned GCD = std::min(MulZeros, TZ); 6110 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6111 SmallVector<const SCEV*, 4> MulOps; 6112 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6113 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6114 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6115 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6116 } 6117 } 6118 if (!ShiftedLHS) 6119 ShiftedLHS = getUDivExpr(LHS, MulCount); 6120 return getMulExpr( 6121 getZeroExtendExpr( 6122 getTruncateExpr(ShiftedLHS, 6123 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6124 BO->LHS->getType()), 6125 MulCount); 6126 } 6127 } 6128 break; 6129 6130 case Instruction::Or: 6131 // If the RHS of the Or is a constant, we may have something like: 6132 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6133 // optimizations will transparently handle this case. 6134 // 6135 // In order for this transformation to be safe, the LHS must be of the 6136 // form X*(2^n) and the Or constant must be less than 2^n. 6137 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6138 const SCEV *LHS = getSCEV(BO->LHS); 6139 const APInt &CIVal = CI->getValue(); 6140 if (GetMinTrailingZeros(LHS) >= 6141 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6142 // Build a plain add SCEV. 6143 return getAddExpr(LHS, getSCEV(CI), 6144 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 6145 } 6146 } 6147 break; 6148 6149 case Instruction::Xor: 6150 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6151 // If the RHS of xor is -1, then this is a not operation. 6152 if (CI->isMinusOne()) 6153 return getNotSCEV(getSCEV(BO->LHS)); 6154 6155 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6156 // This is a variant of the check for xor with -1, and it handles 6157 // the case where instcombine has trimmed non-demanded bits out 6158 // of an xor with -1. 6159 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6160 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6161 if (LBO->getOpcode() == Instruction::And && 6162 LCI->getValue() == CI->getValue()) 6163 if (const SCEVZeroExtendExpr *Z = 6164 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6165 Type *UTy = BO->LHS->getType(); 6166 const SCEV *Z0 = Z->getOperand(); 6167 Type *Z0Ty = Z0->getType(); 6168 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6169 6170 // If C is a low-bits mask, the zero extend is serving to 6171 // mask off the high bits. Complement the operand and 6172 // re-apply the zext. 6173 if (CI->getValue().isMask(Z0TySize)) 6174 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6175 6176 // If C is a single bit, it may be in the sign-bit position 6177 // before the zero-extend. In this case, represent the xor 6178 // using an add, which is equivalent, and re-apply the zext. 6179 APInt Trunc = CI->getValue().trunc(Z0TySize); 6180 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 6181 Trunc.isSignMask()) 6182 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 6183 UTy); 6184 } 6185 } 6186 break; 6187 6188 case Instruction::Shl: 6189 // Turn shift left of a constant amount into a multiply. 6190 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 6191 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 6192 6193 // If the shift count is not less than the bitwidth, the result of 6194 // the shift is undefined. Don't try to analyze it, because the 6195 // resolution chosen here may differ from the resolution chosen in 6196 // other parts of the compiler. 6197 if (SA->getValue().uge(BitWidth)) 6198 break; 6199 6200 // We can safely preserve the nuw flag in all cases. It's also safe to 6201 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation 6202 // requires special handling. It can be preserved as long as we're not 6203 // left shifting by bitwidth - 1. 6204 auto Flags = SCEV::FlagAnyWrap; 6205 if (BO->Op) { 6206 auto MulFlags = getNoWrapFlagsFromUB(BO->Op); 6207 if ((MulFlags & SCEV::FlagNSW) && 6208 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1))) 6209 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW); 6210 if (MulFlags & SCEV::FlagNUW) 6211 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW); 6212 } 6213 6214 Constant *X = ConstantInt::get( 6215 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 6216 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 6217 } 6218 break; 6219 6220 case Instruction::AShr: { 6221 // AShr X, C, where C is a constant. 6222 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 6223 if (!CI) 6224 break; 6225 6226 Type *OuterTy = BO->LHS->getType(); 6227 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 6228 // If the shift count is not less than the bitwidth, the result of 6229 // the shift is undefined. Don't try to analyze it, because the 6230 // resolution chosen here may differ from the resolution chosen in 6231 // other parts of the compiler. 6232 if (CI->getValue().uge(BitWidth)) 6233 break; 6234 6235 if (CI->isZero()) 6236 return getSCEV(BO->LHS); // shift by zero --> noop 6237 6238 uint64_t AShrAmt = CI->getZExtValue(); 6239 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 6240 6241 Operator *L = dyn_cast<Operator>(BO->LHS); 6242 if (L && L->getOpcode() == Instruction::Shl) { 6243 // X = Shl A, n 6244 // Y = AShr X, m 6245 // Both n and m are constant. 6246 6247 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 6248 if (L->getOperand(1) == BO->RHS) 6249 // For a two-shift sext-inreg, i.e. n = m, 6250 // use sext(trunc(x)) as the SCEV expression. 6251 return getSignExtendExpr( 6252 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy); 6253 6254 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1)); 6255 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) { 6256 uint64_t ShlAmt = ShlAmtCI->getZExtValue(); 6257 if (ShlAmt > AShrAmt) { 6258 // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV 6259 // expression. We already checked that ShlAmt < BitWidth, so 6260 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as 6261 // ShlAmt - AShrAmt < Amt. 6262 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt, 6263 ShlAmt - AShrAmt); 6264 return getSignExtendExpr( 6265 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy), 6266 getConstant(Mul)), OuterTy); 6267 } 6268 } 6269 } 6270 break; 6271 } 6272 } 6273 } 6274 6275 switch (U->getOpcode()) { 6276 case Instruction::Trunc: 6277 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 6278 6279 case Instruction::ZExt: 6280 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6281 6282 case Instruction::SExt: 6283 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) { 6284 // The NSW flag of a subtract does not always survive the conversion to 6285 // A + (-1)*B. By pushing sign extension onto its operands we are much 6286 // more likely to preserve NSW and allow later AddRec optimisations. 6287 // 6288 // NOTE: This is effectively duplicating this logic from getSignExtend: 6289 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 6290 // but by that point the NSW information has potentially been lost. 6291 if (BO->Opcode == Instruction::Sub && BO->IsNSW) { 6292 Type *Ty = U->getType(); 6293 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); 6294 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); 6295 return getMinusSCEV(V1, V2, SCEV::FlagNSW); 6296 } 6297 } 6298 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6299 6300 case Instruction::BitCast: 6301 // BitCasts are no-op casts so we just eliminate the cast. 6302 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 6303 return getSCEV(U->getOperand(0)); 6304 break; 6305 6306 case Instruction::SDiv: 6307 // If both operands are non-negative, this is just an udiv. 6308 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 6309 isKnownNonNegative(getSCEV(U->getOperand(1)))) 6310 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 6311 break; 6312 6313 case Instruction::SRem: 6314 // If both operands are non-negative, this is just an urem. 6315 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 6316 isKnownNonNegative(getSCEV(U->getOperand(1)))) 6317 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 6318 break; 6319 6320 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can 6321 // lead to pointer expressions which cannot safely be expanded to GEPs, 6322 // because ScalarEvolution doesn't respect the GEP aliasing rules when 6323 // simplifying integer expressions. 6324 6325 case Instruction::GetElementPtr: 6326 return createNodeForGEP(cast<GEPOperator>(U)); 6327 6328 case Instruction::PHI: 6329 return createNodeForPHI(cast<PHINode>(U)); 6330 6331 case Instruction::Select: 6332 // U can also be a select constant expr, which let fall through. Since 6333 // createNodeForSelect only works for a condition that is an `ICmpInst`, and 6334 // constant expressions cannot have instructions as operands, we'd have 6335 // returned getUnknown for a select constant expressions anyway. 6336 if (isa<Instruction>(U)) 6337 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0), 6338 U->getOperand(1), U->getOperand(2)); 6339 break; 6340 6341 case Instruction::Call: 6342 case Instruction::Invoke: 6343 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) 6344 return getSCEV(RV); 6345 break; 6346 } 6347 6348 return getUnknown(V); 6349 } 6350 6351 //===----------------------------------------------------------------------===// 6352 // Iteration Count Computation Code 6353 // 6354 6355 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { 6356 if (!ExitCount) 6357 return 0; 6358 6359 ConstantInt *ExitConst = ExitCount->getValue(); 6360 6361 // Guard against huge trip counts. 6362 if (ExitConst->getValue().getActiveBits() > 32) 6363 return 0; 6364 6365 // In case of integer overflow, this returns 0, which is correct. 6366 return ((unsigned)ExitConst->getZExtValue()) + 1; 6367 } 6368 6369 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) { 6370 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6371 return getSmallConstantTripCount(L, ExitingBB); 6372 6373 // No trip count information for multiple exits. 6374 return 0; 6375 } 6376 6377 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L, 6378 BasicBlock *ExitingBlock) { 6379 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6380 assert(L->isLoopExiting(ExitingBlock) && 6381 "Exiting block must actually branch out of the loop!"); 6382 const SCEVConstant *ExitCount = 6383 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock)); 6384 return getConstantTripCount(ExitCount); 6385 } 6386 6387 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) { 6388 const auto *MaxExitCount = 6389 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L)); 6390 return getConstantTripCount(MaxExitCount); 6391 } 6392 6393 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) { 6394 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6395 return getSmallConstantTripMultiple(L, ExitingBB); 6396 6397 // No trip multiple information for multiple exits. 6398 return 0; 6399 } 6400 6401 /// Returns the largest constant divisor of the trip count of this loop as a 6402 /// normal unsigned value, if possible. This means that the actual trip count is 6403 /// always a multiple of the returned value (don't forget the trip count could 6404 /// very well be zero as well!). 6405 /// 6406 /// Returns 1 if the trip count is unknown or not guaranteed to be the 6407 /// multiple of a constant (which is also the case if the trip count is simply 6408 /// constant, use getSmallConstantTripCount for that case), Will also return 1 6409 /// if the trip count is very large (>= 2^32). 6410 /// 6411 /// As explained in the comments for getSmallConstantTripCount, this assumes 6412 /// that control exits the loop via ExitingBlock. 6413 unsigned 6414 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, 6415 BasicBlock *ExitingBlock) { 6416 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6417 assert(L->isLoopExiting(ExitingBlock) && 6418 "Exiting block must actually branch out of the loop!"); 6419 const SCEV *ExitCount = getExitCount(L, ExitingBlock); 6420 if (ExitCount == getCouldNotCompute()) 6421 return 1; 6422 6423 // Get the trip count from the BE count by adding 1. 6424 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType())); 6425 6426 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr); 6427 if (!TC) 6428 // Attempt to factor more general cases. Returns the greatest power of 6429 // two divisor. If overflow happens, the trip count expression is still 6430 // divisible by the greatest power of 2 divisor returned. 6431 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr)); 6432 6433 ConstantInt *Result = TC->getValue(); 6434 6435 // Guard against huge trip counts (this requires checking 6436 // for zero to handle the case where the trip count == -1 and the 6437 // addition wraps). 6438 if (!Result || Result->getValue().getActiveBits() > 32 || 6439 Result->getValue().getActiveBits() == 0) 6440 return 1; 6441 6442 return (unsigned)Result->getZExtValue(); 6443 } 6444 6445 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 6446 BasicBlock *ExitingBlock, 6447 ExitCountKind Kind) { 6448 switch (Kind) { 6449 case Exact: 6450 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 6451 case ConstantMaximum: 6452 return getBackedgeTakenInfo(L).getMax(ExitingBlock, this); 6453 }; 6454 llvm_unreachable("Invalid ExitCountKind!"); 6455 } 6456 6457 const SCEV * 6458 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 6459 SCEVUnionPredicate &Preds) { 6460 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds); 6461 } 6462 6463 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L, 6464 ExitCountKind Kind) { 6465 switch (Kind) { 6466 case Exact: 6467 return getBackedgeTakenInfo(L).getExact(L, this); 6468 case ConstantMaximum: 6469 return getBackedgeTakenInfo(L).getMax(this); 6470 }; 6471 llvm_unreachable("Invalid ExitCountKind!"); 6472 } 6473 6474 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 6475 return getBackedgeTakenInfo(L).isMaxOrZero(this); 6476 } 6477 6478 /// Push PHI nodes in the header of the given loop onto the given Worklist. 6479 static void 6480 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 6481 BasicBlock *Header = L->getHeader(); 6482 6483 // Push all Loop-header PHIs onto the Worklist stack. 6484 for (PHINode &PN : Header->phis()) 6485 Worklist.push_back(&PN); 6486 } 6487 6488 const ScalarEvolution::BackedgeTakenInfo & 6489 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 6490 auto &BTI = getBackedgeTakenInfo(L); 6491 if (BTI.hasFullInfo()) 6492 return BTI; 6493 6494 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6495 6496 if (!Pair.second) 6497 return Pair.first->second; 6498 6499 BackedgeTakenInfo Result = 6500 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 6501 6502 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 6503 } 6504 6505 const ScalarEvolution::BackedgeTakenInfo & 6506 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 6507 // Initially insert an invalid entry for this loop. If the insertion 6508 // succeeds, proceed to actually compute a backedge-taken count and 6509 // update the value. The temporary CouldNotCompute value tells SCEV 6510 // code elsewhere that it shouldn't attempt to request a new 6511 // backedge-taken count, which could result in infinite recursion. 6512 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 6513 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6514 if (!Pair.second) 6515 return Pair.first->second; 6516 6517 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 6518 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result 6519 // must be cleared in this scope. 6520 BackedgeTakenInfo Result = computeBackedgeTakenCount(L); 6521 6522 // In product build, there are no usage of statistic. 6523 (void)NumTripCountsComputed; 6524 (void)NumTripCountsNotComputed; 6525 #if LLVM_ENABLE_STATS || !defined(NDEBUG) 6526 const SCEV *BEExact = Result.getExact(L, this); 6527 if (BEExact != getCouldNotCompute()) { 6528 assert(isLoopInvariant(BEExact, L) && 6529 isLoopInvariant(Result.getMax(this), L) && 6530 "Computed backedge-taken count isn't loop invariant for loop!"); 6531 ++NumTripCountsComputed; 6532 } 6533 else if (Result.getMax(this) == getCouldNotCompute() && 6534 isa<PHINode>(L->getHeader()->begin())) { 6535 // Only count loops that have phi nodes as not being computable. 6536 ++NumTripCountsNotComputed; 6537 } 6538 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG) 6539 6540 // Now that we know more about the trip count for this loop, forget any 6541 // existing SCEV values for PHI nodes in this loop since they are only 6542 // conservative estimates made without the benefit of trip count 6543 // information. This is similar to the code in forgetLoop, except that 6544 // it handles SCEVUnknown PHI nodes specially. 6545 if (Result.hasAnyInfo()) { 6546 SmallVector<Instruction *, 16> Worklist; 6547 PushLoopPHIs(L, Worklist); 6548 6549 SmallPtrSet<Instruction *, 8> Discovered; 6550 while (!Worklist.empty()) { 6551 Instruction *I = Worklist.pop_back_val(); 6552 6553 ValueExprMapType::iterator It = 6554 ValueExprMap.find_as(static_cast<Value *>(I)); 6555 if (It != ValueExprMap.end()) { 6556 const SCEV *Old = It->second; 6557 6558 // SCEVUnknown for a PHI either means that it has an unrecognized 6559 // structure, or it's a PHI that's in the progress of being computed 6560 // by createNodeForPHI. In the former case, additional loop trip 6561 // count information isn't going to change anything. In the later 6562 // case, createNodeForPHI will perform the necessary updates on its 6563 // own when it gets to that point. 6564 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) { 6565 eraseValueFromMap(It->first); 6566 forgetMemoizedResults(Old); 6567 } 6568 if (PHINode *PN = dyn_cast<PHINode>(I)) 6569 ConstantEvolutionLoopExitValue.erase(PN); 6570 } 6571 6572 // Since we don't need to invalidate anything for correctness and we're 6573 // only invalidating to make SCEV's results more precise, we get to stop 6574 // early to avoid invalidating too much. This is especially important in 6575 // cases like: 6576 // 6577 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node 6578 // loop0: 6579 // %pn0 = phi 6580 // ... 6581 // loop1: 6582 // %pn1 = phi 6583 // ... 6584 // 6585 // where both loop0 and loop1's backedge taken count uses the SCEV 6586 // expression for %v. If we don't have the early stop below then in cases 6587 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip 6588 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip 6589 // count for loop1, effectively nullifying SCEV's trip count cache. 6590 for (auto *U : I->users()) 6591 if (auto *I = dyn_cast<Instruction>(U)) { 6592 auto *LoopForUser = LI.getLoopFor(I->getParent()); 6593 if (LoopForUser && L->contains(LoopForUser) && 6594 Discovered.insert(I).second) 6595 Worklist.push_back(I); 6596 } 6597 } 6598 } 6599 6600 // Re-lookup the insert position, since the call to 6601 // computeBackedgeTakenCount above could result in a 6602 // recusive call to getBackedgeTakenInfo (on a different 6603 // loop), which would invalidate the iterator computed 6604 // earlier. 6605 return BackedgeTakenCounts.find(L)->second = std::move(Result); 6606 } 6607 6608 void ScalarEvolution::forgetAllLoops() { 6609 // This method is intended to forget all info about loops. It should 6610 // invalidate caches as if the following happened: 6611 // - The trip counts of all loops have changed arbitrarily 6612 // - Every llvm::Value has been updated in place to produce a different 6613 // result. 6614 BackedgeTakenCounts.clear(); 6615 PredicatedBackedgeTakenCounts.clear(); 6616 LoopPropertiesCache.clear(); 6617 ConstantEvolutionLoopExitValue.clear(); 6618 ValueExprMap.clear(); 6619 ValuesAtScopes.clear(); 6620 LoopDispositions.clear(); 6621 BlockDispositions.clear(); 6622 UnsignedRanges.clear(); 6623 SignedRanges.clear(); 6624 ExprValueMap.clear(); 6625 HasRecMap.clear(); 6626 MinTrailingZerosCache.clear(); 6627 PredicatedSCEVRewrites.clear(); 6628 } 6629 6630 void ScalarEvolution::forgetLoop(const Loop *L) { 6631 // Drop any stored trip count value. 6632 auto RemoveLoopFromBackedgeMap = 6633 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 6634 auto BTCPos = Map.find(L); 6635 if (BTCPos != Map.end()) { 6636 BTCPos->second.clear(); 6637 Map.erase(BTCPos); 6638 } 6639 }; 6640 6641 SmallVector<const Loop *, 16> LoopWorklist(1, L); 6642 SmallVector<Instruction *, 32> Worklist; 6643 SmallPtrSet<Instruction *, 16> Visited; 6644 6645 // Iterate over all the loops and sub-loops to drop SCEV information. 6646 while (!LoopWorklist.empty()) { 6647 auto *CurrL = LoopWorklist.pop_back_val(); 6648 6649 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6650 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6651 6652 // Drop information about predicated SCEV rewrites for this loop. 6653 for (auto I = PredicatedSCEVRewrites.begin(); 6654 I != PredicatedSCEVRewrites.end();) { 6655 std::pair<const SCEV *, const Loop *> Entry = I->first; 6656 if (Entry.second == CurrL) 6657 PredicatedSCEVRewrites.erase(I++); 6658 else 6659 ++I; 6660 } 6661 6662 auto LoopUsersItr = LoopUsers.find(CurrL); 6663 if (LoopUsersItr != LoopUsers.end()) { 6664 for (auto *S : LoopUsersItr->second) 6665 forgetMemoizedResults(S); 6666 LoopUsers.erase(LoopUsersItr); 6667 } 6668 6669 // Drop information about expressions based on loop-header PHIs. 6670 PushLoopPHIs(CurrL, Worklist); 6671 6672 while (!Worklist.empty()) { 6673 Instruction *I = Worklist.pop_back_val(); 6674 if (!Visited.insert(I).second) 6675 continue; 6676 6677 ValueExprMapType::iterator It = 6678 ValueExprMap.find_as(static_cast<Value *>(I)); 6679 if (It != ValueExprMap.end()) { 6680 eraseValueFromMap(It->first); 6681 forgetMemoizedResults(It->second); 6682 if (PHINode *PN = dyn_cast<PHINode>(I)) 6683 ConstantEvolutionLoopExitValue.erase(PN); 6684 } 6685 6686 PushDefUseChildren(I, Worklist); 6687 } 6688 6689 LoopPropertiesCache.erase(CurrL); 6690 // Forget all contained loops too, to avoid dangling entries in the 6691 // ValuesAtScopes map. 6692 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6693 } 6694 } 6695 6696 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 6697 while (Loop *Parent = L->getParentLoop()) 6698 L = Parent; 6699 forgetLoop(L); 6700 } 6701 6702 void ScalarEvolution::forgetValue(Value *V) { 6703 Instruction *I = dyn_cast<Instruction>(V); 6704 if (!I) return; 6705 6706 // Drop information about expressions based on loop-header PHIs. 6707 SmallVector<Instruction *, 16> Worklist; 6708 Worklist.push_back(I); 6709 6710 SmallPtrSet<Instruction *, 8> Visited; 6711 while (!Worklist.empty()) { 6712 I = Worklist.pop_back_val(); 6713 if (!Visited.insert(I).second) 6714 continue; 6715 6716 ValueExprMapType::iterator It = 6717 ValueExprMap.find_as(static_cast<Value *>(I)); 6718 if (It != ValueExprMap.end()) { 6719 eraseValueFromMap(It->first); 6720 forgetMemoizedResults(It->second); 6721 if (PHINode *PN = dyn_cast<PHINode>(I)) 6722 ConstantEvolutionLoopExitValue.erase(PN); 6723 } 6724 6725 PushDefUseChildren(I, Worklist); 6726 } 6727 } 6728 6729 void ScalarEvolution::forgetLoopDispositions(const Loop *L) { 6730 LoopDispositions.clear(); 6731 } 6732 6733 /// Get the exact loop backedge taken count considering all loop exits. A 6734 /// computable result can only be returned for loops with all exiting blocks 6735 /// dominating the latch. howFarToZero assumes that the limit of each loop test 6736 /// is never skipped. This is a valid assumption as long as the loop exits via 6737 /// that test. For precise results, it is the caller's responsibility to specify 6738 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 6739 const SCEV * 6740 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 6741 SCEVUnionPredicate *Preds) const { 6742 // If any exits were not computable, the loop is not computable. 6743 if (!isComplete() || ExitNotTaken.empty()) 6744 return SE->getCouldNotCompute(); 6745 6746 const BasicBlock *Latch = L->getLoopLatch(); 6747 // All exiting blocks we have collected must dominate the only backedge. 6748 if (!Latch) 6749 return SE->getCouldNotCompute(); 6750 6751 // All exiting blocks we have gathered dominate loop's latch, so exact trip 6752 // count is simply a minimum out of all these calculated exit counts. 6753 SmallVector<const SCEV *, 2> Ops; 6754 for (auto &ENT : ExitNotTaken) { 6755 const SCEV *BECount = ENT.ExactNotTaken; 6756 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 6757 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 6758 "We should only have known counts for exiting blocks that dominate " 6759 "latch!"); 6760 6761 Ops.push_back(BECount); 6762 6763 if (Preds && !ENT.hasAlwaysTruePredicate()) 6764 Preds->add(ENT.Predicate.get()); 6765 6766 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6767 "Predicate should be always true!"); 6768 } 6769 6770 return SE->getUMinFromMismatchedTypes(Ops); 6771 } 6772 6773 /// Get the exact not taken count for this loop exit. 6774 const SCEV * 6775 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 6776 ScalarEvolution *SE) const { 6777 for (auto &ENT : ExitNotTaken) 6778 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6779 return ENT.ExactNotTaken; 6780 6781 return SE->getCouldNotCompute(); 6782 } 6783 6784 const SCEV * 6785 ScalarEvolution::BackedgeTakenInfo::getMax(BasicBlock *ExitingBlock, 6786 ScalarEvolution *SE) const { 6787 for (auto &ENT : ExitNotTaken) 6788 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6789 return ENT.MaxNotTaken; 6790 6791 return SE->getCouldNotCompute(); 6792 } 6793 6794 /// getMax - Get the max backedge taken count for the loop. 6795 const SCEV * 6796 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6797 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6798 return !ENT.hasAlwaysTruePredicate(); 6799 }; 6800 6801 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6802 return SE->getCouldNotCompute(); 6803 6804 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6805 "No point in having a non-constant max backedge taken count!"); 6806 return getMax(); 6807 } 6808 6809 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6810 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6811 return !ENT.hasAlwaysTruePredicate(); 6812 }; 6813 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6814 } 6815 6816 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6817 ScalarEvolution *SE) const { 6818 if (getMax() && getMax() != SE->getCouldNotCompute() && 6819 SE->hasOperand(getMax(), S)) 6820 return true; 6821 6822 for (auto &ENT : ExitNotTaken) 6823 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6824 SE->hasOperand(ENT.ExactNotTaken, S)) 6825 return true; 6826 6827 return false; 6828 } 6829 6830 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6831 : ExactNotTaken(E), MaxNotTaken(E) { 6832 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6833 isa<SCEVConstant>(MaxNotTaken)) && 6834 "No point in having a non-constant max backedge taken count!"); 6835 } 6836 6837 ScalarEvolution::ExitLimit::ExitLimit( 6838 const SCEV *E, const SCEV *M, bool MaxOrZero, 6839 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6840 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6841 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6842 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6843 "Exact is not allowed to be less precise than Max"); 6844 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6845 isa<SCEVConstant>(MaxNotTaken)) && 6846 "No point in having a non-constant max backedge taken count!"); 6847 for (auto *PredSet : PredSetList) 6848 for (auto *P : *PredSet) 6849 addPredicate(P); 6850 } 6851 6852 ScalarEvolution::ExitLimit::ExitLimit( 6853 const SCEV *E, const SCEV *M, bool MaxOrZero, 6854 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6855 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6856 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6857 isa<SCEVConstant>(MaxNotTaken)) && 6858 "No point in having a non-constant max backedge taken count!"); 6859 } 6860 6861 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6862 bool MaxOrZero) 6863 : ExitLimit(E, M, MaxOrZero, None) { 6864 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6865 isa<SCEVConstant>(MaxNotTaken)) && 6866 "No point in having a non-constant max backedge taken count!"); 6867 } 6868 6869 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6870 /// computable exit into a persistent ExitNotTakenInfo array. 6871 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 6872 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 6873 ExitCounts, 6874 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 6875 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 6876 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6877 6878 ExitNotTaken.reserve(ExitCounts.size()); 6879 std::transform( 6880 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 6881 [&](const EdgeExitInfo &EEI) { 6882 BasicBlock *ExitBB = EEI.first; 6883 const ExitLimit &EL = EEI.second; 6884 if (EL.Predicates.empty()) 6885 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken, 6886 nullptr); 6887 6888 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 6889 for (auto *Pred : EL.Predicates) 6890 Predicate->add(Pred); 6891 6892 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken, 6893 std::move(Predicate)); 6894 }); 6895 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 6896 "No point in having a non-constant max backedge taken count!"); 6897 } 6898 6899 /// Invalidate this result and free the ExitNotTakenInfo array. 6900 void ScalarEvolution::BackedgeTakenInfo::clear() { 6901 ExitNotTaken.clear(); 6902 } 6903 6904 /// Compute the number of times the backedge of the specified loop will execute. 6905 ScalarEvolution::BackedgeTakenInfo 6906 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 6907 bool AllowPredicates) { 6908 SmallVector<BasicBlock *, 8> ExitingBlocks; 6909 L->getExitingBlocks(ExitingBlocks); 6910 6911 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6912 6913 SmallVector<EdgeExitInfo, 4> ExitCounts; 6914 bool CouldComputeBECount = true; 6915 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 6916 const SCEV *MustExitMaxBECount = nullptr; 6917 const SCEV *MayExitMaxBECount = nullptr; 6918 bool MustExitMaxOrZero = false; 6919 6920 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 6921 // and compute maxBECount. 6922 // Do a union of all the predicates here. 6923 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 6924 BasicBlock *ExitBB = ExitingBlocks[i]; 6925 6926 // We canonicalize untaken exits to br (constant), ignore them so that 6927 // proving an exit untaken doesn't negatively impact our ability to reason 6928 // about the loop as whole. 6929 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator())) 6930 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) { 6931 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 6932 if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne())) 6933 continue; 6934 } 6935 6936 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 6937 6938 assert((AllowPredicates || EL.Predicates.empty()) && 6939 "Predicated exit limit when predicates are not allowed!"); 6940 6941 // 1. For each exit that can be computed, add an entry to ExitCounts. 6942 // CouldComputeBECount is true only if all exits can be computed. 6943 if (EL.ExactNotTaken == getCouldNotCompute()) 6944 // We couldn't compute an exact value for this exit, so 6945 // we won't be able to compute an exact value for the loop. 6946 CouldComputeBECount = false; 6947 else 6948 ExitCounts.emplace_back(ExitBB, EL); 6949 6950 // 2. Derive the loop's MaxBECount from each exit's max number of 6951 // non-exiting iterations. Partition the loop exits into two kinds: 6952 // LoopMustExits and LoopMayExits. 6953 // 6954 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 6955 // is a LoopMayExit. If any computable LoopMustExit is found, then 6956 // MaxBECount is the minimum EL.MaxNotTaken of computable 6957 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 6958 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 6959 // computable EL.MaxNotTaken. 6960 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 6961 DT.dominates(ExitBB, Latch)) { 6962 if (!MustExitMaxBECount) { 6963 MustExitMaxBECount = EL.MaxNotTaken; 6964 MustExitMaxOrZero = EL.MaxOrZero; 6965 } else { 6966 MustExitMaxBECount = 6967 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 6968 } 6969 } else if (MayExitMaxBECount != getCouldNotCompute()) { 6970 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 6971 MayExitMaxBECount = EL.MaxNotTaken; 6972 else { 6973 MayExitMaxBECount = 6974 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 6975 } 6976 } 6977 } 6978 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 6979 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 6980 // The loop backedge will be taken the maximum or zero times if there's 6981 // a single exit that must be taken the maximum or zero times. 6982 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 6983 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 6984 MaxBECount, MaxOrZero); 6985 } 6986 6987 ScalarEvolution::ExitLimit 6988 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 6989 bool AllowPredicates) { 6990 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); 6991 // If our exiting block does not dominate the latch, then its connection with 6992 // loop's exit limit may be far from trivial. 6993 const BasicBlock *Latch = L->getLoopLatch(); 6994 if (!Latch || !DT.dominates(ExitingBlock, Latch)) 6995 return getCouldNotCompute(); 6996 6997 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 6998 Instruction *Term = ExitingBlock->getTerminator(); 6999 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 7000 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 7001 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 7002 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && 7003 "It should have one successor in loop and one exit block!"); 7004 // Proceed to the next level to examine the exit condition expression. 7005 return computeExitLimitFromCond( 7006 L, BI->getCondition(), ExitIfTrue, 7007 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 7008 } 7009 7010 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { 7011 // For switch, make sure that there is a single exit from the loop. 7012 BasicBlock *Exit = nullptr; 7013 for (auto *SBB : successors(ExitingBlock)) 7014 if (!L->contains(SBB)) { 7015 if (Exit) // Multiple exit successors. 7016 return getCouldNotCompute(); 7017 Exit = SBB; 7018 } 7019 assert(Exit && "Exiting block must have at least one exit"); 7020 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7021 /*ControlsExit=*/IsOnlyExit); 7022 } 7023 7024 return getCouldNotCompute(); 7025 } 7026 7027 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7028 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7029 bool ControlsExit, bool AllowPredicates) { 7030 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7031 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7032 ControlsExit, AllowPredicates); 7033 } 7034 7035 Optional<ScalarEvolution::ExitLimit> 7036 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7037 bool ExitIfTrue, bool ControlsExit, 7038 bool AllowPredicates) { 7039 (void)this->L; 7040 (void)this->ExitIfTrue; 7041 (void)this->AllowPredicates; 7042 7043 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7044 this->AllowPredicates == AllowPredicates && 7045 "Variance in assumed invariant key components!"); 7046 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7047 if (Itr == TripCountMap.end()) 7048 return None; 7049 return Itr->second; 7050 } 7051 7052 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7053 bool ExitIfTrue, 7054 bool ControlsExit, 7055 bool AllowPredicates, 7056 const ExitLimit &EL) { 7057 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7058 this->AllowPredicates == AllowPredicates && 7059 "Variance in assumed invariant key components!"); 7060 7061 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7062 assert(InsertResult.second && "Expected successful insertion!"); 7063 (void)InsertResult; 7064 (void)ExitIfTrue; 7065 } 7066 7067 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7068 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7069 bool ControlsExit, bool AllowPredicates) { 7070 7071 if (auto MaybeEL = 7072 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7073 return *MaybeEL; 7074 7075 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7076 ControlsExit, AllowPredicates); 7077 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7078 return EL; 7079 } 7080 7081 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7082 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7083 bool ControlsExit, bool AllowPredicates) { 7084 // Check if the controlling expression for this loop is an And or Or. 7085 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 7086 if (BO->getOpcode() == Instruction::And) { 7087 // Recurse on the operands of the and. 7088 bool EitherMayExit = !ExitIfTrue; 7089 ExitLimit EL0 = computeExitLimitFromCondCached( 7090 Cache, L, BO->getOperand(0), ExitIfTrue, 7091 ControlsExit && !EitherMayExit, AllowPredicates); 7092 ExitLimit EL1 = computeExitLimitFromCondCached( 7093 Cache, L, BO->getOperand(1), ExitIfTrue, 7094 ControlsExit && !EitherMayExit, AllowPredicates); 7095 // Be robust against unsimplified IR for the form "and i1 X, true" 7096 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) 7097 return CI->isOne() ? EL0 : EL1; 7098 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0))) 7099 return CI->isOne() ? EL1 : EL0; 7100 const SCEV *BECount = getCouldNotCompute(); 7101 const SCEV *MaxBECount = getCouldNotCompute(); 7102 if (EitherMayExit) { 7103 // Both conditions must be true for the loop to continue executing. 7104 // Choose the less conservative count. 7105 if (EL0.ExactNotTaken == getCouldNotCompute() || 7106 EL1.ExactNotTaken == getCouldNotCompute()) 7107 BECount = getCouldNotCompute(); 7108 else 7109 BECount = 7110 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7111 if (EL0.MaxNotTaken == getCouldNotCompute()) 7112 MaxBECount = EL1.MaxNotTaken; 7113 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7114 MaxBECount = EL0.MaxNotTaken; 7115 else 7116 MaxBECount = 7117 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7118 } else { 7119 // Both conditions must be true at the same time for the loop to exit. 7120 // For now, be conservative. 7121 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7122 MaxBECount = EL0.MaxNotTaken; 7123 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7124 BECount = EL0.ExactNotTaken; 7125 } 7126 7127 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7128 // to be more aggressive when computing BECount than when computing 7129 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7130 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7131 // to not. 7132 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7133 !isa<SCEVCouldNotCompute>(BECount)) 7134 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7135 7136 return ExitLimit(BECount, MaxBECount, false, 7137 {&EL0.Predicates, &EL1.Predicates}); 7138 } 7139 if (BO->getOpcode() == Instruction::Or) { 7140 // Recurse on the operands of the or. 7141 bool EitherMayExit = ExitIfTrue; 7142 ExitLimit EL0 = computeExitLimitFromCondCached( 7143 Cache, L, BO->getOperand(0), ExitIfTrue, 7144 ControlsExit && !EitherMayExit, AllowPredicates); 7145 ExitLimit EL1 = computeExitLimitFromCondCached( 7146 Cache, L, BO->getOperand(1), ExitIfTrue, 7147 ControlsExit && !EitherMayExit, AllowPredicates); 7148 // Be robust against unsimplified IR for the form "or i1 X, true" 7149 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) 7150 return CI->isZero() ? EL0 : EL1; 7151 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0))) 7152 return CI->isZero() ? EL1 : EL0; 7153 const SCEV *BECount = getCouldNotCompute(); 7154 const SCEV *MaxBECount = getCouldNotCompute(); 7155 if (EitherMayExit) { 7156 // Both conditions must be false for the loop to continue executing. 7157 // Choose the less conservative count. 7158 if (EL0.ExactNotTaken == getCouldNotCompute() || 7159 EL1.ExactNotTaken == getCouldNotCompute()) 7160 BECount = getCouldNotCompute(); 7161 else 7162 BECount = 7163 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7164 if (EL0.MaxNotTaken == getCouldNotCompute()) 7165 MaxBECount = EL1.MaxNotTaken; 7166 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7167 MaxBECount = EL0.MaxNotTaken; 7168 else 7169 MaxBECount = 7170 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7171 } else { 7172 // Both conditions must be false at the same time for the loop to exit. 7173 // For now, be conservative. 7174 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7175 MaxBECount = EL0.MaxNotTaken; 7176 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7177 BECount = EL0.ExactNotTaken; 7178 } 7179 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7180 // to be more aggressive when computing BECount than when computing 7181 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7182 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7183 // to not. 7184 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7185 !isa<SCEVCouldNotCompute>(BECount)) 7186 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7187 7188 return ExitLimit(BECount, MaxBECount, false, 7189 {&EL0.Predicates, &EL1.Predicates}); 7190 } 7191 } 7192 7193 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7194 // Proceed to the next level to examine the icmp. 7195 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7196 ExitLimit EL = 7197 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7198 if (EL.hasFullInfo() || !AllowPredicates) 7199 return EL; 7200 7201 // Try again, but use SCEV predicates this time. 7202 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7203 /*AllowPredicates=*/true); 7204 } 7205 7206 // Check for a constant condition. These are normally stripped out by 7207 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7208 // preserve the CFG and is temporarily leaving constant conditions 7209 // in place. 7210 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7211 if (ExitIfTrue == !CI->getZExtValue()) 7212 // The backedge is always taken. 7213 return getCouldNotCompute(); 7214 else 7215 // The backedge is never taken. 7216 return getZero(CI->getType()); 7217 } 7218 7219 // If it's not an integer or pointer comparison then compute it the hard way. 7220 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7221 } 7222 7223 ScalarEvolution::ExitLimit 7224 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7225 ICmpInst *ExitCond, 7226 bool ExitIfTrue, 7227 bool ControlsExit, 7228 bool AllowPredicates) { 7229 // If the condition was exit on true, convert the condition to exit on false 7230 ICmpInst::Predicate Pred; 7231 if (!ExitIfTrue) 7232 Pred = ExitCond->getPredicate(); 7233 else 7234 Pred = ExitCond->getInversePredicate(); 7235 const ICmpInst::Predicate OriginalPred = Pred; 7236 7237 // Handle common loops like: for (X = "string"; *X; ++X) 7238 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7239 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7240 ExitLimit ItCnt = 7241 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7242 if (ItCnt.hasAnyInfo()) 7243 return ItCnt; 7244 } 7245 7246 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7247 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7248 7249 // Try to evaluate any dependencies out of the loop. 7250 LHS = getSCEVAtScope(LHS, L); 7251 RHS = getSCEVAtScope(RHS, L); 7252 7253 // At this point, we would like to compute how many iterations of the 7254 // loop the predicate will return true for these inputs. 7255 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7256 // If there is a loop-invariant, force it into the RHS. 7257 std::swap(LHS, RHS); 7258 Pred = ICmpInst::getSwappedPredicate(Pred); 7259 } 7260 7261 // Simplify the operands before analyzing them. 7262 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7263 7264 // If we have a comparison of a chrec against a constant, try to use value 7265 // ranges to answer this query. 7266 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7267 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7268 if (AddRec->getLoop() == L) { 7269 // Form the constant range. 7270 ConstantRange CompRange = 7271 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7272 7273 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7274 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7275 } 7276 7277 switch (Pred) { 7278 case ICmpInst::ICMP_NE: { // while (X != Y) 7279 // Convert to: while (X-Y != 0) 7280 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7281 AllowPredicates); 7282 if (EL.hasAnyInfo()) return EL; 7283 break; 7284 } 7285 case ICmpInst::ICMP_EQ: { // while (X == Y) 7286 // Convert to: while (X-Y == 0) 7287 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7288 if (EL.hasAnyInfo()) return EL; 7289 break; 7290 } 7291 case ICmpInst::ICMP_SLT: 7292 case ICmpInst::ICMP_ULT: { // while (X < Y) 7293 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7294 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7295 AllowPredicates); 7296 if (EL.hasAnyInfo()) return EL; 7297 break; 7298 } 7299 case ICmpInst::ICMP_SGT: 7300 case ICmpInst::ICMP_UGT: { // while (X > Y) 7301 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7302 ExitLimit EL = 7303 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7304 AllowPredicates); 7305 if (EL.hasAnyInfo()) return EL; 7306 break; 7307 } 7308 default: 7309 break; 7310 } 7311 7312 auto *ExhaustiveCount = 7313 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7314 7315 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7316 return ExhaustiveCount; 7317 7318 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7319 ExitCond->getOperand(1), L, OriginalPred); 7320 } 7321 7322 ScalarEvolution::ExitLimit 7323 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7324 SwitchInst *Switch, 7325 BasicBlock *ExitingBlock, 7326 bool ControlsExit) { 7327 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7328 7329 // Give up if the exit is the default dest of a switch. 7330 if (Switch->getDefaultDest() == ExitingBlock) 7331 return getCouldNotCompute(); 7332 7333 assert(L->contains(Switch->getDefaultDest()) && 7334 "Default case must not exit the loop!"); 7335 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7336 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7337 7338 // while (X != Y) --> while (X-Y != 0) 7339 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7340 if (EL.hasAnyInfo()) 7341 return EL; 7342 7343 return getCouldNotCompute(); 7344 } 7345 7346 static ConstantInt * 7347 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7348 ScalarEvolution &SE) { 7349 const SCEV *InVal = SE.getConstant(C); 7350 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7351 assert(isa<SCEVConstant>(Val) && 7352 "Evaluation of SCEV at constant didn't fold correctly?"); 7353 return cast<SCEVConstant>(Val)->getValue(); 7354 } 7355 7356 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7357 /// compute the backedge execution count. 7358 ScalarEvolution::ExitLimit 7359 ScalarEvolution::computeLoadConstantCompareExitLimit( 7360 LoadInst *LI, 7361 Constant *RHS, 7362 const Loop *L, 7363 ICmpInst::Predicate predicate) { 7364 if (LI->isVolatile()) return getCouldNotCompute(); 7365 7366 // Check to see if the loaded pointer is a getelementptr of a global. 7367 // TODO: Use SCEV instead of manually grubbing with GEPs. 7368 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 7369 if (!GEP) return getCouldNotCompute(); 7370 7371 // Make sure that it is really a constant global we are gepping, with an 7372 // initializer, and make sure the first IDX is really 0. 7373 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 7374 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 7375 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 7376 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 7377 return getCouldNotCompute(); 7378 7379 // Okay, we allow one non-constant index into the GEP instruction. 7380 Value *VarIdx = nullptr; 7381 std::vector<Constant*> Indexes; 7382 unsigned VarIdxNum = 0; 7383 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 7384 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 7385 Indexes.push_back(CI); 7386 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 7387 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. 7388 VarIdx = GEP->getOperand(i); 7389 VarIdxNum = i-2; 7390 Indexes.push_back(nullptr); 7391 } 7392 7393 // Loop-invariant loads may be a byproduct of loop optimization. Skip them. 7394 if (!VarIdx) 7395 return getCouldNotCompute(); 7396 7397 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 7398 // Check to see if X is a loop variant variable value now. 7399 const SCEV *Idx = getSCEV(VarIdx); 7400 Idx = getSCEVAtScope(Idx, L); 7401 7402 // We can only recognize very limited forms of loop index expressions, in 7403 // particular, only affine AddRec's like {C1,+,C2}. 7404 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 7405 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) || 7406 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 7407 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 7408 return getCouldNotCompute(); 7409 7410 unsigned MaxSteps = MaxBruteForceIterations; 7411 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 7412 ConstantInt *ItCst = ConstantInt::get( 7413 cast<IntegerType>(IdxExpr->getType()), IterationNum); 7414 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 7415 7416 // Form the GEP offset. 7417 Indexes[VarIdxNum] = Val; 7418 7419 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(), 7420 Indexes); 7421 if (!Result) break; // Cannot compute! 7422 7423 // Evaluate the condition for this iteration. 7424 Result = ConstantExpr::getICmp(predicate, Result, RHS); 7425 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 7426 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 7427 ++NumArrayLenItCounts; 7428 return getConstant(ItCst); // Found terminating iteration! 7429 } 7430 } 7431 return getCouldNotCompute(); 7432 } 7433 7434 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit( 7435 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) { 7436 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV); 7437 if (!RHS) 7438 return getCouldNotCompute(); 7439 7440 const BasicBlock *Latch = L->getLoopLatch(); 7441 if (!Latch) 7442 return getCouldNotCompute(); 7443 7444 const BasicBlock *Predecessor = L->getLoopPredecessor(); 7445 if (!Predecessor) 7446 return getCouldNotCompute(); 7447 7448 // Return true if V is of the form "LHS `shift_op` <positive constant>". 7449 // Return LHS in OutLHS and shift_opt in OutOpCode. 7450 auto MatchPositiveShift = 7451 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) { 7452 7453 using namespace PatternMatch; 7454 7455 ConstantInt *ShiftAmt; 7456 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7457 OutOpCode = Instruction::LShr; 7458 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7459 OutOpCode = Instruction::AShr; 7460 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7461 OutOpCode = Instruction::Shl; 7462 else 7463 return false; 7464 7465 return ShiftAmt->getValue().isStrictlyPositive(); 7466 }; 7467 7468 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in 7469 // 7470 // loop: 7471 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ] 7472 // %iv.shifted = lshr i32 %iv, <positive constant> 7473 // 7474 // Return true on a successful match. Return the corresponding PHI node (%iv 7475 // above) in PNOut and the opcode of the shift operation in OpCodeOut. 7476 auto MatchShiftRecurrence = 7477 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) { 7478 Optional<Instruction::BinaryOps> PostShiftOpCode; 7479 7480 { 7481 Instruction::BinaryOps OpC; 7482 Value *V; 7483 7484 // If we encounter a shift instruction, "peel off" the shift operation, 7485 // and remember that we did so. Later when we inspect %iv's backedge 7486 // value, we will make sure that the backedge value uses the same 7487 // operation. 7488 // 7489 // Note: the peeled shift operation does not have to be the same 7490 // instruction as the one feeding into the PHI's backedge value. We only 7491 // really care about it being the same *kind* of shift instruction -- 7492 // that's all that is required for our later inferences to hold. 7493 if (MatchPositiveShift(LHS, V, OpC)) { 7494 PostShiftOpCode = OpC; 7495 LHS = V; 7496 } 7497 } 7498 7499 PNOut = dyn_cast<PHINode>(LHS); 7500 if (!PNOut || PNOut->getParent() != L->getHeader()) 7501 return false; 7502 7503 Value *BEValue = PNOut->getIncomingValueForBlock(Latch); 7504 Value *OpLHS; 7505 7506 return 7507 // The backedge value for the PHI node must be a shift by a positive 7508 // amount 7509 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) && 7510 7511 // of the PHI node itself 7512 OpLHS == PNOut && 7513 7514 // and the kind of shift should be match the kind of shift we peeled 7515 // off, if any. 7516 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut); 7517 }; 7518 7519 PHINode *PN; 7520 Instruction::BinaryOps OpCode; 7521 if (!MatchShiftRecurrence(LHS, PN, OpCode)) 7522 return getCouldNotCompute(); 7523 7524 const DataLayout &DL = getDataLayout(); 7525 7526 // The key rationale for this optimization is that for some kinds of shift 7527 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1 7528 // within a finite number of iterations. If the condition guarding the 7529 // backedge (in the sense that the backedge is taken if the condition is true) 7530 // is false for the value the shift recurrence stabilizes to, then we know 7531 // that the backedge is taken only a finite number of times. 7532 7533 ConstantInt *StableValue = nullptr; 7534 switch (OpCode) { 7535 default: 7536 llvm_unreachable("Impossible case!"); 7537 7538 case Instruction::AShr: { 7539 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most 7540 // bitwidth(K) iterations. 7541 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7542 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7543 Predecessor->getTerminator(), &DT); 7544 auto *Ty = cast<IntegerType>(RHS->getType()); 7545 if (Known.isNonNegative()) 7546 StableValue = ConstantInt::get(Ty, 0); 7547 else if (Known.isNegative()) 7548 StableValue = ConstantInt::get(Ty, -1, true); 7549 else 7550 return getCouldNotCompute(); 7551 7552 break; 7553 } 7554 case Instruction::LShr: 7555 case Instruction::Shl: 7556 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7557 // stabilize to 0 in at most bitwidth(K) iterations. 7558 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7559 break; 7560 } 7561 7562 auto *Result = 7563 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7564 assert(Result->getType()->isIntegerTy(1) && 7565 "Otherwise cannot be an operand to a branch instruction"); 7566 7567 if (Result->isZeroValue()) { 7568 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7569 const SCEV *UpperBound = 7570 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7571 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7572 } 7573 7574 return getCouldNotCompute(); 7575 } 7576 7577 /// Return true if we can constant fold an instruction of the specified type, 7578 /// assuming that all operands were constants. 7579 static bool CanConstantFold(const Instruction *I) { 7580 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7581 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7582 isa<LoadInst>(I) || isa<ExtractValueInst>(I)) 7583 return true; 7584 7585 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7586 if (const Function *F = CI->getCalledFunction()) 7587 return canConstantFoldCallTo(CI, F); 7588 return false; 7589 } 7590 7591 /// Determine whether this instruction can constant evolve within this loop 7592 /// assuming its operands can all constant evolve. 7593 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7594 // An instruction outside of the loop can't be derived from a loop PHI. 7595 if (!L->contains(I)) return false; 7596 7597 if (isa<PHINode>(I)) { 7598 // We don't currently keep track of the control flow needed to evaluate 7599 // PHIs, so we cannot handle PHIs inside of loops. 7600 return L->getHeader() == I->getParent(); 7601 } 7602 7603 // If we won't be able to constant fold this expression even if the operands 7604 // are constants, bail early. 7605 return CanConstantFold(I); 7606 } 7607 7608 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7609 /// recursing through each instruction operand until reaching a loop header phi. 7610 static PHINode * 7611 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7612 DenseMap<Instruction *, PHINode *> &PHIMap, 7613 unsigned Depth) { 7614 if (Depth > MaxConstantEvolvingDepth) 7615 return nullptr; 7616 7617 // Otherwise, we can evaluate this instruction if all of its operands are 7618 // constant or derived from a PHI node themselves. 7619 PHINode *PHI = nullptr; 7620 for (Value *Op : UseInst->operands()) { 7621 if (isa<Constant>(Op)) continue; 7622 7623 Instruction *OpInst = dyn_cast<Instruction>(Op); 7624 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7625 7626 PHINode *P = dyn_cast<PHINode>(OpInst); 7627 if (!P) 7628 // If this operand is already visited, reuse the prior result. 7629 // We may have P != PHI if this is the deepest point at which the 7630 // inconsistent paths meet. 7631 P = PHIMap.lookup(OpInst); 7632 if (!P) { 7633 // Recurse and memoize the results, whether a phi is found or not. 7634 // This recursive call invalidates pointers into PHIMap. 7635 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1); 7636 PHIMap[OpInst] = P; 7637 } 7638 if (!P) 7639 return nullptr; // Not evolving from PHI 7640 if (PHI && PHI != P) 7641 return nullptr; // Evolving from multiple different PHIs. 7642 PHI = P; 7643 } 7644 // This is a expression evolving from a constant PHI! 7645 return PHI; 7646 } 7647 7648 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 7649 /// in the loop that V is derived from. We allow arbitrary operations along the 7650 /// way, but the operands of an operation must either be constants or a value 7651 /// derived from a constant PHI. If this expression does not fit with these 7652 /// constraints, return null. 7653 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 7654 Instruction *I = dyn_cast<Instruction>(V); 7655 if (!I || !canConstantEvolve(I, L)) return nullptr; 7656 7657 if (PHINode *PN = dyn_cast<PHINode>(I)) 7658 return PN; 7659 7660 // Record non-constant instructions contained by the loop. 7661 DenseMap<Instruction *, PHINode *> PHIMap; 7662 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0); 7663 } 7664 7665 /// EvaluateExpression - Given an expression that passes the 7666 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node 7667 /// in the loop has the value PHIVal. If we can't fold this expression for some 7668 /// reason, return null. 7669 static Constant *EvaluateExpression(Value *V, const Loop *L, 7670 DenseMap<Instruction *, Constant *> &Vals, 7671 const DataLayout &DL, 7672 const TargetLibraryInfo *TLI) { 7673 // Convenient constant check, but redundant for recursive calls. 7674 if (Constant *C = dyn_cast<Constant>(V)) return C; 7675 Instruction *I = dyn_cast<Instruction>(V); 7676 if (!I) return nullptr; 7677 7678 if (Constant *C = Vals.lookup(I)) return C; 7679 7680 // An instruction inside the loop depends on a value outside the loop that we 7681 // weren't given a mapping for, or a value such as a call inside the loop. 7682 if (!canConstantEvolve(I, L)) return nullptr; 7683 7684 // An unmapped PHI can be due to a branch or another loop inside this loop, 7685 // or due to this not being the initial iteration through a loop where we 7686 // couldn't compute the evolution of this particular PHI last time. 7687 if (isa<PHINode>(I)) return nullptr; 7688 7689 std::vector<Constant*> Operands(I->getNumOperands()); 7690 7691 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 7692 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 7693 if (!Operand) { 7694 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 7695 if (!Operands[i]) return nullptr; 7696 continue; 7697 } 7698 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 7699 Vals[Operand] = C; 7700 if (!C) return nullptr; 7701 Operands[i] = C; 7702 } 7703 7704 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 7705 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7706 Operands[1], DL, TLI); 7707 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 7708 if (!LI->isVolatile()) 7709 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7710 } 7711 return ConstantFoldInstOperands(I, Operands, DL, TLI); 7712 } 7713 7714 7715 // If every incoming value to PN except the one for BB is a specific Constant, 7716 // return that, else return nullptr. 7717 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7718 Constant *IncomingVal = nullptr; 7719 7720 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7721 if (PN->getIncomingBlock(i) == BB) 7722 continue; 7723 7724 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7725 if (!CurrentVal) 7726 return nullptr; 7727 7728 if (IncomingVal != CurrentVal) { 7729 if (IncomingVal) 7730 return nullptr; 7731 IncomingVal = CurrentVal; 7732 } 7733 } 7734 7735 return IncomingVal; 7736 } 7737 7738 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7739 /// in the header of its containing loop, we know the loop executes a 7740 /// constant number of times, and the PHI node is just a recurrence 7741 /// involving constants, fold it. 7742 Constant * 7743 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7744 const APInt &BEs, 7745 const Loop *L) { 7746 auto I = ConstantEvolutionLoopExitValue.find(PN); 7747 if (I != ConstantEvolutionLoopExitValue.end()) 7748 return I->second; 7749 7750 if (BEs.ugt(MaxBruteForceIterations)) 7751 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7752 7753 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7754 7755 DenseMap<Instruction *, Constant *> CurrentIterVals; 7756 BasicBlock *Header = L->getHeader(); 7757 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7758 7759 BasicBlock *Latch = L->getLoopLatch(); 7760 if (!Latch) 7761 return nullptr; 7762 7763 for (PHINode &PHI : Header->phis()) { 7764 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 7765 CurrentIterVals[&PHI] = StartCST; 7766 } 7767 if (!CurrentIterVals.count(PN)) 7768 return RetVal = nullptr; 7769 7770 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7771 7772 // Execute the loop symbolically to determine the exit value. 7773 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 7774 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 7775 7776 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7777 unsigned IterationNum = 0; 7778 const DataLayout &DL = getDataLayout(); 7779 for (; ; ++IterationNum) { 7780 if (IterationNum == NumIterations) 7781 return RetVal = CurrentIterVals[PN]; // Got exit value! 7782 7783 // Compute the value of the PHIs for the next iteration. 7784 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 7785 DenseMap<Instruction *, Constant *> NextIterVals; 7786 Constant *NextPHI = 7787 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7788 if (!NextPHI) 7789 return nullptr; // Couldn't evaluate! 7790 NextIterVals[PN] = NextPHI; 7791 7792 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 7793 7794 // Also evaluate the other PHI nodes. However, we don't get to stop if we 7795 // cease to be able to evaluate one of them or if they stop evolving, 7796 // because that doesn't necessarily prevent us from computing PN. 7797 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 7798 for (const auto &I : CurrentIterVals) { 7799 PHINode *PHI = dyn_cast<PHINode>(I.first); 7800 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 7801 PHIsToCompute.emplace_back(PHI, I.second); 7802 } 7803 // We use two distinct loops because EvaluateExpression may invalidate any 7804 // iterators into CurrentIterVals. 7805 for (const auto &I : PHIsToCompute) { 7806 PHINode *PHI = I.first; 7807 Constant *&NextPHI = NextIterVals[PHI]; 7808 if (!NextPHI) { // Not already computed. 7809 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 7810 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7811 } 7812 if (NextPHI != I.second) 7813 StoppedEvolving = false; 7814 } 7815 7816 // If all entries in CurrentIterVals == NextIterVals then we can stop 7817 // iterating, the loop can't continue to change. 7818 if (StoppedEvolving) 7819 return RetVal = CurrentIterVals[PN]; 7820 7821 CurrentIterVals.swap(NextIterVals); 7822 } 7823 } 7824 7825 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, 7826 Value *Cond, 7827 bool ExitWhen) { 7828 PHINode *PN = getConstantEvolvingPHI(Cond, L); 7829 if (!PN) return getCouldNotCompute(); 7830 7831 // If the loop is canonicalized, the PHI will have exactly two entries. 7832 // That's the only form we support here. 7833 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); 7834 7835 DenseMap<Instruction *, Constant *> CurrentIterVals; 7836 BasicBlock *Header = L->getHeader(); 7837 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7838 7839 BasicBlock *Latch = L->getLoopLatch(); 7840 assert(Latch && "Should follow from NumIncomingValues == 2!"); 7841 7842 for (PHINode &PHI : Header->phis()) { 7843 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 7844 CurrentIterVals[&PHI] = StartCST; 7845 } 7846 if (!CurrentIterVals.count(PN)) 7847 return getCouldNotCompute(); 7848 7849 // Okay, we find a PHI node that defines the trip count of this loop. Execute 7850 // the loop symbolically to determine when the condition gets a value of 7851 // "ExitWhen". 7852 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 7853 const DataLayout &DL = getDataLayout(); 7854 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){ 7855 auto *CondVal = dyn_cast_or_null<ConstantInt>( 7856 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); 7857 7858 // Couldn't symbolically evaluate. 7859 if (!CondVal) return getCouldNotCompute(); 7860 7861 if (CondVal->getValue() == uint64_t(ExitWhen)) { 7862 ++NumBruteForceTripCountsComputed; 7863 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 7864 } 7865 7866 // Update all the PHI nodes for the next iteration. 7867 DenseMap<Instruction *, Constant *> NextIterVals; 7868 7869 // Create a list of which PHIs we need to compute. We want to do this before 7870 // calling EvaluateExpression on them because that may invalidate iterators 7871 // into CurrentIterVals. 7872 SmallVector<PHINode *, 8> PHIsToCompute; 7873 for (const auto &I : CurrentIterVals) { 7874 PHINode *PHI = dyn_cast<PHINode>(I.first); 7875 if (!PHI || PHI->getParent() != Header) continue; 7876 PHIsToCompute.push_back(PHI); 7877 } 7878 for (PHINode *PHI : PHIsToCompute) { 7879 Constant *&NextPHI = NextIterVals[PHI]; 7880 if (NextPHI) continue; // Already computed! 7881 7882 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 7883 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7884 } 7885 CurrentIterVals.swap(NextIterVals); 7886 } 7887 7888 // Too many iterations were needed to evaluate. 7889 return getCouldNotCompute(); 7890 } 7891 7892 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 7893 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 7894 ValuesAtScopes[V]; 7895 // Check to see if we've folded this expression at this loop before. 7896 for (auto &LS : Values) 7897 if (LS.first == L) 7898 return LS.second ? LS.second : V; 7899 7900 Values.emplace_back(L, nullptr); 7901 7902 // Otherwise compute it. 7903 const SCEV *C = computeSCEVAtScope(V, L); 7904 for (auto &LS : reverse(ValuesAtScopes[V])) 7905 if (LS.first == L) { 7906 LS.second = C; 7907 break; 7908 } 7909 return C; 7910 } 7911 7912 /// This builds up a Constant using the ConstantExpr interface. That way, we 7913 /// will return Constants for objects which aren't represented by a 7914 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 7915 /// Returns NULL if the SCEV isn't representable as a Constant. 7916 static Constant *BuildConstantFromSCEV(const SCEV *V) { 7917 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 7918 case scCouldNotCompute: 7919 case scAddRecExpr: 7920 break; 7921 case scConstant: 7922 return cast<SCEVConstant>(V)->getValue(); 7923 case scUnknown: 7924 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 7925 case scSignExtend: { 7926 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 7927 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 7928 return ConstantExpr::getSExt(CastOp, SS->getType()); 7929 break; 7930 } 7931 case scZeroExtend: { 7932 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 7933 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 7934 return ConstantExpr::getZExt(CastOp, SZ->getType()); 7935 break; 7936 } 7937 case scTruncate: { 7938 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 7939 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 7940 return ConstantExpr::getTrunc(CastOp, ST->getType()); 7941 break; 7942 } 7943 case scAddExpr: { 7944 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 7945 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 7946 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7947 unsigned AS = PTy->getAddressSpace(); 7948 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7949 C = ConstantExpr::getBitCast(C, DestPtrTy); 7950 } 7951 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 7952 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 7953 if (!C2) return nullptr; 7954 7955 // First pointer! 7956 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 7957 unsigned AS = C2->getType()->getPointerAddressSpace(); 7958 std::swap(C, C2); 7959 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7960 // The offsets have been converted to bytes. We can add bytes to an 7961 // i8* by GEP with the byte count in the first index. 7962 C = ConstantExpr::getBitCast(C, DestPtrTy); 7963 } 7964 7965 // Don't bother trying to sum two pointers. We probably can't 7966 // statically compute a load that results from it anyway. 7967 if (C2->getType()->isPointerTy()) 7968 return nullptr; 7969 7970 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7971 if (PTy->getElementType()->isStructTy()) 7972 C2 = ConstantExpr::getIntegerCast( 7973 C2, Type::getInt32Ty(C->getContext()), true); 7974 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); 7975 } else 7976 C = ConstantExpr::getAdd(C, C2); 7977 } 7978 return C; 7979 } 7980 break; 7981 } 7982 case scMulExpr: { 7983 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 7984 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 7985 // Don't bother with pointers at all. 7986 if (C->getType()->isPointerTy()) return nullptr; 7987 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 7988 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 7989 if (!C2 || C2->getType()->isPointerTy()) return nullptr; 7990 C = ConstantExpr::getMul(C, C2); 7991 } 7992 return C; 7993 } 7994 break; 7995 } 7996 case scUDivExpr: { 7997 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 7998 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 7999 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 8000 if (LHS->getType() == RHS->getType()) 8001 return ConstantExpr::getUDiv(LHS, RHS); 8002 break; 8003 } 8004 case scSMaxExpr: 8005 case scUMaxExpr: 8006 case scSMinExpr: 8007 case scUMinExpr: 8008 break; // TODO: smax, umax, smin, umax. 8009 } 8010 return nullptr; 8011 } 8012 8013 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 8014 if (isa<SCEVConstant>(V)) return V; 8015 8016 // If this instruction is evolved from a constant-evolving PHI, compute the 8017 // exit value from the loop without using SCEVs. 8018 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 8019 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 8020 if (PHINode *PN = dyn_cast<PHINode>(I)) { 8021 const Loop *LI = this->LI[I->getParent()]; 8022 // Looking for loop exit value. 8023 if (LI && LI->getParentLoop() == L && 8024 PN->getParent() == LI->getHeader()) { 8025 // Okay, there is no closed form solution for the PHI node. Check 8026 // to see if the loop that contains it has a known backedge-taken 8027 // count. If so, we may be able to force computation of the exit 8028 // value. 8029 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 8030 // This trivial case can show up in some degenerate cases where 8031 // the incoming IR has not yet been fully simplified. 8032 if (BackedgeTakenCount->isZero()) { 8033 Value *InitValue = nullptr; 8034 bool MultipleInitValues = false; 8035 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 8036 if (!LI->contains(PN->getIncomingBlock(i))) { 8037 if (!InitValue) 8038 InitValue = PN->getIncomingValue(i); 8039 else if (InitValue != PN->getIncomingValue(i)) { 8040 MultipleInitValues = true; 8041 break; 8042 } 8043 } 8044 } 8045 if (!MultipleInitValues && InitValue) 8046 return getSCEV(InitValue); 8047 } 8048 // Do we have a loop invariant value flowing around the backedge 8049 // for a loop which must execute the backedge? 8050 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 8051 isKnownPositive(BackedgeTakenCount) && 8052 PN->getNumIncomingValues() == 2) { 8053 8054 unsigned InLoopPred = LI->contains(PN->getIncomingBlock(0)) ? 0 : 1; 8055 Value *BackedgeVal = PN->getIncomingValue(InLoopPred); 8056 if (LI->isLoopInvariant(BackedgeVal)) 8057 return getSCEV(BackedgeVal); 8058 } 8059 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 8060 // Okay, we know how many times the containing loop executes. If 8061 // this is a constant evolving PHI node, get the final value at 8062 // the specified iteration number. 8063 Constant *RV = 8064 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI); 8065 if (RV) return getSCEV(RV); 8066 } 8067 } 8068 8069 // If there is a single-input Phi, evaluate it at our scope. If we can 8070 // prove that this replacement does not break LCSSA form, use new value. 8071 if (PN->getNumOperands() == 1) { 8072 const SCEV *Input = getSCEV(PN->getOperand(0)); 8073 const SCEV *InputAtScope = getSCEVAtScope(Input, L); 8074 // TODO: We can generalize it using LI.replacementPreservesLCSSAForm, 8075 // for the simplest case just support constants. 8076 if (isa<SCEVConstant>(InputAtScope)) return InputAtScope; 8077 } 8078 } 8079 8080 // Okay, this is an expression that we cannot symbolically evaluate 8081 // into a SCEV. Check to see if it's possible to symbolically evaluate 8082 // the arguments into constants, and if so, try to constant propagate the 8083 // result. This is particularly useful for computing loop exit values. 8084 if (CanConstantFold(I)) { 8085 SmallVector<Constant *, 4> Operands; 8086 bool MadeImprovement = false; 8087 for (Value *Op : I->operands()) { 8088 if (Constant *C = dyn_cast<Constant>(Op)) { 8089 Operands.push_back(C); 8090 continue; 8091 } 8092 8093 // If any of the operands is non-constant and if they are 8094 // non-integer and non-pointer, don't even try to analyze them 8095 // with scev techniques. 8096 if (!isSCEVable(Op->getType())) 8097 return V; 8098 8099 const SCEV *OrigV = getSCEV(Op); 8100 const SCEV *OpV = getSCEVAtScope(OrigV, L); 8101 MadeImprovement |= OrigV != OpV; 8102 8103 Constant *C = BuildConstantFromSCEV(OpV); 8104 if (!C) return V; 8105 if (C->getType() != Op->getType()) 8106 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 8107 Op->getType(), 8108 false), 8109 C, Op->getType()); 8110 Operands.push_back(C); 8111 } 8112 8113 // Check to see if getSCEVAtScope actually made an improvement. 8114 if (MadeImprovement) { 8115 Constant *C = nullptr; 8116 const DataLayout &DL = getDataLayout(); 8117 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 8118 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 8119 Operands[1], DL, &TLI); 8120 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 8121 if (!LI->isVolatile()) 8122 C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 8123 } else 8124 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 8125 if (!C) return V; 8126 return getSCEV(C); 8127 } 8128 } 8129 } 8130 8131 // This is some other type of SCEVUnknown, just return it. 8132 return V; 8133 } 8134 8135 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 8136 // Avoid performing the look-up in the common case where the specified 8137 // expression has no loop-variant portions. 8138 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 8139 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8140 if (OpAtScope != Comm->getOperand(i)) { 8141 // Okay, at least one of these operands is loop variant but might be 8142 // foldable. Build a new instance of the folded commutative expression. 8143 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 8144 Comm->op_begin()+i); 8145 NewOps.push_back(OpAtScope); 8146 8147 for (++i; i != e; ++i) { 8148 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8149 NewOps.push_back(OpAtScope); 8150 } 8151 if (isa<SCEVAddExpr>(Comm)) 8152 return getAddExpr(NewOps, Comm->getNoWrapFlags()); 8153 if (isa<SCEVMulExpr>(Comm)) 8154 return getMulExpr(NewOps, Comm->getNoWrapFlags()); 8155 if (isa<SCEVMinMaxExpr>(Comm)) 8156 return getMinMaxExpr(Comm->getSCEVType(), NewOps); 8157 llvm_unreachable("Unknown commutative SCEV type!"); 8158 } 8159 } 8160 // If we got here, all operands are loop invariant. 8161 return Comm; 8162 } 8163 8164 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 8165 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 8166 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 8167 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 8168 return Div; // must be loop invariant 8169 return getUDivExpr(LHS, RHS); 8170 } 8171 8172 // If this is a loop recurrence for a loop that does not contain L, then we 8173 // are dealing with the final value computed by the loop. 8174 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 8175 // First, attempt to evaluate each operand. 8176 // Avoid performing the look-up in the common case where the specified 8177 // expression has no loop-variant portions. 8178 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8179 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8180 if (OpAtScope == AddRec->getOperand(i)) 8181 continue; 8182 8183 // Okay, at least one of these operands is loop variant but might be 8184 // foldable. Build a new instance of the folded commutative expression. 8185 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8186 AddRec->op_begin()+i); 8187 NewOps.push_back(OpAtScope); 8188 for (++i; i != e; ++i) 8189 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8190 8191 const SCEV *FoldedRec = 8192 getAddRecExpr(NewOps, AddRec->getLoop(), 8193 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8194 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8195 // The addrec may be folded to a nonrecurrence, for example, if the 8196 // induction variable is multiplied by zero after constant folding. Go 8197 // ahead and return the folded value. 8198 if (!AddRec) 8199 return FoldedRec; 8200 break; 8201 } 8202 8203 // If the scope is outside the addrec's loop, evaluate it by using the 8204 // loop exit value of the addrec. 8205 if (!AddRec->getLoop()->contains(L)) { 8206 // To evaluate this recurrence, we need to know how many times the AddRec 8207 // loop iterates. Compute this now. 8208 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8209 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8210 8211 // Then, evaluate the AddRec. 8212 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8213 } 8214 8215 return AddRec; 8216 } 8217 8218 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8219 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8220 if (Op == Cast->getOperand()) 8221 return Cast; // must be loop invariant 8222 return getZeroExtendExpr(Op, Cast->getType()); 8223 } 8224 8225 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8226 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8227 if (Op == Cast->getOperand()) 8228 return Cast; // must be loop invariant 8229 return getSignExtendExpr(Op, Cast->getType()); 8230 } 8231 8232 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8233 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8234 if (Op == Cast->getOperand()) 8235 return Cast; // must be loop invariant 8236 return getTruncateExpr(Op, Cast->getType()); 8237 } 8238 8239 llvm_unreachable("Unknown SCEV type!"); 8240 } 8241 8242 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8243 return getSCEVAtScope(getSCEV(V), L); 8244 } 8245 8246 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 8247 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 8248 return stripInjectiveFunctions(ZExt->getOperand()); 8249 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 8250 return stripInjectiveFunctions(SExt->getOperand()); 8251 return S; 8252 } 8253 8254 /// Finds the minimum unsigned root of the following equation: 8255 /// 8256 /// A * X = B (mod N) 8257 /// 8258 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8259 /// A and B isn't important. 8260 /// 8261 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 8262 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8263 ScalarEvolution &SE) { 8264 uint32_t BW = A.getBitWidth(); 8265 assert(BW == SE.getTypeSizeInBits(B->getType())); 8266 assert(A != 0 && "A must be non-zero."); 8267 8268 // 1. D = gcd(A, N) 8269 // 8270 // The gcd of A and N may have only one prime factor: 2. The number of 8271 // trailing zeros in A is its multiplicity 8272 uint32_t Mult2 = A.countTrailingZeros(); 8273 // D = 2^Mult2 8274 8275 // 2. Check if B is divisible by D. 8276 // 8277 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 8278 // is not less than multiplicity of this prime factor for D. 8279 if (SE.GetMinTrailingZeros(B) < Mult2) 8280 return SE.getCouldNotCompute(); 8281 8282 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 8283 // modulo (N / D). 8284 // 8285 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8286 // (N / D) in general. The inverse itself always fits into BW bits, though, 8287 // so we immediately truncate it. 8288 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8289 APInt Mod(BW + 1, 0); 8290 Mod.setBit(BW - Mult2); // Mod = N / D 8291 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8292 8293 // 4. Compute the minimum unsigned root of the equation: 8294 // I * (B / D) mod (N / D) 8295 // To simplify the computation, we factor out the divide by D: 8296 // (I * B mod N) / D 8297 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8298 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8299 } 8300 8301 /// For a given quadratic addrec, generate coefficients of the corresponding 8302 /// quadratic equation, multiplied by a common value to ensure that they are 8303 /// integers. 8304 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8305 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8306 /// were multiplied by, and BitWidth is the bit width of the original addrec 8307 /// coefficients. 8308 /// This function returns None if the addrec coefficients are not compile- 8309 /// time constants. 8310 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8311 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8312 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8313 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8314 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8315 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8316 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8317 << *AddRec << '\n'); 8318 8319 // We currently can only solve this if the coefficients are constants. 8320 if (!LC || !MC || !NC) { 8321 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8322 return None; 8323 } 8324 8325 APInt L = LC->getAPInt(); 8326 APInt M = MC->getAPInt(); 8327 APInt N = NC->getAPInt(); 8328 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8329 8330 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8331 unsigned NewWidth = BitWidth + 1; 8332 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8333 << BitWidth << '\n'); 8334 // The sign-extension (as opposed to a zero-extension) here matches the 8335 // extension used in SolveQuadraticEquationWrap (with the same motivation). 8336 N = N.sext(NewWidth); 8337 M = M.sext(NewWidth); 8338 L = L.sext(NewWidth); 8339 8340 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8341 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8342 // L+M, L+2M+N, L+3M+3N, ... 8343 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8344 // 8345 // The equation Acc = 0 is then 8346 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8347 // In a quadratic form it becomes: 8348 // N n^2 + (2M-N) n + 2L = 0. 8349 8350 APInt A = N; 8351 APInt B = 2 * M - A; 8352 APInt C = 2 * L; 8353 APInt T = APInt(NewWidth, 2); 8354 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 8355 << "x + " << C << ", coeff bw: " << NewWidth 8356 << ", multiplied by " << T << '\n'); 8357 return std::make_tuple(A, B, C, T, BitWidth); 8358 } 8359 8360 /// Helper function to compare optional APInts: 8361 /// (a) if X and Y both exist, return min(X, Y), 8362 /// (b) if neither X nor Y exist, return None, 8363 /// (c) if exactly one of X and Y exists, return that value. 8364 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 8365 if (X.hasValue() && Y.hasValue()) { 8366 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 8367 APInt XW = X->sextOrSelf(W); 8368 APInt YW = Y->sextOrSelf(W); 8369 return XW.slt(YW) ? *X : *Y; 8370 } 8371 if (!X.hasValue() && !Y.hasValue()) 8372 return None; 8373 return X.hasValue() ? *X : *Y; 8374 } 8375 8376 /// Helper function to truncate an optional APInt to a given BitWidth. 8377 /// When solving addrec-related equations, it is preferable to return a value 8378 /// that has the same bit width as the original addrec's coefficients. If the 8379 /// solution fits in the original bit width, truncate it (except for i1). 8380 /// Returning a value of a different bit width may inhibit some optimizations. 8381 /// 8382 /// In general, a solution to a quadratic equation generated from an addrec 8383 /// may require BW+1 bits, where BW is the bit width of the addrec's 8384 /// coefficients. The reason is that the coefficients of the quadratic 8385 /// equation are BW+1 bits wide (to avoid truncation when converting from 8386 /// the addrec to the equation). 8387 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 8388 if (!X.hasValue()) 8389 return None; 8390 unsigned W = X->getBitWidth(); 8391 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 8392 return X->trunc(BitWidth); 8393 return X; 8394 } 8395 8396 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 8397 /// iterations. The values L, M, N are assumed to be signed, and they 8398 /// should all have the same bit widths. 8399 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 8400 /// where BW is the bit width of the addrec's coefficients. 8401 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 8402 /// returned as such, otherwise the bit width of the returned value may 8403 /// be greater than BW. 8404 /// 8405 /// This function returns None if 8406 /// (a) the addrec coefficients are not constant, or 8407 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 8408 /// like x^2 = 5, no integer solutions exist, in other cases an integer 8409 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 8410 static Optional<APInt> 8411 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 8412 APInt A, B, C, M; 8413 unsigned BitWidth; 8414 auto T = GetQuadraticEquation(AddRec); 8415 if (!T.hasValue()) 8416 return None; 8417 8418 std::tie(A, B, C, M, BitWidth) = *T; 8419 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 8420 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 8421 if (!X.hasValue()) 8422 return None; 8423 8424 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 8425 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 8426 if (!V->isZero()) 8427 return None; 8428 8429 return TruncIfPossible(X, BitWidth); 8430 } 8431 8432 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 8433 /// iterations. The values M, N are assumed to be signed, and they 8434 /// should all have the same bit widths. 8435 /// Find the least n such that c(n) does not belong to the given range, 8436 /// while c(n-1) does. 8437 /// 8438 /// This function returns None if 8439 /// (a) the addrec coefficients are not constant, or 8440 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 8441 /// bounds of the range. 8442 static Optional<APInt> 8443 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 8444 const ConstantRange &Range, ScalarEvolution &SE) { 8445 assert(AddRec->getOperand(0)->isZero() && 8446 "Starting value of addrec should be 0"); 8447 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 8448 << Range << ", addrec " << *AddRec << '\n'); 8449 // This case is handled in getNumIterationsInRange. Here we can assume that 8450 // we start in the range. 8451 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 8452 "Addrec's initial value should be in range"); 8453 8454 APInt A, B, C, M; 8455 unsigned BitWidth; 8456 auto T = GetQuadraticEquation(AddRec); 8457 if (!T.hasValue()) 8458 return None; 8459 8460 // Be careful about the return value: there can be two reasons for not 8461 // returning an actual number. First, if no solutions to the equations 8462 // were found, and second, if the solutions don't leave the given range. 8463 // The first case means that the actual solution is "unknown", the second 8464 // means that it's known, but not valid. If the solution is unknown, we 8465 // cannot make any conclusions. 8466 // Return a pair: the optional solution and a flag indicating if the 8467 // solution was found. 8468 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 8469 // Solve for signed overflow and unsigned overflow, pick the lower 8470 // solution. 8471 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 8472 << Bound << " (before multiplying by " << M << ")\n"); 8473 Bound *= M; // The quadratic equation multiplier. 8474 8475 Optional<APInt> SO = None; 8476 if (BitWidth > 1) { 8477 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 8478 "signed overflow\n"); 8479 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); 8480 } 8481 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 8482 "unsigned overflow\n"); 8483 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, 8484 BitWidth+1); 8485 8486 auto LeavesRange = [&] (const APInt &X) { 8487 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); 8488 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); 8489 if (Range.contains(V0->getValue())) 8490 return false; 8491 // X should be at least 1, so X-1 is non-negative. 8492 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); 8493 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); 8494 if (Range.contains(V1->getValue())) 8495 return true; 8496 return false; 8497 }; 8498 8499 // If SolveQuadraticEquationWrap returns None, it means that there can 8500 // be a solution, but the function failed to find it. We cannot treat it 8501 // as "no solution". 8502 if (!SO.hasValue() || !UO.hasValue()) 8503 return { None, false }; 8504 8505 // Check the smaller value first to see if it leaves the range. 8506 // At this point, both SO and UO must have values. 8507 Optional<APInt> Min = MinOptional(SO, UO); 8508 if (LeavesRange(*Min)) 8509 return { Min, true }; 8510 Optional<APInt> Max = Min == SO ? UO : SO; 8511 if (LeavesRange(*Max)) 8512 return { Max, true }; 8513 8514 // Solutions were found, but were eliminated, hence the "true". 8515 return { None, true }; 8516 }; 8517 8518 std::tie(A, B, C, M, BitWidth) = *T; 8519 // Lower bound is inclusive, subtract 1 to represent the exiting value. 8520 APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1; 8521 APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth()); 8522 auto SL = SolveForBoundary(Lower); 8523 auto SU = SolveForBoundary(Upper); 8524 // If any of the solutions was unknown, no meaninigful conclusions can 8525 // be made. 8526 if (!SL.second || !SU.second) 8527 return None; 8528 8529 // Claim: The correct solution is not some value between Min and Max. 8530 // 8531 // Justification: Assuming that Min and Max are different values, one of 8532 // them is when the first signed overflow happens, the other is when the 8533 // first unsigned overflow happens. Crossing the range boundary is only 8534 // possible via an overflow (treating 0 as a special case of it, modeling 8535 // an overflow as crossing k*2^W for some k). 8536 // 8537 // The interesting case here is when Min was eliminated as an invalid 8538 // solution, but Max was not. The argument is that if there was another 8539 // overflow between Min and Max, it would also have been eliminated if 8540 // it was considered. 8541 // 8542 // For a given boundary, it is possible to have two overflows of the same 8543 // type (signed/unsigned) without having the other type in between: this 8544 // can happen when the vertex of the parabola is between the iterations 8545 // corresponding to the overflows. This is only possible when the two 8546 // overflows cross k*2^W for the same k. In such case, if the second one 8547 // left the range (and was the first one to do so), the first overflow 8548 // would have to enter the range, which would mean that either we had left 8549 // the range before or that we started outside of it. Both of these cases 8550 // are contradictions. 8551 // 8552 // Claim: In the case where SolveForBoundary returns None, the correct 8553 // solution is not some value between the Max for this boundary and the 8554 // Min of the other boundary. 8555 // 8556 // Justification: Assume that we had such Max_A and Min_B corresponding 8557 // to range boundaries A and B and such that Max_A < Min_B. If there was 8558 // a solution between Max_A and Min_B, it would have to be caused by an 8559 // overflow corresponding to either A or B. It cannot correspond to B, 8560 // since Min_B is the first occurrence of such an overflow. If it 8561 // corresponded to A, it would have to be either a signed or an unsigned 8562 // overflow that is larger than both eliminated overflows for A. But 8563 // between the eliminated overflows and this overflow, the values would 8564 // cover the entire value space, thus crossing the other boundary, which 8565 // is a contradiction. 8566 8567 return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth); 8568 } 8569 8570 ScalarEvolution::ExitLimit 8571 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit, 8572 bool AllowPredicates) { 8573 8574 // This is only used for loops with a "x != y" exit test. The exit condition 8575 // is now expressed as a single expression, V = x-y. So the exit test is 8576 // effectively V != 0. We know and take advantage of the fact that this 8577 // expression only being used in a comparison by zero context. 8578 8579 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 8580 // If the value is a constant 8581 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8582 // If the value is already zero, the branch will execute zero times. 8583 if (C->getValue()->isZero()) return C; 8584 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8585 } 8586 8587 const SCEVAddRecExpr *AddRec = 8588 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V)); 8589 8590 if (!AddRec && AllowPredicates) 8591 // Try to make this an AddRec using runtime tests, in the first X 8592 // iterations of this loop, where X is the SCEV expression found by the 8593 // algorithm below. 8594 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); 8595 8596 if (!AddRec || AddRec->getLoop() != L) 8597 return getCouldNotCompute(); 8598 8599 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 8600 // the quadratic equation to solve it. 8601 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 8602 // We can only use this value if the chrec ends up with an exact zero 8603 // value at this index. When solving for "X*X != 5", for example, we 8604 // should not accept a root of 2. 8605 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) { 8606 const auto *R = cast<SCEVConstant>(getConstant(S.getValue())); 8607 return ExitLimit(R, R, false, Predicates); 8608 } 8609 return getCouldNotCompute(); 8610 } 8611 8612 // Otherwise we can only handle this if it is affine. 8613 if (!AddRec->isAffine()) 8614 return getCouldNotCompute(); 8615 8616 // If this is an affine expression, the execution count of this branch is 8617 // the minimum unsigned root of the following equation: 8618 // 8619 // Start + Step*N = 0 (mod 2^BW) 8620 // 8621 // equivalent to: 8622 // 8623 // Step*N = -Start (mod 2^BW) 8624 // 8625 // where BW is the common bit width of Start and Step. 8626 8627 // Get the initial value for the loop. 8628 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 8629 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 8630 8631 // For now we handle only constant steps. 8632 // 8633 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 8634 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 8635 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 8636 // We have not yet seen any such cases. 8637 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 8638 if (!StepC || StepC->getValue()->isZero()) 8639 return getCouldNotCompute(); 8640 8641 // For positive steps (counting up until unsigned overflow): 8642 // N = -Start/Step (as unsigned) 8643 // For negative steps (counting down to zero): 8644 // N = Start/-Step 8645 // First compute the unsigned distance from zero in the direction of Step. 8646 bool CountDown = StepC->getAPInt().isNegative(); 8647 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 8648 8649 // Handle unitary steps, which cannot wraparound. 8650 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 8651 // N = Distance (as unsigned) 8652 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 8653 APInt MaxBECount = getUnsignedRangeMax(Distance); 8654 8655 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 8656 // we end up with a loop whose backedge-taken count is n - 1. Detect this 8657 // case, and see if we can improve the bound. 8658 // 8659 // Explicitly handling this here is necessary because getUnsignedRange 8660 // isn't context-sensitive; it doesn't know that we only care about the 8661 // range inside the loop. 8662 const SCEV *Zero = getZero(Distance->getType()); 8663 const SCEV *One = getOne(Distance->getType()); 8664 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 8665 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 8666 // If Distance + 1 doesn't overflow, we can compute the maximum distance 8667 // as "unsigned_max(Distance + 1) - 1". 8668 ConstantRange CR = getUnsignedRange(DistancePlusOne); 8669 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 8670 } 8671 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 8672 } 8673 8674 // If the condition controls loop exit (the loop exits only if the expression 8675 // is true) and the addition is no-wrap we can use unsigned divide to 8676 // compute the backedge count. In this case, the step may not divide the 8677 // distance, but we don't care because if the condition is "missed" the loop 8678 // will have undefined behavior due to wrapping. 8679 if (ControlsExit && AddRec->hasNoSelfWrap() && 8680 loopHasNoAbnormalExits(AddRec->getLoop())) { 8681 const SCEV *Exact = 8682 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 8683 const SCEV *Max = 8684 Exact == getCouldNotCompute() 8685 ? Exact 8686 : getConstant(getUnsignedRangeMax(Exact)); 8687 return ExitLimit(Exact, Max, false, Predicates); 8688 } 8689 8690 // Solve the general equation. 8691 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 8692 getNegativeSCEV(Start), *this); 8693 const SCEV *M = E == getCouldNotCompute() 8694 ? E 8695 : getConstant(getUnsignedRangeMax(E)); 8696 return ExitLimit(E, M, false, Predicates); 8697 } 8698 8699 ScalarEvolution::ExitLimit 8700 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 8701 // Loops that look like: while (X == 0) are very strange indeed. We don't 8702 // handle them yet except for the trivial case. This could be expanded in the 8703 // future as needed. 8704 8705 // If the value is a constant, check to see if it is known to be non-zero 8706 // already. If so, the backedge will execute zero times. 8707 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8708 if (!C->getValue()->isZero()) 8709 return getZero(C->getType()); 8710 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8711 } 8712 8713 // We could implement others, but I really doubt anyone writes loops like 8714 // this, and if they did, they would already be constant folded. 8715 return getCouldNotCompute(); 8716 } 8717 8718 std::pair<BasicBlock *, BasicBlock *> 8719 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 8720 // If the block has a unique predecessor, then there is no path from the 8721 // predecessor to the block that does not go through the direct edge 8722 // from the predecessor to the block. 8723 if (BasicBlock *Pred = BB->getSinglePredecessor()) 8724 return {Pred, BB}; 8725 8726 // A loop's header is defined to be a block that dominates the loop. 8727 // If the header has a unique predecessor outside the loop, it must be 8728 // a block that has exactly one successor that can reach the loop. 8729 if (Loop *L = LI.getLoopFor(BB)) 8730 return {L->getLoopPredecessor(), L->getHeader()}; 8731 8732 return {nullptr, nullptr}; 8733 } 8734 8735 /// SCEV structural equivalence is usually sufficient for testing whether two 8736 /// expressions are equal, however for the purposes of looking for a condition 8737 /// guarding a loop, it can be useful to be a little more general, since a 8738 /// front-end may have replicated the controlling expression. 8739 static bool HasSameValue(const SCEV *A, const SCEV *B) { 8740 // Quick check to see if they are the same SCEV. 8741 if (A == B) return true; 8742 8743 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 8744 // Not all instructions that are "identical" compute the same value. For 8745 // instance, two distinct alloca instructions allocating the same type are 8746 // identical and do not read memory; but compute distinct values. 8747 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 8748 }; 8749 8750 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 8751 // two different instructions with the same value. Check for this case. 8752 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 8753 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 8754 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 8755 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 8756 if (ComputesEqualValues(AI, BI)) 8757 return true; 8758 8759 // Otherwise assume they may have a different value. 8760 return false; 8761 } 8762 8763 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 8764 const SCEV *&LHS, const SCEV *&RHS, 8765 unsigned Depth) { 8766 bool Changed = false; 8767 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 8768 // '0 != 0'. 8769 auto TrivialCase = [&](bool TriviallyTrue) { 8770 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8771 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 8772 return true; 8773 }; 8774 // If we hit the max recursion limit bail out. 8775 if (Depth >= 3) 8776 return false; 8777 8778 // Canonicalize a constant to the right side. 8779 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 8780 // Check for both operands constant. 8781 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 8782 if (ConstantExpr::getICmp(Pred, 8783 LHSC->getValue(), 8784 RHSC->getValue())->isNullValue()) 8785 return TrivialCase(false); 8786 else 8787 return TrivialCase(true); 8788 } 8789 // Otherwise swap the operands to put the constant on the right. 8790 std::swap(LHS, RHS); 8791 Pred = ICmpInst::getSwappedPredicate(Pred); 8792 Changed = true; 8793 } 8794 8795 // If we're comparing an addrec with a value which is loop-invariant in the 8796 // addrec's loop, put the addrec on the left. Also make a dominance check, 8797 // as both operands could be addrecs loop-invariant in each other's loop. 8798 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 8799 const Loop *L = AR->getLoop(); 8800 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 8801 std::swap(LHS, RHS); 8802 Pred = ICmpInst::getSwappedPredicate(Pred); 8803 Changed = true; 8804 } 8805 } 8806 8807 // If there's a constant operand, canonicalize comparisons with boundary 8808 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 8809 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 8810 const APInt &RA = RC->getAPInt(); 8811 8812 bool SimplifiedByConstantRange = false; 8813 8814 if (!ICmpInst::isEquality(Pred)) { 8815 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 8816 if (ExactCR.isFullSet()) 8817 return TrivialCase(true); 8818 else if (ExactCR.isEmptySet()) 8819 return TrivialCase(false); 8820 8821 APInt NewRHS; 8822 CmpInst::Predicate NewPred; 8823 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 8824 ICmpInst::isEquality(NewPred)) { 8825 // We were able to convert an inequality to an equality. 8826 Pred = NewPred; 8827 RHS = getConstant(NewRHS); 8828 Changed = SimplifiedByConstantRange = true; 8829 } 8830 } 8831 8832 if (!SimplifiedByConstantRange) { 8833 switch (Pred) { 8834 default: 8835 break; 8836 case ICmpInst::ICMP_EQ: 8837 case ICmpInst::ICMP_NE: 8838 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 8839 if (!RA) 8840 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 8841 if (const SCEVMulExpr *ME = 8842 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 8843 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 8844 ME->getOperand(0)->isAllOnesValue()) { 8845 RHS = AE->getOperand(1); 8846 LHS = ME->getOperand(1); 8847 Changed = true; 8848 } 8849 break; 8850 8851 8852 // The "Should have been caught earlier!" messages refer to the fact 8853 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 8854 // should have fired on the corresponding cases, and canonicalized the 8855 // check to trivial case. 8856 8857 case ICmpInst::ICMP_UGE: 8858 assert(!RA.isMinValue() && "Should have been caught earlier!"); 8859 Pred = ICmpInst::ICMP_UGT; 8860 RHS = getConstant(RA - 1); 8861 Changed = true; 8862 break; 8863 case ICmpInst::ICMP_ULE: 8864 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 8865 Pred = ICmpInst::ICMP_ULT; 8866 RHS = getConstant(RA + 1); 8867 Changed = true; 8868 break; 8869 case ICmpInst::ICMP_SGE: 8870 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 8871 Pred = ICmpInst::ICMP_SGT; 8872 RHS = getConstant(RA - 1); 8873 Changed = true; 8874 break; 8875 case ICmpInst::ICMP_SLE: 8876 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 8877 Pred = ICmpInst::ICMP_SLT; 8878 RHS = getConstant(RA + 1); 8879 Changed = true; 8880 break; 8881 } 8882 } 8883 } 8884 8885 // Check for obvious equality. 8886 if (HasSameValue(LHS, RHS)) { 8887 if (ICmpInst::isTrueWhenEqual(Pred)) 8888 return TrivialCase(true); 8889 if (ICmpInst::isFalseWhenEqual(Pred)) 8890 return TrivialCase(false); 8891 } 8892 8893 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 8894 // adding or subtracting 1 from one of the operands. 8895 switch (Pred) { 8896 case ICmpInst::ICMP_SLE: 8897 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 8898 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8899 SCEV::FlagNSW); 8900 Pred = ICmpInst::ICMP_SLT; 8901 Changed = true; 8902 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 8903 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 8904 SCEV::FlagNSW); 8905 Pred = ICmpInst::ICMP_SLT; 8906 Changed = true; 8907 } 8908 break; 8909 case ICmpInst::ICMP_SGE: 8910 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 8911 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 8912 SCEV::FlagNSW); 8913 Pred = ICmpInst::ICMP_SGT; 8914 Changed = true; 8915 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 8916 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8917 SCEV::FlagNSW); 8918 Pred = ICmpInst::ICMP_SGT; 8919 Changed = true; 8920 } 8921 break; 8922 case ICmpInst::ICMP_ULE: 8923 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 8924 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8925 SCEV::FlagNUW); 8926 Pred = ICmpInst::ICMP_ULT; 8927 Changed = true; 8928 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 8929 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 8930 Pred = ICmpInst::ICMP_ULT; 8931 Changed = true; 8932 } 8933 break; 8934 case ICmpInst::ICMP_UGE: 8935 if (!getUnsignedRangeMin(RHS).isMinValue()) { 8936 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 8937 Pred = ICmpInst::ICMP_UGT; 8938 Changed = true; 8939 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 8940 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8941 SCEV::FlagNUW); 8942 Pred = ICmpInst::ICMP_UGT; 8943 Changed = true; 8944 } 8945 break; 8946 default: 8947 break; 8948 } 8949 8950 // TODO: More simplifications are possible here. 8951 8952 // Recursively simplify until we either hit a recursion limit or nothing 8953 // changes. 8954 if (Changed) 8955 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 8956 8957 return Changed; 8958 } 8959 8960 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 8961 return getSignedRangeMax(S).isNegative(); 8962 } 8963 8964 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 8965 return getSignedRangeMin(S).isStrictlyPositive(); 8966 } 8967 8968 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 8969 return !getSignedRangeMin(S).isNegative(); 8970 } 8971 8972 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 8973 return !getSignedRangeMax(S).isStrictlyPositive(); 8974 } 8975 8976 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 8977 return isKnownNegative(S) || isKnownPositive(S); 8978 } 8979 8980 std::pair<const SCEV *, const SCEV *> 8981 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 8982 // Compute SCEV on entry of loop L. 8983 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 8984 if (Start == getCouldNotCompute()) 8985 return { Start, Start }; 8986 // Compute post increment SCEV for loop L. 8987 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 8988 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 8989 return { Start, PostInc }; 8990 } 8991 8992 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 8993 const SCEV *LHS, const SCEV *RHS) { 8994 // First collect all loops. 8995 SmallPtrSet<const Loop *, 8> LoopsUsed; 8996 getUsedLoops(LHS, LoopsUsed); 8997 getUsedLoops(RHS, LoopsUsed); 8998 8999 if (LoopsUsed.empty()) 9000 return false; 9001 9002 // Domination relationship must be a linear order on collected loops. 9003 #ifndef NDEBUG 9004 for (auto *L1 : LoopsUsed) 9005 for (auto *L2 : LoopsUsed) 9006 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 9007 DT.dominates(L2->getHeader(), L1->getHeader())) && 9008 "Domination relationship is not a linear order"); 9009 #endif 9010 9011 const Loop *MDL = 9012 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 9013 [&](const Loop *L1, const Loop *L2) { 9014 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 9015 }); 9016 9017 // Get init and post increment value for LHS. 9018 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 9019 // if LHS contains unknown non-invariant SCEV then bail out. 9020 if (SplitLHS.first == getCouldNotCompute()) 9021 return false; 9022 assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 9023 // Get init and post increment value for RHS. 9024 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 9025 // if RHS contains unknown non-invariant SCEV then bail out. 9026 if (SplitRHS.first == getCouldNotCompute()) 9027 return false; 9028 assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 9029 // It is possible that init SCEV contains an invariant load but it does 9030 // not dominate MDL and is not available at MDL loop entry, so we should 9031 // check it here. 9032 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || 9033 !isAvailableAtLoopEntry(SplitRHS.first, MDL)) 9034 return false; 9035 9036 // It seems backedge guard check is faster than entry one so in some cases 9037 // it can speed up whole estimation by short circuit 9038 return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, 9039 SplitRHS.second) && 9040 isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first); 9041 } 9042 9043 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 9044 const SCEV *LHS, const SCEV *RHS) { 9045 // Canonicalize the inputs first. 9046 (void)SimplifyICmpOperands(Pred, LHS, RHS); 9047 9048 if (isKnownViaInduction(Pred, LHS, RHS)) 9049 return true; 9050 9051 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 9052 return true; 9053 9054 // Otherwise see what can be done with some simple reasoning. 9055 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); 9056 } 9057 9058 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 9059 const SCEVAddRecExpr *LHS, 9060 const SCEV *RHS) { 9061 const Loop *L = LHS->getLoop(); 9062 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 9063 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 9064 } 9065 9066 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 9067 ICmpInst::Predicate Pred, 9068 bool &Increasing) { 9069 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 9070 9071 #ifndef NDEBUG 9072 // Verify an invariant: inverting the predicate should turn a monotonically 9073 // increasing change to a monotonically decreasing one, and vice versa. 9074 bool IncreasingSwapped; 9075 bool ResultSwapped = isMonotonicPredicateImpl( 9076 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 9077 9078 assert(Result == ResultSwapped && "should be able to analyze both!"); 9079 if (ResultSwapped) 9080 assert(Increasing == !IncreasingSwapped && 9081 "monotonicity should flip as we flip the predicate"); 9082 #endif 9083 9084 return Result; 9085 } 9086 9087 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 9088 ICmpInst::Predicate Pred, 9089 bool &Increasing) { 9090 9091 // A zero step value for LHS means the induction variable is essentially a 9092 // loop invariant value. We don't really depend on the predicate actually 9093 // flipping from false to true (for increasing predicates, and the other way 9094 // around for decreasing predicates), all we care about is that *if* the 9095 // predicate changes then it only changes from false to true. 9096 // 9097 // A zero step value in itself is not very useful, but there may be places 9098 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 9099 // as general as possible. 9100 9101 switch (Pred) { 9102 default: 9103 return false; // Conservative answer 9104 9105 case ICmpInst::ICMP_UGT: 9106 case ICmpInst::ICMP_UGE: 9107 case ICmpInst::ICMP_ULT: 9108 case ICmpInst::ICMP_ULE: 9109 if (!LHS->hasNoUnsignedWrap()) 9110 return false; 9111 9112 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 9113 return true; 9114 9115 case ICmpInst::ICMP_SGT: 9116 case ICmpInst::ICMP_SGE: 9117 case ICmpInst::ICMP_SLT: 9118 case ICmpInst::ICMP_SLE: { 9119 if (!LHS->hasNoSignedWrap()) 9120 return false; 9121 9122 const SCEV *Step = LHS->getStepRecurrence(*this); 9123 9124 if (isKnownNonNegative(Step)) { 9125 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 9126 return true; 9127 } 9128 9129 if (isKnownNonPositive(Step)) { 9130 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 9131 return true; 9132 } 9133 9134 return false; 9135 } 9136 9137 } 9138 9139 llvm_unreachable("switch has default clause!"); 9140 } 9141 9142 bool ScalarEvolution::isLoopInvariantPredicate( 9143 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 9144 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 9145 const SCEV *&InvariantRHS) { 9146 9147 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 9148 if (!isLoopInvariant(RHS, L)) { 9149 if (!isLoopInvariant(LHS, L)) 9150 return false; 9151 9152 std::swap(LHS, RHS); 9153 Pred = ICmpInst::getSwappedPredicate(Pred); 9154 } 9155 9156 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9157 if (!ArLHS || ArLHS->getLoop() != L) 9158 return false; 9159 9160 bool Increasing; 9161 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 9162 return false; 9163 9164 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 9165 // true as the loop iterates, and the backedge is control dependent on 9166 // "ArLHS `Pred` RHS" == true then we can reason as follows: 9167 // 9168 // * if the predicate was false in the first iteration then the predicate 9169 // is never evaluated again, since the loop exits without taking the 9170 // backedge. 9171 // * if the predicate was true in the first iteration then it will 9172 // continue to be true for all future iterations since it is 9173 // monotonically increasing. 9174 // 9175 // For both the above possibilities, we can replace the loop varying 9176 // predicate with its value on the first iteration of the loop (which is 9177 // loop invariant). 9178 // 9179 // A similar reasoning applies for a monotonically decreasing predicate, by 9180 // replacing true with false and false with true in the above two bullets. 9181 9182 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 9183 9184 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 9185 return false; 9186 9187 InvariantPred = Pred; 9188 InvariantLHS = ArLHS->getStart(); 9189 InvariantRHS = RHS; 9190 return true; 9191 } 9192 9193 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 9194 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 9195 if (HasSameValue(LHS, RHS)) 9196 return ICmpInst::isTrueWhenEqual(Pred); 9197 9198 // This code is split out from isKnownPredicate because it is called from 9199 // within isLoopEntryGuardedByCond. 9200 9201 auto CheckRanges = 9202 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 9203 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 9204 .contains(RangeLHS); 9205 }; 9206 9207 // The check at the top of the function catches the case where the values are 9208 // known to be equal. 9209 if (Pred == CmpInst::ICMP_EQ) 9210 return false; 9211 9212 if (Pred == CmpInst::ICMP_NE) 9213 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 9214 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 9215 isKnownNonZero(getMinusSCEV(LHS, RHS)); 9216 9217 if (CmpInst::isSigned(Pred)) 9218 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 9219 9220 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 9221 } 9222 9223 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 9224 const SCEV *LHS, 9225 const SCEV *RHS) { 9226 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 9227 // Return Y via OutY. 9228 auto MatchBinaryAddToConst = 9229 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 9230 SCEV::NoWrapFlags ExpectedFlags) { 9231 const SCEV *NonConstOp, *ConstOp; 9232 SCEV::NoWrapFlags FlagsPresent; 9233 9234 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 9235 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 9236 return false; 9237 9238 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 9239 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 9240 }; 9241 9242 APInt C; 9243 9244 switch (Pred) { 9245 default: 9246 break; 9247 9248 case ICmpInst::ICMP_SGE: 9249 std::swap(LHS, RHS); 9250 LLVM_FALLTHROUGH; 9251 case ICmpInst::ICMP_SLE: 9252 // X s<= (X + C)<nsw> if C >= 0 9253 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 9254 return true; 9255 9256 // (X + C)<nsw> s<= X if C <= 0 9257 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 9258 !C.isStrictlyPositive()) 9259 return true; 9260 break; 9261 9262 case ICmpInst::ICMP_SGT: 9263 std::swap(LHS, RHS); 9264 LLVM_FALLTHROUGH; 9265 case ICmpInst::ICMP_SLT: 9266 // X s< (X + C)<nsw> if C > 0 9267 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 9268 C.isStrictlyPositive()) 9269 return true; 9270 9271 // (X + C)<nsw> s< X if C < 0 9272 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 9273 return true; 9274 break; 9275 } 9276 9277 return false; 9278 } 9279 9280 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 9281 const SCEV *LHS, 9282 const SCEV *RHS) { 9283 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 9284 return false; 9285 9286 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 9287 // the stack can result in exponential time complexity. 9288 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 9289 9290 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 9291 // 9292 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 9293 // isKnownPredicate. isKnownPredicate is more powerful, but also more 9294 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 9295 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 9296 // use isKnownPredicate later if needed. 9297 return isKnownNonNegative(RHS) && 9298 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) && 9299 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS); 9300 } 9301 9302 bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB, 9303 ICmpInst::Predicate Pred, 9304 const SCEV *LHS, const SCEV *RHS) { 9305 // No need to even try if we know the module has no guards. 9306 if (!HasGuards) 9307 return false; 9308 9309 return any_of(*BB, [&](Instruction &I) { 9310 using namespace llvm::PatternMatch; 9311 9312 Value *Condition; 9313 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>( 9314 m_Value(Condition))) && 9315 isImpliedCond(Pred, LHS, RHS, Condition, false); 9316 }); 9317 } 9318 9319 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is 9320 /// protected by a conditional between LHS and RHS. This is used to 9321 /// to eliminate casts. 9322 bool 9323 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, 9324 ICmpInst::Predicate Pred, 9325 const SCEV *LHS, const SCEV *RHS) { 9326 // Interpret a null as meaning no loop, where there is obviously no guard 9327 // (interprocedural conditions notwithstanding). 9328 if (!L) return true; 9329 9330 if (VerifyIR) 9331 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) && 9332 "This cannot be done on broken IR!"); 9333 9334 9335 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9336 return true; 9337 9338 BasicBlock *Latch = L->getLoopLatch(); 9339 if (!Latch) 9340 return false; 9341 9342 BranchInst *LoopContinuePredicate = 9343 dyn_cast<BranchInst>(Latch->getTerminator()); 9344 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() && 9345 isImpliedCond(Pred, LHS, RHS, 9346 LoopContinuePredicate->getCondition(), 9347 LoopContinuePredicate->getSuccessor(0) != L->getHeader())) 9348 return true; 9349 9350 // We don't want more than one activation of the following loops on the stack 9351 // -- that can lead to O(n!) time complexity. 9352 if (WalkingBEDominatingConds) 9353 return false; 9354 9355 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true); 9356 9357 // See if we can exploit a trip count to prove the predicate. 9358 const auto &BETakenInfo = getBackedgeTakenInfo(L); 9359 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this); 9360 if (LatchBECount != getCouldNotCompute()) { 9361 // We know that Latch branches back to the loop header exactly 9362 // LatchBECount times. This means the backdege condition at Latch is 9363 // equivalent to "{0,+,1} u< LatchBECount". 9364 Type *Ty = LatchBECount->getType(); 9365 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW); 9366 const SCEV *LoopCounter = 9367 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags); 9368 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter, 9369 LatchBECount)) 9370 return true; 9371 } 9372 9373 // Check conditions due to any @llvm.assume intrinsics. 9374 for (auto &AssumeVH : AC.assumptions()) { 9375 if (!AssumeVH) 9376 continue; 9377 auto *CI = cast<CallInst>(AssumeVH); 9378 if (!DT.dominates(CI, Latch->getTerminator())) 9379 continue; 9380 9381 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 9382 return true; 9383 } 9384 9385 // If the loop is not reachable from the entry block, we risk running into an 9386 // infinite loop as we walk up into the dom tree. These loops do not matter 9387 // anyway, so we just return a conservative answer when we see them. 9388 if (!DT.isReachableFromEntry(L->getHeader())) 9389 return false; 9390 9391 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 9392 return true; 9393 9394 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 9395 DTN != HeaderDTN; DTN = DTN->getIDom()) { 9396 assert(DTN && "should reach the loop header before reaching the root!"); 9397 9398 BasicBlock *BB = DTN->getBlock(); 9399 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 9400 return true; 9401 9402 BasicBlock *PBB = BB->getSinglePredecessor(); 9403 if (!PBB) 9404 continue; 9405 9406 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 9407 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 9408 continue; 9409 9410 Value *Condition = ContinuePredicate->getCondition(); 9411 9412 // If we have an edge `E` within the loop body that dominates the only 9413 // latch, the condition guarding `E` also guards the backedge. This 9414 // reasoning works only for loops with a single latch. 9415 9416 BasicBlockEdge DominatingEdge(PBB, BB); 9417 if (DominatingEdge.isSingleEdge()) { 9418 // We're constructively (and conservatively) enumerating edges within the 9419 // loop body that dominate the latch. The dominator tree better agree 9420 // with us on this: 9421 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 9422 9423 if (isImpliedCond(Pred, LHS, RHS, Condition, 9424 BB != ContinuePredicate->getSuccessor(0))) 9425 return true; 9426 } 9427 } 9428 9429 return false; 9430 } 9431 9432 bool 9433 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 9434 ICmpInst::Predicate Pred, 9435 const SCEV *LHS, const SCEV *RHS) { 9436 // Interpret a null as meaning no loop, where there is obviously no guard 9437 // (interprocedural conditions notwithstanding). 9438 if (!L) return false; 9439 9440 if (VerifyIR) 9441 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) && 9442 "This cannot be done on broken IR!"); 9443 9444 // Both LHS and RHS must be available at loop entry. 9445 assert(isAvailableAtLoopEntry(LHS, L) && 9446 "LHS is not available at Loop Entry"); 9447 assert(isAvailableAtLoopEntry(RHS, L) && 9448 "RHS is not available at Loop Entry"); 9449 9450 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9451 return true; 9452 9453 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 9454 // the facts (a >= b && a != b) separately. A typical situation is when the 9455 // non-strict comparison is known from ranges and non-equality is known from 9456 // dominating predicates. If we are proving strict comparison, we always try 9457 // to prove non-equality and non-strict comparison separately. 9458 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 9459 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 9460 bool ProvedNonStrictComparison = false; 9461 bool ProvedNonEquality = false; 9462 9463 if (ProvingStrictComparison) { 9464 ProvedNonStrictComparison = 9465 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS); 9466 ProvedNonEquality = 9467 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS); 9468 if (ProvedNonStrictComparison && ProvedNonEquality) 9469 return true; 9470 } 9471 9472 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 9473 auto ProveViaGuard = [&](BasicBlock *Block) { 9474 if (isImpliedViaGuard(Block, Pred, LHS, RHS)) 9475 return true; 9476 if (ProvingStrictComparison) { 9477 if (!ProvedNonStrictComparison) 9478 ProvedNonStrictComparison = 9479 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS); 9480 if (!ProvedNonEquality) 9481 ProvedNonEquality = 9482 isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS); 9483 if (ProvedNonStrictComparison && ProvedNonEquality) 9484 return true; 9485 } 9486 return false; 9487 }; 9488 9489 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 9490 auto ProveViaCond = [&](Value *Condition, bool Inverse) { 9491 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse)) 9492 return true; 9493 if (ProvingStrictComparison) { 9494 if (!ProvedNonStrictComparison) 9495 ProvedNonStrictComparison = 9496 isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse); 9497 if (!ProvedNonEquality) 9498 ProvedNonEquality = 9499 isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse); 9500 if (ProvedNonStrictComparison && ProvedNonEquality) 9501 return true; 9502 } 9503 return false; 9504 }; 9505 9506 // Starting at the loop predecessor, climb up the predecessor chain, as long 9507 // as there are predecessors that can be found that have unique successors 9508 // leading to the original header. 9509 for (std::pair<BasicBlock *, BasicBlock *> 9510 Pair(L->getLoopPredecessor(), L->getHeader()); 9511 Pair.first; 9512 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 9513 9514 if (ProveViaGuard(Pair.first)) 9515 return true; 9516 9517 BranchInst *LoopEntryPredicate = 9518 dyn_cast<BranchInst>(Pair.first->getTerminator()); 9519 if (!LoopEntryPredicate || 9520 LoopEntryPredicate->isUnconditional()) 9521 continue; 9522 9523 if (ProveViaCond(LoopEntryPredicate->getCondition(), 9524 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 9525 return true; 9526 } 9527 9528 // Check conditions due to any @llvm.assume intrinsics. 9529 for (auto &AssumeVH : AC.assumptions()) { 9530 if (!AssumeVH) 9531 continue; 9532 auto *CI = cast<CallInst>(AssumeVH); 9533 if (!DT.dominates(CI, L->getHeader())) 9534 continue; 9535 9536 if (ProveViaCond(CI->getArgOperand(0), false)) 9537 return true; 9538 } 9539 9540 return false; 9541 } 9542 9543 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 9544 const SCEV *LHS, const SCEV *RHS, 9545 Value *FoundCondValue, 9546 bool Inverse) { 9547 if (!PendingLoopPredicates.insert(FoundCondValue).second) 9548 return false; 9549 9550 auto ClearOnExit = 9551 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 9552 9553 // Recursively handle And and Or conditions. 9554 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) { 9555 if (BO->getOpcode() == Instruction::And) { 9556 if (!Inverse) 9557 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || 9558 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); 9559 } else if (BO->getOpcode() == Instruction::Or) { 9560 if (Inverse) 9561 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || 9562 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); 9563 } 9564 } 9565 9566 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); 9567 if (!ICI) return false; 9568 9569 // Now that we found a conditional branch that dominates the loop or controls 9570 // the loop latch. Check to see if it is the comparison we are looking for. 9571 ICmpInst::Predicate FoundPred; 9572 if (Inverse) 9573 FoundPred = ICI->getInversePredicate(); 9574 else 9575 FoundPred = ICI->getPredicate(); 9576 9577 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); 9578 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); 9579 9580 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS); 9581 } 9582 9583 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, 9584 const SCEV *RHS, 9585 ICmpInst::Predicate FoundPred, 9586 const SCEV *FoundLHS, 9587 const SCEV *FoundRHS) { 9588 // Balance the types. 9589 if (getTypeSizeInBits(LHS->getType()) < 9590 getTypeSizeInBits(FoundLHS->getType())) { 9591 if (CmpInst::isSigned(Pred)) { 9592 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 9593 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 9594 } else { 9595 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 9596 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 9597 } 9598 } else if (getTypeSizeInBits(LHS->getType()) > 9599 getTypeSizeInBits(FoundLHS->getType())) { 9600 if (CmpInst::isSigned(FoundPred)) { 9601 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 9602 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 9603 } else { 9604 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 9605 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 9606 } 9607 } 9608 9609 // Canonicalize the query to match the way instcombine will have 9610 // canonicalized the comparison. 9611 if (SimplifyICmpOperands(Pred, LHS, RHS)) 9612 if (LHS == RHS) 9613 return CmpInst::isTrueWhenEqual(Pred); 9614 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 9615 if (FoundLHS == FoundRHS) 9616 return CmpInst::isFalseWhenEqual(FoundPred); 9617 9618 // Check to see if we can make the LHS or RHS match. 9619 if (LHS == FoundRHS || RHS == FoundLHS) { 9620 if (isa<SCEVConstant>(RHS)) { 9621 std::swap(FoundLHS, FoundRHS); 9622 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 9623 } else { 9624 std::swap(LHS, RHS); 9625 Pred = ICmpInst::getSwappedPredicate(Pred); 9626 } 9627 } 9628 9629 // Check whether the found predicate is the same as the desired predicate. 9630 if (FoundPred == Pred) 9631 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 9632 9633 // Check whether swapping the found predicate makes it the same as the 9634 // desired predicate. 9635 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 9636 if (isa<SCEVConstant>(RHS)) 9637 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS); 9638 else 9639 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), 9640 RHS, LHS, FoundLHS, FoundRHS); 9641 } 9642 9643 // Unsigned comparison is the same as signed comparison when both the operands 9644 // are non-negative. 9645 if (CmpInst::isUnsigned(FoundPred) && 9646 CmpInst::getSignedPredicate(FoundPred) == Pred && 9647 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) 9648 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 9649 9650 // Check if we can make progress by sharpening ranges. 9651 if (FoundPred == ICmpInst::ICMP_NE && 9652 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 9653 9654 const SCEVConstant *C = nullptr; 9655 const SCEV *V = nullptr; 9656 9657 if (isa<SCEVConstant>(FoundLHS)) { 9658 C = cast<SCEVConstant>(FoundLHS); 9659 V = FoundRHS; 9660 } else { 9661 C = cast<SCEVConstant>(FoundRHS); 9662 V = FoundLHS; 9663 } 9664 9665 // The guarding predicate tells us that C != V. If the known range 9666 // of V is [C, t), we can sharpen the range to [C + 1, t). The 9667 // range we consider has to correspond to same signedness as the 9668 // predicate we're interested in folding. 9669 9670 APInt Min = ICmpInst::isSigned(Pred) ? 9671 getSignedRangeMin(V) : getUnsignedRangeMin(V); 9672 9673 if (Min == C->getAPInt()) { 9674 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 9675 // This is true even if (Min + 1) wraps around -- in case of 9676 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 9677 9678 APInt SharperMin = Min + 1; 9679 9680 switch (Pred) { 9681 case ICmpInst::ICMP_SGE: 9682 case ICmpInst::ICMP_UGE: 9683 // We know V `Pred` SharperMin. If this implies LHS `Pred` 9684 // RHS, we're done. 9685 if (isImpliedCondOperands(Pred, LHS, RHS, V, 9686 getConstant(SharperMin))) 9687 return true; 9688 LLVM_FALLTHROUGH; 9689 9690 case ICmpInst::ICMP_SGT: 9691 case ICmpInst::ICMP_UGT: 9692 // We know from the range information that (V `Pred` Min || 9693 // V == Min). We know from the guarding condition that !(V 9694 // == Min). This gives us 9695 // 9696 // V `Pred` Min || V == Min && !(V == Min) 9697 // => V `Pred` Min 9698 // 9699 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 9700 9701 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min))) 9702 return true; 9703 LLVM_FALLTHROUGH; 9704 9705 default: 9706 // No change 9707 break; 9708 } 9709 } 9710 } 9711 9712 // Check whether the actual condition is beyond sufficient. 9713 if (FoundPred == ICmpInst::ICMP_EQ) 9714 if (ICmpInst::isTrueWhenEqual(Pred)) 9715 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9716 return true; 9717 if (Pred == ICmpInst::ICMP_NE) 9718 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 9719 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 9720 return true; 9721 9722 // Otherwise assume the worst. 9723 return false; 9724 } 9725 9726 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 9727 const SCEV *&L, const SCEV *&R, 9728 SCEV::NoWrapFlags &Flags) { 9729 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 9730 if (!AE || AE->getNumOperands() != 2) 9731 return false; 9732 9733 L = AE->getOperand(0); 9734 R = AE->getOperand(1); 9735 Flags = AE->getNoWrapFlags(); 9736 return true; 9737 } 9738 9739 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 9740 const SCEV *Less) { 9741 // We avoid subtracting expressions here because this function is usually 9742 // fairly deep in the call stack (i.e. is called many times). 9743 9744 // X - X = 0. 9745 if (More == Less) 9746 return APInt(getTypeSizeInBits(More->getType()), 0); 9747 9748 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 9749 const auto *LAR = cast<SCEVAddRecExpr>(Less); 9750 const auto *MAR = cast<SCEVAddRecExpr>(More); 9751 9752 if (LAR->getLoop() != MAR->getLoop()) 9753 return None; 9754 9755 // We look at affine expressions only; not for correctness but to keep 9756 // getStepRecurrence cheap. 9757 if (!LAR->isAffine() || !MAR->isAffine()) 9758 return None; 9759 9760 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 9761 return None; 9762 9763 Less = LAR->getStart(); 9764 More = MAR->getStart(); 9765 9766 // fall through 9767 } 9768 9769 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 9770 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 9771 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 9772 return M - L; 9773 } 9774 9775 SCEV::NoWrapFlags Flags; 9776 const SCEV *LLess = nullptr, *RLess = nullptr; 9777 const SCEV *LMore = nullptr, *RMore = nullptr; 9778 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 9779 // Compare (X + C1) vs X. 9780 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 9781 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 9782 if (RLess == More) 9783 return -(C1->getAPInt()); 9784 9785 // Compare X vs (X + C2). 9786 if (splitBinaryAdd(More, LMore, RMore, Flags)) 9787 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 9788 if (RMore == Less) 9789 return C2->getAPInt(); 9790 9791 // Compare (X + C1) vs (X + C2). 9792 if (C1 && C2 && RLess == RMore) 9793 return C2->getAPInt() - C1->getAPInt(); 9794 9795 return None; 9796 } 9797 9798 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 9799 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9800 const SCEV *FoundLHS, const SCEV *FoundRHS) { 9801 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 9802 return false; 9803 9804 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9805 if (!AddRecLHS) 9806 return false; 9807 9808 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 9809 if (!AddRecFoundLHS) 9810 return false; 9811 9812 // We'd like to let SCEV reason about control dependencies, so we constrain 9813 // both the inequalities to be about add recurrences on the same loop. This 9814 // way we can use isLoopEntryGuardedByCond later. 9815 9816 const Loop *L = AddRecFoundLHS->getLoop(); 9817 if (L != AddRecLHS->getLoop()) 9818 return false; 9819 9820 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 9821 // 9822 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 9823 // ... (2) 9824 // 9825 // Informal proof for (2), assuming (1) [*]: 9826 // 9827 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 9828 // 9829 // Then 9830 // 9831 // FoundLHS s< FoundRHS s< INT_MIN - C 9832 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 9833 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 9834 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 9835 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 9836 // <=> FoundLHS + C s< FoundRHS + C 9837 // 9838 // [*]: (1) can be proved by ruling out overflow. 9839 // 9840 // [**]: This can be proved by analyzing all the four possibilities: 9841 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 9842 // (A s>= 0, B s>= 0). 9843 // 9844 // Note: 9845 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 9846 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 9847 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 9848 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 9849 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 9850 // C)". 9851 9852 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 9853 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 9854 if (!LDiff || !RDiff || *LDiff != *RDiff) 9855 return false; 9856 9857 if (LDiff->isMinValue()) 9858 return true; 9859 9860 APInt FoundRHSLimit; 9861 9862 if (Pred == CmpInst::ICMP_ULT) { 9863 FoundRHSLimit = -(*RDiff); 9864 } else { 9865 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 9866 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 9867 } 9868 9869 // Try to prove (1) or (2), as needed. 9870 return isAvailableAtLoopEntry(FoundRHS, L) && 9871 isLoopEntryGuardedByCond(L, Pred, FoundRHS, 9872 getConstant(FoundRHSLimit)); 9873 } 9874 9875 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred, 9876 const SCEV *LHS, const SCEV *RHS, 9877 const SCEV *FoundLHS, 9878 const SCEV *FoundRHS, unsigned Depth) { 9879 const PHINode *LPhi = nullptr, *RPhi = nullptr; 9880 9881 auto ClearOnExit = make_scope_exit([&]() { 9882 if (LPhi) { 9883 bool Erased = PendingMerges.erase(LPhi); 9884 assert(Erased && "Failed to erase LPhi!"); 9885 (void)Erased; 9886 } 9887 if (RPhi) { 9888 bool Erased = PendingMerges.erase(RPhi); 9889 assert(Erased && "Failed to erase RPhi!"); 9890 (void)Erased; 9891 } 9892 }); 9893 9894 // Find respective Phis and check that they are not being pending. 9895 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) 9896 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) { 9897 if (!PendingMerges.insert(Phi).second) 9898 return false; 9899 LPhi = Phi; 9900 } 9901 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS)) 9902 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) { 9903 // If we detect a loop of Phi nodes being processed by this method, for 9904 // example: 9905 // 9906 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ] 9907 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ] 9908 // 9909 // we don't want to deal with a case that complex, so return conservative 9910 // answer false. 9911 if (!PendingMerges.insert(Phi).second) 9912 return false; 9913 RPhi = Phi; 9914 } 9915 9916 // If none of LHS, RHS is a Phi, nothing to do here. 9917 if (!LPhi && !RPhi) 9918 return false; 9919 9920 // If there is a SCEVUnknown Phi we are interested in, make it left. 9921 if (!LPhi) { 9922 std::swap(LHS, RHS); 9923 std::swap(FoundLHS, FoundRHS); 9924 std::swap(LPhi, RPhi); 9925 Pred = ICmpInst::getSwappedPredicate(Pred); 9926 } 9927 9928 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!"); 9929 const BasicBlock *LBB = LPhi->getParent(); 9930 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 9931 9932 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) { 9933 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) || 9934 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) || 9935 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth); 9936 }; 9937 9938 if (RPhi && RPhi->getParent() == LBB) { 9939 // Case one: RHS is also a SCEVUnknown Phi from the same basic block. 9940 // If we compare two Phis from the same block, and for each entry block 9941 // the predicate is true for incoming values from this block, then the 9942 // predicate is also true for the Phis. 9943 for (const BasicBlock *IncBB : predecessors(LBB)) { 9944 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 9945 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB)); 9946 if (!ProvedEasily(L, R)) 9947 return false; 9948 } 9949 } else if (RAR && RAR->getLoop()->getHeader() == LBB) { 9950 // Case two: RHS is also a Phi from the same basic block, and it is an 9951 // AddRec. It means that there is a loop which has both AddRec and Unknown 9952 // PHIs, for it we can compare incoming values of AddRec from above the loop 9953 // and latch with their respective incoming values of LPhi. 9954 // TODO: Generalize to handle loops with many inputs in a header. 9955 if (LPhi->getNumIncomingValues() != 2) return false; 9956 9957 auto *RLoop = RAR->getLoop(); 9958 auto *Predecessor = RLoop->getLoopPredecessor(); 9959 assert(Predecessor && "Loop with AddRec with no predecessor?"); 9960 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor)); 9961 if (!ProvedEasily(L1, RAR->getStart())) 9962 return false; 9963 auto *Latch = RLoop->getLoopLatch(); 9964 assert(Latch && "Loop with AddRec with no latch?"); 9965 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch)); 9966 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this))) 9967 return false; 9968 } else { 9969 // In all other cases go over inputs of LHS and compare each of them to RHS, 9970 // the predicate is true for (LHS, RHS) if it is true for all such pairs. 9971 // At this point RHS is either a non-Phi, or it is a Phi from some block 9972 // different from LBB. 9973 for (const BasicBlock *IncBB : predecessors(LBB)) { 9974 // Check that RHS is available in this block. 9975 if (!dominates(RHS, IncBB)) 9976 return false; 9977 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 9978 if (!ProvedEasily(L, RHS)) 9979 return false; 9980 } 9981 } 9982 return true; 9983 } 9984 9985 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 9986 const SCEV *LHS, const SCEV *RHS, 9987 const SCEV *FoundLHS, 9988 const SCEV *FoundRHS) { 9989 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9990 return true; 9991 9992 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9993 return true; 9994 9995 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 9996 FoundLHS, FoundRHS) || 9997 // ~x < ~y --> x > y 9998 isImpliedCondOperandsHelper(Pred, LHS, RHS, 9999 getNotSCEV(FoundRHS), 10000 getNotSCEV(FoundLHS)); 10001 } 10002 10003 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values? 10004 template <typename MinMaxExprType> 10005 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, 10006 const SCEV *Candidate) { 10007 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr); 10008 if (!MinMaxExpr) 10009 return false; 10010 10011 return find(MinMaxExpr->operands(), Candidate) != MinMaxExpr->op_end(); 10012 } 10013 10014 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 10015 ICmpInst::Predicate Pred, 10016 const SCEV *LHS, const SCEV *RHS) { 10017 // If both sides are affine addrecs for the same loop, with equal 10018 // steps, and we know the recurrences don't wrap, then we only 10019 // need to check the predicate on the starting values. 10020 10021 if (!ICmpInst::isRelational(Pred)) 10022 return false; 10023 10024 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 10025 if (!LAR) 10026 return false; 10027 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 10028 if (!RAR) 10029 return false; 10030 if (LAR->getLoop() != RAR->getLoop()) 10031 return false; 10032 if (!LAR->isAffine() || !RAR->isAffine()) 10033 return false; 10034 10035 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 10036 return false; 10037 10038 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 10039 SCEV::FlagNSW : SCEV::FlagNUW; 10040 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) 10041 return false; 10042 10043 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); 10044 } 10045 10046 /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max 10047 /// expression? 10048 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, 10049 ICmpInst::Predicate Pred, 10050 const SCEV *LHS, const SCEV *RHS) { 10051 switch (Pred) { 10052 default: 10053 return false; 10054 10055 case ICmpInst::ICMP_SGE: 10056 std::swap(LHS, RHS); 10057 LLVM_FALLTHROUGH; 10058 case ICmpInst::ICMP_SLE: 10059 return 10060 // min(A, ...) <= A 10061 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) || 10062 // A <= max(A, ...) 10063 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); 10064 10065 case ICmpInst::ICMP_UGE: 10066 std::swap(LHS, RHS); 10067 LLVM_FALLTHROUGH; 10068 case ICmpInst::ICMP_ULE: 10069 return 10070 // min(A, ...) <= A 10071 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) || 10072 // A <= max(A, ...) 10073 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS); 10074 } 10075 10076 llvm_unreachable("covered switch fell through?!"); 10077 } 10078 10079 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred, 10080 const SCEV *LHS, const SCEV *RHS, 10081 const SCEV *FoundLHS, 10082 const SCEV *FoundRHS, 10083 unsigned Depth) { 10084 assert(getTypeSizeInBits(LHS->getType()) == 10085 getTypeSizeInBits(RHS->getType()) && 10086 "LHS and RHS have different sizes?"); 10087 assert(getTypeSizeInBits(FoundLHS->getType()) == 10088 getTypeSizeInBits(FoundRHS->getType()) && 10089 "FoundLHS and FoundRHS have different sizes?"); 10090 // We want to avoid hurting the compile time with analysis of too big trees. 10091 if (Depth > MaxSCEVOperationsImplicationDepth) 10092 return false; 10093 // We only want to work with ICMP_SGT comparison so far. 10094 // TODO: Extend to ICMP_UGT? 10095 if (Pred == ICmpInst::ICMP_SLT) { 10096 Pred = ICmpInst::ICMP_SGT; 10097 std::swap(LHS, RHS); 10098 std::swap(FoundLHS, FoundRHS); 10099 } 10100 if (Pred != ICmpInst::ICMP_SGT) 10101 return false; 10102 10103 auto GetOpFromSExt = [&](const SCEV *S) { 10104 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S)) 10105 return Ext->getOperand(); 10106 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off 10107 // the constant in some cases. 10108 return S; 10109 }; 10110 10111 // Acquire values from extensions. 10112 auto *OrigLHS = LHS; 10113 auto *OrigFoundLHS = FoundLHS; 10114 LHS = GetOpFromSExt(LHS); 10115 FoundLHS = GetOpFromSExt(FoundLHS); 10116 10117 // Is the SGT predicate can be proved trivially or using the found context. 10118 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) { 10119 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) || 10120 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS, 10121 FoundRHS, Depth + 1); 10122 }; 10123 10124 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) { 10125 // We want to avoid creation of any new non-constant SCEV. Since we are 10126 // going to compare the operands to RHS, we should be certain that we don't 10127 // need any size extensions for this. So let's decline all cases when the 10128 // sizes of types of LHS and RHS do not match. 10129 // TODO: Maybe try to get RHS from sext to catch more cases? 10130 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType())) 10131 return false; 10132 10133 // Should not overflow. 10134 if (!LHSAddExpr->hasNoSignedWrap()) 10135 return false; 10136 10137 auto *LL = LHSAddExpr->getOperand(0); 10138 auto *LR = LHSAddExpr->getOperand(1); 10139 auto *MinusOne = getNegativeSCEV(getOne(RHS->getType())); 10140 10141 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context. 10142 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) { 10143 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS); 10144 }; 10145 // Try to prove the following rule: 10146 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS). 10147 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS). 10148 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL)) 10149 return true; 10150 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) { 10151 Value *LL, *LR; 10152 // FIXME: Once we have SDiv implemented, we can get rid of this matching. 10153 10154 using namespace llvm::PatternMatch; 10155 10156 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) { 10157 // Rules for division. 10158 // We are going to perform some comparisons with Denominator and its 10159 // derivative expressions. In general case, creating a SCEV for it may 10160 // lead to a complex analysis of the entire graph, and in particular it 10161 // can request trip count recalculation for the same loop. This would 10162 // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid 10163 // this, we only want to create SCEVs that are constants in this section. 10164 // So we bail if Denominator is not a constant. 10165 if (!isa<ConstantInt>(LR)) 10166 return false; 10167 10168 auto *Denominator = cast<SCEVConstant>(getSCEV(LR)); 10169 10170 // We want to make sure that LHS = FoundLHS / Denominator. If it is so, 10171 // then a SCEV for the numerator already exists and matches with FoundLHS. 10172 auto *Numerator = getExistingSCEV(LL); 10173 if (!Numerator || Numerator->getType() != FoundLHS->getType()) 10174 return false; 10175 10176 // Make sure that the numerator matches with FoundLHS and the denominator 10177 // is positive. 10178 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator)) 10179 return false; 10180 10181 auto *DTy = Denominator->getType(); 10182 auto *FRHSTy = FoundRHS->getType(); 10183 if (DTy->isPointerTy() != FRHSTy->isPointerTy()) 10184 // One of types is a pointer and another one is not. We cannot extend 10185 // them properly to a wider type, so let us just reject this case. 10186 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help 10187 // to avoid this check. 10188 return false; 10189 10190 // Given that: 10191 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0. 10192 auto *WTy = getWiderType(DTy, FRHSTy); 10193 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy); 10194 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy); 10195 10196 // Try to prove the following rule: 10197 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS). 10198 // For example, given that FoundLHS > 2. It means that FoundLHS is at 10199 // least 3. If we divide it by Denominator < 4, we will have at least 1. 10200 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2)); 10201 if (isKnownNonPositive(RHS) && 10202 IsSGTViaContext(FoundRHSExt, DenomMinusTwo)) 10203 return true; 10204 10205 // Try to prove the following rule: 10206 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS). 10207 // For example, given that FoundLHS > -3. Then FoundLHS is at least -2. 10208 // If we divide it by Denominator > 2, then: 10209 // 1. If FoundLHS is negative, then the result is 0. 10210 // 2. If FoundLHS is non-negative, then the result is non-negative. 10211 // Anyways, the result is non-negative. 10212 auto *MinusOne = getNegativeSCEV(getOne(WTy)); 10213 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt); 10214 if (isKnownNegative(RHS) && 10215 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne)) 10216 return true; 10217 } 10218 } 10219 10220 // If our expression contained SCEVUnknown Phis, and we split it down and now 10221 // need to prove something for them, try to prove the predicate for every 10222 // possible incoming values of those Phis. 10223 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1)) 10224 return true; 10225 10226 return false; 10227 } 10228 10229 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, 10230 const SCEV *LHS, const SCEV *RHS) { 10231 // zext x u<= sext x, sext x s<= zext x 10232 switch (Pred) { 10233 case ICmpInst::ICMP_SGE: 10234 std::swap(LHS, RHS); 10235 LLVM_FALLTHROUGH; 10236 case ICmpInst::ICMP_SLE: { 10237 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt. 10238 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS); 10239 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS); 10240 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 10241 return true; 10242 break; 10243 } 10244 case ICmpInst::ICMP_UGE: 10245 std::swap(LHS, RHS); 10246 LLVM_FALLTHROUGH; 10247 case ICmpInst::ICMP_ULE: { 10248 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt. 10249 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS); 10250 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS); 10251 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 10252 return true; 10253 break; 10254 } 10255 default: 10256 break; 10257 }; 10258 return false; 10259 } 10260 10261 bool 10262 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred, 10263 const SCEV *LHS, const SCEV *RHS) { 10264 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) || 10265 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) || 10266 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) || 10267 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) || 10268 isKnownPredicateViaNoOverflow(Pred, LHS, RHS); 10269 } 10270 10271 bool 10272 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, 10273 const SCEV *LHS, const SCEV *RHS, 10274 const SCEV *FoundLHS, 10275 const SCEV *FoundRHS) { 10276 switch (Pred) { 10277 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 10278 case ICmpInst::ICMP_EQ: 10279 case ICmpInst::ICMP_NE: 10280 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) 10281 return true; 10282 break; 10283 case ICmpInst::ICMP_SLT: 10284 case ICmpInst::ICMP_SLE: 10285 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) && 10286 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS)) 10287 return true; 10288 break; 10289 case ICmpInst::ICMP_SGT: 10290 case ICmpInst::ICMP_SGE: 10291 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) && 10292 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS)) 10293 return true; 10294 break; 10295 case ICmpInst::ICMP_ULT: 10296 case ICmpInst::ICMP_ULE: 10297 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) && 10298 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS)) 10299 return true; 10300 break; 10301 case ICmpInst::ICMP_UGT: 10302 case ICmpInst::ICMP_UGE: 10303 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) && 10304 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS)) 10305 return true; 10306 break; 10307 } 10308 10309 // Maybe it can be proved via operations? 10310 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10311 return true; 10312 10313 return false; 10314 } 10315 10316 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, 10317 const SCEV *LHS, 10318 const SCEV *RHS, 10319 const SCEV *FoundLHS, 10320 const SCEV *FoundRHS) { 10321 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS)) 10322 // The restriction on `FoundRHS` be lifted easily -- it exists only to 10323 // reduce the compile time impact of this optimization. 10324 return false; 10325 10326 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS); 10327 if (!Addend) 10328 return false; 10329 10330 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt(); 10331 10332 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the 10333 // antecedent "`FoundLHS` `Pred` `FoundRHS`". 10334 ConstantRange FoundLHSRange = 10335 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 10336 10337 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 10338 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 10339 10340 // We can also compute the range of values for `LHS` that satisfy the 10341 // consequent, "`LHS` `Pred` `RHS`": 10342 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 10343 ConstantRange SatisfyingLHSRange = 10344 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 10345 10346 // The antecedent implies the consequent if every value of `LHS` that 10347 // satisfies the antecedent also satisfies the consequent. 10348 return SatisfyingLHSRange.contains(LHSRange); 10349 } 10350 10351 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 10352 bool IsSigned, bool NoWrap) { 10353 assert(isKnownPositive(Stride) && "Positive stride expected!"); 10354 10355 if (NoWrap) return false; 10356 10357 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10358 const SCEV *One = getOne(Stride->getType()); 10359 10360 if (IsSigned) { 10361 APInt MaxRHS = getSignedRangeMax(RHS); 10362 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 10363 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10364 10365 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 10366 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 10367 } 10368 10369 APInt MaxRHS = getUnsignedRangeMax(RHS); 10370 APInt MaxValue = APInt::getMaxValue(BitWidth); 10371 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10372 10373 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 10374 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 10375 } 10376 10377 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 10378 bool IsSigned, bool NoWrap) { 10379 if (NoWrap) return false; 10380 10381 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10382 const SCEV *One = getOne(Stride->getType()); 10383 10384 if (IsSigned) { 10385 APInt MinRHS = getSignedRangeMin(RHS); 10386 APInt MinValue = APInt::getSignedMinValue(BitWidth); 10387 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10388 10389 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 10390 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 10391 } 10392 10393 APInt MinRHS = getUnsignedRangeMin(RHS); 10394 APInt MinValue = APInt::getMinValue(BitWidth); 10395 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10396 10397 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 10398 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 10399 } 10400 10401 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 10402 bool Equality) { 10403 const SCEV *One = getOne(Step->getType()); 10404 Delta = Equality ? getAddExpr(Delta, Step) 10405 : getAddExpr(Delta, getMinusSCEV(Step, One)); 10406 return getUDivExpr(Delta, Step); 10407 } 10408 10409 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 10410 const SCEV *Stride, 10411 const SCEV *End, 10412 unsigned BitWidth, 10413 bool IsSigned) { 10414 10415 assert(!isKnownNonPositive(Stride) && 10416 "Stride is expected strictly positive!"); 10417 // Calculate the maximum backedge count based on the range of values 10418 // permitted by Start, End, and Stride. 10419 const SCEV *MaxBECount; 10420 APInt MinStart = 10421 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 10422 10423 APInt StrideForMaxBECount = 10424 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 10425 10426 // We already know that the stride is positive, so we paper over conservatism 10427 // in our range computation by forcing StrideForMaxBECount to be at least one. 10428 // In theory this is unnecessary, but we expect MaxBECount to be a 10429 // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there 10430 // is nothing to constant fold it to). 10431 APInt One(BitWidth, 1, IsSigned); 10432 StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount); 10433 10434 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 10435 : APInt::getMaxValue(BitWidth); 10436 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 10437 10438 // Although End can be a MAX expression we estimate MaxEnd considering only 10439 // the case End = RHS of the loop termination condition. This is safe because 10440 // in the other case (End - Start) is zero, leading to a zero maximum backedge 10441 // taken count. 10442 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) 10443 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 10444 10445 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */, 10446 getConstant(StrideForMaxBECount) /* Step */, 10447 false /* Equality */); 10448 10449 return MaxBECount; 10450 } 10451 10452 ScalarEvolution::ExitLimit 10453 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 10454 const Loop *L, bool IsSigned, 10455 bool ControlsExit, bool AllowPredicates) { 10456 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10457 10458 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10459 bool PredicatedIV = false; 10460 10461 if (!IV && AllowPredicates) { 10462 // Try to make this an AddRec using runtime tests, in the first X 10463 // iterations of this loop, where X is the SCEV expression found by the 10464 // algorithm below. 10465 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10466 PredicatedIV = true; 10467 } 10468 10469 // Avoid weird loops 10470 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10471 return getCouldNotCompute(); 10472 10473 bool NoWrap = ControlsExit && 10474 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 10475 10476 const SCEV *Stride = IV->getStepRecurrence(*this); 10477 10478 bool PositiveStride = isKnownPositive(Stride); 10479 10480 // Avoid negative or zero stride values. 10481 if (!PositiveStride) { 10482 // We can compute the correct backedge taken count for loops with unknown 10483 // strides if we can prove that the loop is not an infinite loop with side 10484 // effects. Here's the loop structure we are trying to handle - 10485 // 10486 // i = start 10487 // do { 10488 // A[i] = i; 10489 // i += s; 10490 // } while (i < end); 10491 // 10492 // The backedge taken count for such loops is evaluated as - 10493 // (max(end, start + stride) - start - 1) /u stride 10494 // 10495 // The additional preconditions that we need to check to prove correctness 10496 // of the above formula is as follows - 10497 // 10498 // a) IV is either nuw or nsw depending upon signedness (indicated by the 10499 // NoWrap flag). 10500 // b) loop is single exit with no side effects. 10501 // 10502 // 10503 // Precondition a) implies that if the stride is negative, this is a single 10504 // trip loop. The backedge taken count formula reduces to zero in this case. 10505 // 10506 // Precondition b) implies that the unknown stride cannot be zero otherwise 10507 // we have UB. 10508 // 10509 // The positive stride case is the same as isKnownPositive(Stride) returning 10510 // true (original behavior of the function). 10511 // 10512 // We want to make sure that the stride is truly unknown as there are edge 10513 // cases where ScalarEvolution propagates no wrap flags to the 10514 // post-increment/decrement IV even though the increment/decrement operation 10515 // itself is wrapping. The computed backedge taken count may be wrong in 10516 // such cases. This is prevented by checking that the stride is not known to 10517 // be either positive or non-positive. For example, no wrap flags are 10518 // propagated to the post-increment IV of this loop with a trip count of 2 - 10519 // 10520 // unsigned char i; 10521 // for(i=127; i<128; i+=129) 10522 // A[i] = i; 10523 // 10524 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) || 10525 !loopHasNoSideEffects(L)) 10526 return getCouldNotCompute(); 10527 } else if (!Stride->isOne() && 10528 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) 10529 // Avoid proven overflow cases: this will ensure that the backedge taken 10530 // count will not generate any unsigned overflow. Relaxed no-overflow 10531 // conditions exploit NoWrapFlags, allowing to optimize in presence of 10532 // undefined behaviors like the case of C language. 10533 return getCouldNotCompute(); 10534 10535 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 10536 : ICmpInst::ICMP_ULT; 10537 const SCEV *Start = IV->getStart(); 10538 const SCEV *End = RHS; 10539 // When the RHS is not invariant, we do not know the end bound of the loop and 10540 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 10541 // calculate the MaxBECount, given the start, stride and max value for the end 10542 // bound of the loop (RHS), and the fact that IV does not overflow (which is 10543 // checked above). 10544 if (!isLoopInvariant(RHS, L)) { 10545 const SCEV *MaxBECount = computeMaxBECountForLT( 10546 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10547 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 10548 false /*MaxOrZero*/, Predicates); 10549 } 10550 // If the backedge is taken at least once, then it will be taken 10551 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 10552 // is the LHS value of the less-than comparison the first time it is evaluated 10553 // and End is the RHS. 10554 const SCEV *BECountIfBackedgeTaken = 10555 computeBECount(getMinusSCEV(End, Start), Stride, false); 10556 // If the loop entry is guarded by the result of the backedge test of the 10557 // first loop iteration, then we know the backedge will be taken at least 10558 // once and so the backedge taken count is as above. If not then we use the 10559 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 10560 // as if the backedge is taken at least once max(End,Start) is End and so the 10561 // result is as above, and if not max(End,Start) is Start so we get a backedge 10562 // count of zero. 10563 const SCEV *BECount; 10564 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 10565 BECount = BECountIfBackedgeTaken; 10566 else { 10567 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 10568 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 10569 } 10570 10571 const SCEV *MaxBECount; 10572 bool MaxOrZero = false; 10573 if (isa<SCEVConstant>(BECount)) 10574 MaxBECount = BECount; 10575 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 10576 // If we know exactly how many times the backedge will be taken if it's 10577 // taken at least once, then the backedge count will either be that or 10578 // zero. 10579 MaxBECount = BECountIfBackedgeTaken; 10580 MaxOrZero = true; 10581 } else { 10582 MaxBECount = computeMaxBECountForLT( 10583 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10584 } 10585 10586 if (isa<SCEVCouldNotCompute>(MaxBECount) && 10587 !isa<SCEVCouldNotCompute>(BECount)) 10588 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 10589 10590 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 10591 } 10592 10593 ScalarEvolution::ExitLimit 10594 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 10595 const Loop *L, bool IsSigned, 10596 bool ControlsExit, bool AllowPredicates) { 10597 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10598 // We handle only IV > Invariant 10599 if (!isLoopInvariant(RHS, L)) 10600 return getCouldNotCompute(); 10601 10602 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10603 if (!IV && AllowPredicates) 10604 // Try to make this an AddRec using runtime tests, in the first X 10605 // iterations of this loop, where X is the SCEV expression found by the 10606 // algorithm below. 10607 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10608 10609 // Avoid weird loops 10610 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10611 return getCouldNotCompute(); 10612 10613 bool NoWrap = ControlsExit && 10614 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 10615 10616 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 10617 10618 // Avoid negative or zero stride values 10619 if (!isKnownPositive(Stride)) 10620 return getCouldNotCompute(); 10621 10622 // Avoid proven overflow cases: this will ensure that the backedge taken count 10623 // will not generate any unsigned overflow. Relaxed no-overflow conditions 10624 // exploit NoWrapFlags, allowing to optimize in presence of undefined 10625 // behaviors like the case of C language. 10626 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 10627 return getCouldNotCompute(); 10628 10629 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 10630 : ICmpInst::ICMP_UGT; 10631 10632 const SCEV *Start = IV->getStart(); 10633 const SCEV *End = RHS; 10634 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) 10635 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 10636 10637 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 10638 10639 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 10640 : getUnsignedRangeMax(Start); 10641 10642 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 10643 : getUnsignedRangeMin(Stride); 10644 10645 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 10646 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 10647 : APInt::getMinValue(BitWidth) + (MinStride - 1); 10648 10649 // Although End can be a MIN expression we estimate MinEnd considering only 10650 // the case End = RHS. This is safe because in the other case (Start - End) 10651 // is zero, leading to a zero maximum backedge taken count. 10652 APInt MinEnd = 10653 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 10654 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 10655 10656 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) 10657 ? BECount 10658 : computeBECount(getConstant(MaxStart - MinEnd), 10659 getConstant(MinStride), false); 10660 10661 if (isa<SCEVCouldNotCompute>(MaxBECount)) 10662 MaxBECount = BECount; 10663 10664 return ExitLimit(BECount, MaxBECount, false, Predicates); 10665 } 10666 10667 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 10668 ScalarEvolution &SE) const { 10669 if (Range.isFullSet()) // Infinite loop. 10670 return SE.getCouldNotCompute(); 10671 10672 // If the start is a non-zero constant, shift the range to simplify things. 10673 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 10674 if (!SC->getValue()->isZero()) { 10675 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 10676 Operands[0] = SE.getZero(SC->getType()); 10677 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 10678 getNoWrapFlags(FlagNW)); 10679 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 10680 return ShiftedAddRec->getNumIterationsInRange( 10681 Range.subtract(SC->getAPInt()), SE); 10682 // This is strange and shouldn't happen. 10683 return SE.getCouldNotCompute(); 10684 } 10685 10686 // The only time we can solve this is when we have all constant indices. 10687 // Otherwise, we cannot determine the overflow conditions. 10688 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 10689 return SE.getCouldNotCompute(); 10690 10691 // Okay at this point we know that all elements of the chrec are constants and 10692 // that the start element is zero. 10693 10694 // First check to see if the range contains zero. If not, the first 10695 // iteration exits. 10696 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 10697 if (!Range.contains(APInt(BitWidth, 0))) 10698 return SE.getZero(getType()); 10699 10700 if (isAffine()) { 10701 // If this is an affine expression then we have this situation: 10702 // Solve {0,+,A} in Range === Ax in Range 10703 10704 // We know that zero is in the range. If A is positive then we know that 10705 // the upper value of the range must be the first possible exit value. 10706 // If A is negative then the lower of the range is the last possible loop 10707 // value. Also note that we already checked for a full range. 10708 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 10709 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 10710 10711 // The exit value should be (End+A)/A. 10712 APInt ExitVal = (End + A).udiv(A); 10713 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 10714 10715 // Evaluate at the exit value. If we really did fall out of the valid 10716 // range, then we computed our trip count, otherwise wrap around or other 10717 // things must have happened. 10718 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 10719 if (Range.contains(Val->getValue())) 10720 return SE.getCouldNotCompute(); // Something strange happened 10721 10722 // Ensure that the previous value is in the range. This is a sanity check. 10723 assert(Range.contains( 10724 EvaluateConstantChrecAtConstant(this, 10725 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 10726 "Linear scev computation is off in a bad way!"); 10727 return SE.getConstant(ExitValue); 10728 } 10729 10730 if (isQuadratic()) { 10731 if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) 10732 return SE.getConstant(S.getValue()); 10733 } 10734 10735 return SE.getCouldNotCompute(); 10736 } 10737 10738 const SCEVAddRecExpr * 10739 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { 10740 assert(getNumOperands() > 1 && "AddRec with zero step?"); 10741 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), 10742 // but in this case we cannot guarantee that the value returned will be an 10743 // AddRec because SCEV does not have a fixed point where it stops 10744 // simplification: it is legal to return ({rec1} + {rec2}). For example, it 10745 // may happen if we reach arithmetic depth limit while simplifying. So we 10746 // construct the returned value explicitly. 10747 SmallVector<const SCEV *, 3> Ops; 10748 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and 10749 // (this + Step) is {A+B,+,B+C,+...,+,N}. 10750 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) 10751 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); 10752 // We know that the last operand is not a constant zero (otherwise it would 10753 // have been popped out earlier). This guarantees us that if the result has 10754 // the same last operand, then it will also not be popped out, meaning that 10755 // the returned value will be an AddRec. 10756 const SCEV *Last = getOperand(getNumOperands() - 1); 10757 assert(!Last->isZero() && "Recurrency with zero step?"); 10758 Ops.push_back(Last); 10759 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(), 10760 SCEV::FlagAnyWrap)); 10761 } 10762 10763 // Return true when S contains at least an undef value. 10764 static inline bool containsUndefs(const SCEV *S) { 10765 return SCEVExprContains(S, [](const SCEV *S) { 10766 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 10767 return isa<UndefValue>(SU->getValue()); 10768 return false; 10769 }); 10770 } 10771 10772 namespace { 10773 10774 // Collect all steps of SCEV expressions. 10775 struct SCEVCollectStrides { 10776 ScalarEvolution &SE; 10777 SmallVectorImpl<const SCEV *> &Strides; 10778 10779 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) 10780 : SE(SE), Strides(S) {} 10781 10782 bool follow(const SCEV *S) { 10783 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 10784 Strides.push_back(AR->getStepRecurrence(SE)); 10785 return true; 10786 } 10787 10788 bool isDone() const { return false; } 10789 }; 10790 10791 // Collect all SCEVUnknown and SCEVMulExpr expressions. 10792 struct SCEVCollectTerms { 10793 SmallVectorImpl<const SCEV *> &Terms; 10794 10795 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {} 10796 10797 bool follow(const SCEV *S) { 10798 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) || 10799 isa<SCEVSignExtendExpr>(S)) { 10800 if (!containsUndefs(S)) 10801 Terms.push_back(S); 10802 10803 // Stop recursion: once we collected a term, do not walk its operands. 10804 return false; 10805 } 10806 10807 // Keep looking. 10808 return true; 10809 } 10810 10811 bool isDone() const { return false; } 10812 }; 10813 10814 // Check if a SCEV contains an AddRecExpr. 10815 struct SCEVHasAddRec { 10816 bool &ContainsAddRec; 10817 10818 SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) { 10819 ContainsAddRec = false; 10820 } 10821 10822 bool follow(const SCEV *S) { 10823 if (isa<SCEVAddRecExpr>(S)) { 10824 ContainsAddRec = true; 10825 10826 // Stop recursion: once we collected a term, do not walk its operands. 10827 return false; 10828 } 10829 10830 // Keep looking. 10831 return true; 10832 } 10833 10834 bool isDone() const { return false; } 10835 }; 10836 10837 // Find factors that are multiplied with an expression that (possibly as a 10838 // subexpression) contains an AddRecExpr. In the expression: 10839 // 10840 // 8 * (100 + %p * %q * (%a + {0, +, 1}_loop)) 10841 // 10842 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)" 10843 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size 10844 // parameters as they form a product with an induction variable. 10845 // 10846 // This collector expects all array size parameters to be in the same MulExpr. 10847 // It might be necessary to later add support for collecting parameters that are 10848 // spread over different nested MulExpr. 10849 struct SCEVCollectAddRecMultiplies { 10850 SmallVectorImpl<const SCEV *> &Terms; 10851 ScalarEvolution &SE; 10852 10853 SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE) 10854 : Terms(T), SE(SE) {} 10855 10856 bool follow(const SCEV *S) { 10857 if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) { 10858 bool HasAddRec = false; 10859 SmallVector<const SCEV *, 0> Operands; 10860 for (auto Op : Mul->operands()) { 10861 const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op); 10862 if (Unknown && !isa<CallInst>(Unknown->getValue())) { 10863 Operands.push_back(Op); 10864 } else if (Unknown) { 10865 HasAddRec = true; 10866 } else { 10867 bool ContainsAddRec = false; 10868 SCEVHasAddRec ContiansAddRec(ContainsAddRec); 10869 visitAll(Op, ContiansAddRec); 10870 HasAddRec |= ContainsAddRec; 10871 } 10872 } 10873 if (Operands.size() == 0) 10874 return true; 10875 10876 if (!HasAddRec) 10877 return false; 10878 10879 Terms.push_back(SE.getMulExpr(Operands)); 10880 // Stop recursion: once we collected a term, do not walk its operands. 10881 return false; 10882 } 10883 10884 // Keep looking. 10885 return true; 10886 } 10887 10888 bool isDone() const { return false; } 10889 }; 10890 10891 } // end anonymous namespace 10892 10893 /// Find parametric terms in this SCEVAddRecExpr. We first for parameters in 10894 /// two places: 10895 /// 1) The strides of AddRec expressions. 10896 /// 2) Unknowns that are multiplied with AddRec expressions. 10897 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 10898 SmallVectorImpl<const SCEV *> &Terms) { 10899 SmallVector<const SCEV *, 4> Strides; 10900 SCEVCollectStrides StrideCollector(*this, Strides); 10901 visitAll(Expr, StrideCollector); 10902 10903 LLVM_DEBUG({ 10904 dbgs() << "Strides:\n"; 10905 for (const SCEV *S : Strides) 10906 dbgs() << *S << "\n"; 10907 }); 10908 10909 for (const SCEV *S : Strides) { 10910 SCEVCollectTerms TermCollector(Terms); 10911 visitAll(S, TermCollector); 10912 } 10913 10914 LLVM_DEBUG({ 10915 dbgs() << "Terms:\n"; 10916 for (const SCEV *T : Terms) 10917 dbgs() << *T << "\n"; 10918 }); 10919 10920 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 10921 visitAll(Expr, MulCollector); 10922 } 10923 10924 static bool findArrayDimensionsRec(ScalarEvolution &SE, 10925 SmallVectorImpl<const SCEV *> &Terms, 10926 SmallVectorImpl<const SCEV *> &Sizes) { 10927 int Last = Terms.size() - 1; 10928 const SCEV *Step = Terms[Last]; 10929 10930 // End of recursion. 10931 if (Last == 0) { 10932 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 10933 SmallVector<const SCEV *, 2> Qs; 10934 for (const SCEV *Op : M->operands()) 10935 if (!isa<SCEVConstant>(Op)) 10936 Qs.push_back(Op); 10937 10938 Step = SE.getMulExpr(Qs); 10939 } 10940 10941 Sizes.push_back(Step); 10942 return true; 10943 } 10944 10945 for (const SCEV *&Term : Terms) { 10946 // Normalize the terms before the next call to findArrayDimensionsRec. 10947 const SCEV *Q, *R; 10948 SCEVDivision::divide(SE, Term, Step, &Q, &R); 10949 10950 // Bail out when GCD does not evenly divide one of the terms. 10951 if (!R->isZero()) 10952 return false; 10953 10954 Term = Q; 10955 } 10956 10957 // Remove all SCEVConstants. 10958 Terms.erase( 10959 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 10960 Terms.end()); 10961 10962 if (Terms.size() > 0) 10963 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 10964 return false; 10965 10966 Sizes.push_back(Step); 10967 return true; 10968 } 10969 10970 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 10971 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 10972 for (const SCEV *T : Terms) 10973 if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); })) 10974 return true; 10975 10976 return false; 10977 } 10978 10979 // Return the number of product terms in S. 10980 static inline int numberOfTerms(const SCEV *S) { 10981 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 10982 return Expr->getNumOperands(); 10983 return 1; 10984 } 10985 10986 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 10987 if (isa<SCEVConstant>(T)) 10988 return nullptr; 10989 10990 if (isa<SCEVUnknown>(T)) 10991 return T; 10992 10993 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 10994 SmallVector<const SCEV *, 2> Factors; 10995 for (const SCEV *Op : M->operands()) 10996 if (!isa<SCEVConstant>(Op)) 10997 Factors.push_back(Op); 10998 10999 return SE.getMulExpr(Factors); 11000 } 11001 11002 return T; 11003 } 11004 11005 /// Return the size of an element read or written by Inst. 11006 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 11007 Type *Ty; 11008 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 11009 Ty = Store->getValueOperand()->getType(); 11010 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 11011 Ty = Load->getType(); 11012 else 11013 return nullptr; 11014 11015 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 11016 return getSizeOfExpr(ETy, Ty); 11017 } 11018 11019 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 11020 SmallVectorImpl<const SCEV *> &Sizes, 11021 const SCEV *ElementSize) { 11022 if (Terms.size() < 1 || !ElementSize) 11023 return; 11024 11025 // Early return when Terms do not contain parameters: we do not delinearize 11026 // non parametric SCEVs. 11027 if (!containsParameters(Terms)) 11028 return; 11029 11030 LLVM_DEBUG({ 11031 dbgs() << "Terms:\n"; 11032 for (const SCEV *T : Terms) 11033 dbgs() << *T << "\n"; 11034 }); 11035 11036 // Remove duplicates. 11037 array_pod_sort(Terms.begin(), Terms.end()); 11038 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 11039 11040 // Put larger terms first. 11041 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { 11042 return numberOfTerms(LHS) > numberOfTerms(RHS); 11043 }); 11044 11045 // Try to divide all terms by the element size. If term is not divisible by 11046 // element size, proceed with the original term. 11047 for (const SCEV *&Term : Terms) { 11048 const SCEV *Q, *R; 11049 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 11050 if (!Q->isZero()) 11051 Term = Q; 11052 } 11053 11054 SmallVector<const SCEV *, 4> NewTerms; 11055 11056 // Remove constant factors. 11057 for (const SCEV *T : Terms) 11058 if (const SCEV *NewT = removeConstantFactors(*this, T)) 11059 NewTerms.push_back(NewT); 11060 11061 LLVM_DEBUG({ 11062 dbgs() << "Terms after sorting:\n"; 11063 for (const SCEV *T : NewTerms) 11064 dbgs() << *T << "\n"; 11065 }); 11066 11067 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 11068 Sizes.clear(); 11069 return; 11070 } 11071 11072 // The last element to be pushed into Sizes is the size of an element. 11073 Sizes.push_back(ElementSize); 11074 11075 LLVM_DEBUG({ 11076 dbgs() << "Sizes:\n"; 11077 for (const SCEV *S : Sizes) 11078 dbgs() << *S << "\n"; 11079 }); 11080 } 11081 11082 void ScalarEvolution::computeAccessFunctions( 11083 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 11084 SmallVectorImpl<const SCEV *> &Sizes) { 11085 // Early exit in case this SCEV is not an affine multivariate function. 11086 if (Sizes.empty()) 11087 return; 11088 11089 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 11090 if (!AR->isAffine()) 11091 return; 11092 11093 const SCEV *Res = Expr; 11094 int Last = Sizes.size() - 1; 11095 for (int i = Last; i >= 0; i--) { 11096 const SCEV *Q, *R; 11097 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 11098 11099 LLVM_DEBUG({ 11100 dbgs() << "Res: " << *Res << "\n"; 11101 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 11102 dbgs() << "Res divided by Sizes[i]:\n"; 11103 dbgs() << "Quotient: " << *Q << "\n"; 11104 dbgs() << "Remainder: " << *R << "\n"; 11105 }); 11106 11107 Res = Q; 11108 11109 // Do not record the last subscript corresponding to the size of elements in 11110 // the array. 11111 if (i == Last) { 11112 11113 // Bail out if the remainder is too complex. 11114 if (isa<SCEVAddRecExpr>(R)) { 11115 Subscripts.clear(); 11116 Sizes.clear(); 11117 return; 11118 } 11119 11120 continue; 11121 } 11122 11123 // Record the access function for the current subscript. 11124 Subscripts.push_back(R); 11125 } 11126 11127 // Also push in last position the remainder of the last division: it will be 11128 // the access function of the innermost dimension. 11129 Subscripts.push_back(Res); 11130 11131 std::reverse(Subscripts.begin(), Subscripts.end()); 11132 11133 LLVM_DEBUG({ 11134 dbgs() << "Subscripts:\n"; 11135 for (const SCEV *S : Subscripts) 11136 dbgs() << *S << "\n"; 11137 }); 11138 } 11139 11140 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 11141 /// sizes of an array access. Returns the remainder of the delinearization that 11142 /// is the offset start of the array. The SCEV->delinearize algorithm computes 11143 /// the multiples of SCEV coefficients: that is a pattern matching of sub 11144 /// expressions in the stride and base of a SCEV corresponding to the 11145 /// computation of a GCD (greatest common divisor) of base and stride. When 11146 /// SCEV->delinearize fails, it returns the SCEV unchanged. 11147 /// 11148 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 11149 /// 11150 /// void foo(long n, long m, long o, double A[n][m][o]) { 11151 /// 11152 /// for (long i = 0; i < n; i++) 11153 /// for (long j = 0; j < m; j++) 11154 /// for (long k = 0; k < o; k++) 11155 /// A[i][j][k] = 1.0; 11156 /// } 11157 /// 11158 /// the delinearization input is the following AddRec SCEV: 11159 /// 11160 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 11161 /// 11162 /// From this SCEV, we are able to say that the base offset of the access is %A 11163 /// because it appears as an offset that does not divide any of the strides in 11164 /// the loops: 11165 /// 11166 /// CHECK: Base offset: %A 11167 /// 11168 /// and then SCEV->delinearize determines the size of some of the dimensions of 11169 /// the array as these are the multiples by which the strides are happening: 11170 /// 11171 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 11172 /// 11173 /// Note that the outermost dimension remains of UnknownSize because there are 11174 /// no strides that would help identifying the size of the last dimension: when 11175 /// the array has been statically allocated, one could compute the size of that 11176 /// dimension by dividing the overall size of the array by the size of the known 11177 /// dimensions: %m * %o * 8. 11178 /// 11179 /// Finally delinearize provides the access functions for the array reference 11180 /// that does correspond to A[i][j][k] of the above C testcase: 11181 /// 11182 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 11183 /// 11184 /// The testcases are checking the output of a function pass: 11185 /// DelinearizationPass that walks through all loads and stores of a function 11186 /// asking for the SCEV of the memory access with respect to all enclosing 11187 /// loops, calling SCEV->delinearize on that and printing the results. 11188 void ScalarEvolution::delinearize(const SCEV *Expr, 11189 SmallVectorImpl<const SCEV *> &Subscripts, 11190 SmallVectorImpl<const SCEV *> &Sizes, 11191 const SCEV *ElementSize) { 11192 // First step: collect parametric terms. 11193 SmallVector<const SCEV *, 4> Terms; 11194 collectParametricTerms(Expr, Terms); 11195 11196 if (Terms.empty()) 11197 return; 11198 11199 // Second step: find subscript sizes. 11200 findArrayDimensions(Terms, Sizes, ElementSize); 11201 11202 if (Sizes.empty()) 11203 return; 11204 11205 // Third step: compute the access functions for each subscript. 11206 computeAccessFunctions(Expr, Subscripts, Sizes); 11207 11208 if (Subscripts.empty()) 11209 return; 11210 11211 LLVM_DEBUG({ 11212 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11213 dbgs() << "ArrayDecl[UnknownSize]"; 11214 for (const SCEV *S : Sizes) 11215 dbgs() << "[" << *S << "]"; 11216 11217 dbgs() << "\nArrayRef"; 11218 for (const SCEV *S : Subscripts) 11219 dbgs() << "[" << *S << "]"; 11220 dbgs() << "\n"; 11221 }); 11222 } 11223 11224 bool ScalarEvolution::getIndexExpressionsFromGEP( 11225 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts, 11226 SmallVectorImpl<int> &Sizes) { 11227 assert(Subscripts.empty() && Sizes.empty() && 11228 "Expected output lists to be empty on entry to this function."); 11229 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP"); 11230 Type *Ty = GEP->getPointerOperandType(); 11231 bool DroppedFirstDim = false; 11232 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 11233 const SCEV *Expr = getSCEV(GEP->getOperand(i)); 11234 if (i == 1) { 11235 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) { 11236 Ty = PtrTy->getElementType(); 11237 } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) { 11238 Ty = ArrayTy->getElementType(); 11239 } else { 11240 Subscripts.clear(); 11241 Sizes.clear(); 11242 return false; 11243 } 11244 if (auto *Const = dyn_cast<SCEVConstant>(Expr)) 11245 if (Const->getValue()->isZero()) { 11246 DroppedFirstDim = true; 11247 continue; 11248 } 11249 Subscripts.push_back(Expr); 11250 continue; 11251 } 11252 11253 auto *ArrayTy = dyn_cast<ArrayType>(Ty); 11254 if (!ArrayTy) { 11255 Subscripts.clear(); 11256 Sizes.clear(); 11257 return false; 11258 } 11259 11260 Subscripts.push_back(Expr); 11261 if (!(DroppedFirstDim && i == 2)) 11262 Sizes.push_back(ArrayTy->getNumElements()); 11263 11264 Ty = ArrayTy->getElementType(); 11265 } 11266 return !Subscripts.empty(); 11267 } 11268 11269 //===----------------------------------------------------------------------===// 11270 // SCEVCallbackVH Class Implementation 11271 //===----------------------------------------------------------------------===// 11272 11273 void ScalarEvolution::SCEVCallbackVH::deleted() { 11274 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11275 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11276 SE->ConstantEvolutionLoopExitValue.erase(PN); 11277 SE->eraseValueFromMap(getValPtr()); 11278 // this now dangles! 11279 } 11280 11281 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11282 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11283 11284 // Forget all the expressions associated with users of the old value, 11285 // so that future queries will recompute the expressions using the new 11286 // value. 11287 Value *Old = getValPtr(); 11288 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 11289 SmallPtrSet<User *, 8> Visited; 11290 while (!Worklist.empty()) { 11291 User *U = Worklist.pop_back_val(); 11292 // Deleting the Old value will cause this to dangle. Postpone 11293 // that until everything else is done. 11294 if (U == Old) 11295 continue; 11296 if (!Visited.insert(U).second) 11297 continue; 11298 if (PHINode *PN = dyn_cast<PHINode>(U)) 11299 SE->ConstantEvolutionLoopExitValue.erase(PN); 11300 SE->eraseValueFromMap(U); 11301 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 11302 } 11303 // Delete the Old value. 11304 if (PHINode *PN = dyn_cast<PHINode>(Old)) 11305 SE->ConstantEvolutionLoopExitValue.erase(PN); 11306 SE->eraseValueFromMap(Old); 11307 // this now dangles! 11308 } 11309 11310 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 11311 : CallbackVH(V), SE(se) {} 11312 11313 //===----------------------------------------------------------------------===// 11314 // ScalarEvolution Class Implementation 11315 //===----------------------------------------------------------------------===// 11316 11317 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 11318 AssumptionCache &AC, DominatorTree &DT, 11319 LoopInfo &LI) 11320 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 11321 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 11322 LoopDispositions(64), BlockDispositions(64) { 11323 // To use guards for proving predicates, we need to scan every instruction in 11324 // relevant basic blocks, and not just terminators. Doing this is a waste of 11325 // time if the IR does not actually contain any calls to 11326 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 11327 // 11328 // This pessimizes the case where a pass that preserves ScalarEvolution wants 11329 // to _add_ guards to the module when there weren't any before, and wants 11330 // ScalarEvolution to optimize based on those guards. For now we prefer to be 11331 // efficient in lieu of being smart in that rather obscure case. 11332 11333 auto *GuardDecl = F.getParent()->getFunction( 11334 Intrinsic::getName(Intrinsic::experimental_guard)); 11335 HasGuards = GuardDecl && !GuardDecl->use_empty(); 11336 } 11337 11338 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 11339 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 11340 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 11341 ValueExprMap(std::move(Arg.ValueExprMap)), 11342 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 11343 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 11344 PendingMerges(std::move(Arg.PendingMerges)), 11345 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 11346 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 11347 PredicatedBackedgeTakenCounts( 11348 std::move(Arg.PredicatedBackedgeTakenCounts)), 11349 ConstantEvolutionLoopExitValue( 11350 std::move(Arg.ConstantEvolutionLoopExitValue)), 11351 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 11352 LoopDispositions(std::move(Arg.LoopDispositions)), 11353 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 11354 BlockDispositions(std::move(Arg.BlockDispositions)), 11355 UnsignedRanges(std::move(Arg.UnsignedRanges)), 11356 SignedRanges(std::move(Arg.SignedRanges)), 11357 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 11358 UniquePreds(std::move(Arg.UniquePreds)), 11359 SCEVAllocator(std::move(Arg.SCEVAllocator)), 11360 LoopUsers(std::move(Arg.LoopUsers)), 11361 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 11362 FirstUnknown(Arg.FirstUnknown) { 11363 Arg.FirstUnknown = nullptr; 11364 } 11365 11366 ScalarEvolution::~ScalarEvolution() { 11367 // Iterate through all the SCEVUnknown instances and call their 11368 // destructors, so that they release their references to their values. 11369 for (SCEVUnknown *U = FirstUnknown; U;) { 11370 SCEVUnknown *Tmp = U; 11371 U = U->Next; 11372 Tmp->~SCEVUnknown(); 11373 } 11374 FirstUnknown = nullptr; 11375 11376 ExprValueMap.clear(); 11377 ValueExprMap.clear(); 11378 HasRecMap.clear(); 11379 11380 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 11381 // that a loop had multiple computable exits. 11382 for (auto &BTCI : BackedgeTakenCounts) 11383 BTCI.second.clear(); 11384 for (auto &BTCI : PredicatedBackedgeTakenCounts) 11385 BTCI.second.clear(); 11386 11387 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 11388 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 11389 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 11390 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 11391 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 11392 } 11393 11394 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 11395 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 11396 } 11397 11398 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 11399 const Loop *L) { 11400 // Print all inner loops first 11401 for (Loop *I : *L) 11402 PrintLoopInfo(OS, SE, I); 11403 11404 OS << "Loop "; 11405 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11406 OS << ": "; 11407 11408 SmallVector<BasicBlock *, 8> ExitingBlocks; 11409 L->getExitingBlocks(ExitingBlocks); 11410 if (ExitingBlocks.size() != 1) 11411 OS << "<multiple exits> "; 11412 11413 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 11414 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 11415 else 11416 OS << "Unpredictable backedge-taken count.\n"; 11417 11418 if (ExitingBlocks.size() > 1) 11419 for (BasicBlock *ExitingBlock : ExitingBlocks) { 11420 OS << " exit count for " << ExitingBlock->getName() << ": " 11421 << *SE->getExitCount(L, ExitingBlock) << "\n"; 11422 } 11423 11424 OS << "Loop "; 11425 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11426 OS << ": "; 11427 11428 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 11429 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 11430 if (SE->isBackedgeTakenCountMaxOrZero(L)) 11431 OS << ", actual taken count either this or zero."; 11432 } else { 11433 OS << "Unpredictable max backedge-taken count. "; 11434 } 11435 11436 OS << "\n" 11437 "Loop "; 11438 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11439 OS << ": "; 11440 11441 SCEVUnionPredicate Pred; 11442 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 11443 if (!isa<SCEVCouldNotCompute>(PBT)) { 11444 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 11445 OS << " Predicates:\n"; 11446 Pred.print(OS, 4); 11447 } else { 11448 OS << "Unpredictable predicated backedge-taken count. "; 11449 } 11450 OS << "\n"; 11451 11452 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11453 OS << "Loop "; 11454 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11455 OS << ": "; 11456 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 11457 } 11458 } 11459 11460 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 11461 switch (LD) { 11462 case ScalarEvolution::LoopVariant: 11463 return "Variant"; 11464 case ScalarEvolution::LoopInvariant: 11465 return "Invariant"; 11466 case ScalarEvolution::LoopComputable: 11467 return "Computable"; 11468 } 11469 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 11470 } 11471 11472 void ScalarEvolution::print(raw_ostream &OS) const { 11473 // ScalarEvolution's implementation of the print method is to print 11474 // out SCEV values of all instructions that are interesting. Doing 11475 // this potentially causes it to create new SCEV objects though, 11476 // which technically conflicts with the const qualifier. This isn't 11477 // observable from outside the class though, so casting away the 11478 // const isn't dangerous. 11479 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11480 11481 if (ClassifyExpressions) { 11482 OS << "Classifying expressions for: "; 11483 F.printAsOperand(OS, /*PrintType=*/false); 11484 OS << "\n"; 11485 for (Instruction &I : instructions(F)) 11486 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 11487 OS << I << '\n'; 11488 OS << " --> "; 11489 const SCEV *SV = SE.getSCEV(&I); 11490 SV->print(OS); 11491 if (!isa<SCEVCouldNotCompute>(SV)) { 11492 OS << " U: "; 11493 SE.getUnsignedRange(SV).print(OS); 11494 OS << " S: "; 11495 SE.getSignedRange(SV).print(OS); 11496 } 11497 11498 const Loop *L = LI.getLoopFor(I.getParent()); 11499 11500 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 11501 if (AtUse != SV) { 11502 OS << " --> "; 11503 AtUse->print(OS); 11504 if (!isa<SCEVCouldNotCompute>(AtUse)) { 11505 OS << " U: "; 11506 SE.getUnsignedRange(AtUse).print(OS); 11507 OS << " S: "; 11508 SE.getSignedRange(AtUse).print(OS); 11509 } 11510 } 11511 11512 if (L) { 11513 OS << "\t\t" "Exits: "; 11514 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 11515 if (!SE.isLoopInvariant(ExitValue, L)) { 11516 OS << "<<Unknown>>"; 11517 } else { 11518 OS << *ExitValue; 11519 } 11520 11521 bool First = true; 11522 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 11523 if (First) { 11524 OS << "\t\t" "LoopDispositions: { "; 11525 First = false; 11526 } else { 11527 OS << ", "; 11528 } 11529 11530 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11531 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 11532 } 11533 11534 for (auto *InnerL : depth_first(L)) { 11535 if (InnerL == L) 11536 continue; 11537 if (First) { 11538 OS << "\t\t" "LoopDispositions: { "; 11539 First = false; 11540 } else { 11541 OS << ", "; 11542 } 11543 11544 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11545 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 11546 } 11547 11548 OS << " }"; 11549 } 11550 11551 OS << "\n"; 11552 } 11553 } 11554 11555 OS << "Determining loop execution counts for: "; 11556 F.printAsOperand(OS, /*PrintType=*/false); 11557 OS << "\n"; 11558 for (Loop *I : LI) 11559 PrintLoopInfo(OS, &SE, I); 11560 } 11561 11562 ScalarEvolution::LoopDisposition 11563 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 11564 auto &Values = LoopDispositions[S]; 11565 for (auto &V : Values) { 11566 if (V.getPointer() == L) 11567 return V.getInt(); 11568 } 11569 Values.emplace_back(L, LoopVariant); 11570 LoopDisposition D = computeLoopDisposition(S, L); 11571 auto &Values2 = LoopDispositions[S]; 11572 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11573 if (V.getPointer() == L) { 11574 V.setInt(D); 11575 break; 11576 } 11577 } 11578 return D; 11579 } 11580 11581 ScalarEvolution::LoopDisposition 11582 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 11583 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11584 case scConstant: 11585 return LoopInvariant; 11586 case scTruncate: 11587 case scZeroExtend: 11588 case scSignExtend: 11589 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 11590 case scAddRecExpr: { 11591 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11592 11593 // If L is the addrec's loop, it's computable. 11594 if (AR->getLoop() == L) 11595 return LoopComputable; 11596 11597 // Add recurrences are never invariant in the function-body (null loop). 11598 if (!L) 11599 return LoopVariant; 11600 11601 // Everything that is not defined at loop entry is variant. 11602 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 11603 return LoopVariant; 11604 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 11605 " dominate the contained loop's header?"); 11606 11607 // This recurrence is invariant w.r.t. L if AR's loop contains L. 11608 if (AR->getLoop()->contains(L)) 11609 return LoopInvariant; 11610 11611 // This recurrence is variant w.r.t. L if any of its operands 11612 // are variant. 11613 for (auto *Op : AR->operands()) 11614 if (!isLoopInvariant(Op, L)) 11615 return LoopVariant; 11616 11617 // Otherwise it's loop-invariant. 11618 return LoopInvariant; 11619 } 11620 case scAddExpr: 11621 case scMulExpr: 11622 case scUMaxExpr: 11623 case scSMaxExpr: 11624 case scUMinExpr: 11625 case scSMinExpr: { 11626 bool HasVarying = false; 11627 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 11628 LoopDisposition D = getLoopDisposition(Op, L); 11629 if (D == LoopVariant) 11630 return LoopVariant; 11631 if (D == LoopComputable) 11632 HasVarying = true; 11633 } 11634 return HasVarying ? LoopComputable : LoopInvariant; 11635 } 11636 case scUDivExpr: { 11637 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11638 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 11639 if (LD == LoopVariant) 11640 return LoopVariant; 11641 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 11642 if (RD == LoopVariant) 11643 return LoopVariant; 11644 return (LD == LoopInvariant && RD == LoopInvariant) ? 11645 LoopInvariant : LoopComputable; 11646 } 11647 case scUnknown: 11648 // All non-instruction values are loop invariant. All instructions are loop 11649 // invariant if they are not contained in the specified loop. 11650 // Instructions are never considered invariant in the function body 11651 // (null loop) because they are defined within the "loop". 11652 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 11653 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant; 11654 return LoopInvariant; 11655 case scCouldNotCompute: 11656 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11657 } 11658 llvm_unreachable("Unknown SCEV kind!"); 11659 } 11660 11661 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 11662 return getLoopDisposition(S, L) == LoopInvariant; 11663 } 11664 11665 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 11666 return getLoopDisposition(S, L) == LoopComputable; 11667 } 11668 11669 ScalarEvolution::BlockDisposition 11670 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11671 auto &Values = BlockDispositions[S]; 11672 for (auto &V : Values) { 11673 if (V.getPointer() == BB) 11674 return V.getInt(); 11675 } 11676 Values.emplace_back(BB, DoesNotDominateBlock); 11677 BlockDisposition D = computeBlockDisposition(S, BB); 11678 auto &Values2 = BlockDispositions[S]; 11679 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11680 if (V.getPointer() == BB) { 11681 V.setInt(D); 11682 break; 11683 } 11684 } 11685 return D; 11686 } 11687 11688 ScalarEvolution::BlockDisposition 11689 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11690 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11691 case scConstant: 11692 return ProperlyDominatesBlock; 11693 case scTruncate: 11694 case scZeroExtend: 11695 case scSignExtend: 11696 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 11697 case scAddRecExpr: { 11698 // This uses a "dominates" query instead of "properly dominates" query 11699 // to test for proper dominance too, because the instruction which 11700 // produces the addrec's value is a PHI, and a PHI effectively properly 11701 // dominates its entire containing block. 11702 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11703 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 11704 return DoesNotDominateBlock; 11705 11706 // Fall through into SCEVNAryExpr handling. 11707 LLVM_FALLTHROUGH; 11708 } 11709 case scAddExpr: 11710 case scMulExpr: 11711 case scUMaxExpr: 11712 case scSMaxExpr: 11713 case scUMinExpr: 11714 case scSMinExpr: { 11715 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 11716 bool Proper = true; 11717 for (const SCEV *NAryOp : NAry->operands()) { 11718 BlockDisposition D = getBlockDisposition(NAryOp, BB); 11719 if (D == DoesNotDominateBlock) 11720 return DoesNotDominateBlock; 11721 if (D == DominatesBlock) 11722 Proper = false; 11723 } 11724 return Proper ? ProperlyDominatesBlock : DominatesBlock; 11725 } 11726 case scUDivExpr: { 11727 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11728 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 11729 BlockDisposition LD = getBlockDisposition(LHS, BB); 11730 if (LD == DoesNotDominateBlock) 11731 return DoesNotDominateBlock; 11732 BlockDisposition RD = getBlockDisposition(RHS, BB); 11733 if (RD == DoesNotDominateBlock) 11734 return DoesNotDominateBlock; 11735 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 11736 ProperlyDominatesBlock : DominatesBlock; 11737 } 11738 case scUnknown: 11739 if (Instruction *I = 11740 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 11741 if (I->getParent() == BB) 11742 return DominatesBlock; 11743 if (DT.properlyDominates(I->getParent(), BB)) 11744 return ProperlyDominatesBlock; 11745 return DoesNotDominateBlock; 11746 } 11747 return ProperlyDominatesBlock; 11748 case scCouldNotCompute: 11749 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11750 } 11751 llvm_unreachable("Unknown SCEV kind!"); 11752 } 11753 11754 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 11755 return getBlockDisposition(S, BB) >= DominatesBlock; 11756 } 11757 11758 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 11759 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 11760 } 11761 11762 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 11763 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 11764 } 11765 11766 bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const { 11767 auto IsS = [&](const SCEV *X) { return S == X; }; 11768 auto ContainsS = [&](const SCEV *X) { 11769 return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS); 11770 }; 11771 return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken); 11772 } 11773 11774 void 11775 ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 11776 ValuesAtScopes.erase(S); 11777 LoopDispositions.erase(S); 11778 BlockDispositions.erase(S); 11779 UnsignedRanges.erase(S); 11780 SignedRanges.erase(S); 11781 ExprValueMap.erase(S); 11782 HasRecMap.erase(S); 11783 MinTrailingZerosCache.erase(S); 11784 11785 for (auto I = PredicatedSCEVRewrites.begin(); 11786 I != PredicatedSCEVRewrites.end();) { 11787 std::pair<const SCEV *, const Loop *> Entry = I->first; 11788 if (Entry.first == S) 11789 PredicatedSCEVRewrites.erase(I++); 11790 else 11791 ++I; 11792 } 11793 11794 auto RemoveSCEVFromBackedgeMap = 11795 [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 11796 for (auto I = Map.begin(), E = Map.end(); I != E;) { 11797 BackedgeTakenInfo &BEInfo = I->second; 11798 if (BEInfo.hasOperand(S, this)) { 11799 BEInfo.clear(); 11800 Map.erase(I++); 11801 } else 11802 ++I; 11803 } 11804 }; 11805 11806 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 11807 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 11808 } 11809 11810 void 11811 ScalarEvolution::getUsedLoops(const SCEV *S, 11812 SmallPtrSetImpl<const Loop *> &LoopsUsed) { 11813 struct FindUsedLoops { 11814 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed) 11815 : LoopsUsed(LoopsUsed) {} 11816 SmallPtrSetImpl<const Loop *> &LoopsUsed; 11817 bool follow(const SCEV *S) { 11818 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) 11819 LoopsUsed.insert(AR->getLoop()); 11820 return true; 11821 } 11822 11823 bool isDone() const { return false; } 11824 }; 11825 11826 FindUsedLoops F(LoopsUsed); 11827 SCEVTraversal<FindUsedLoops>(F).visitAll(S); 11828 } 11829 11830 void ScalarEvolution::addToLoopUseLists(const SCEV *S) { 11831 SmallPtrSet<const Loop *, 8> LoopsUsed; 11832 getUsedLoops(S, LoopsUsed); 11833 for (auto *L : LoopsUsed) 11834 LoopUsers[L].push_back(S); 11835 } 11836 11837 void ScalarEvolution::verify() const { 11838 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11839 ScalarEvolution SE2(F, TLI, AC, DT, LI); 11840 11841 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end()); 11842 11843 // Map's SCEV expressions from one ScalarEvolution "universe" to another. 11844 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { 11845 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} 11846 11847 const SCEV *visitConstant(const SCEVConstant *Constant) { 11848 return SE.getConstant(Constant->getAPInt()); 11849 } 11850 11851 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 11852 return SE.getUnknown(Expr->getValue()); 11853 } 11854 11855 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { 11856 return SE.getCouldNotCompute(); 11857 } 11858 }; 11859 11860 SCEVMapper SCM(SE2); 11861 11862 while (!LoopStack.empty()) { 11863 auto *L = LoopStack.pop_back_val(); 11864 LoopStack.insert(LoopStack.end(), L->begin(), L->end()); 11865 11866 auto *CurBECount = SCM.visit( 11867 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L)); 11868 auto *NewBECount = SE2.getBackedgeTakenCount(L); 11869 11870 if (CurBECount == SE2.getCouldNotCompute() || 11871 NewBECount == SE2.getCouldNotCompute()) { 11872 // NB! This situation is legal, but is very suspicious -- whatever pass 11873 // change the loop to make a trip count go from could not compute to 11874 // computable or vice-versa *should have* invalidated SCEV. However, we 11875 // choose not to assert here (for now) since we don't want false 11876 // positives. 11877 continue; 11878 } 11879 11880 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) { 11881 // SCEV treats "undef" as an unknown but consistent value (i.e. it does 11882 // not propagate undef aggressively). This means we can (and do) fail 11883 // verification in cases where a transform makes the trip count of a loop 11884 // go from "undef" to "undef+1" (say). The transform is fine, since in 11885 // both cases the loop iterates "undef" times, but SCEV thinks we 11886 // increased the trip count of the loop by 1 incorrectly. 11887 continue; 11888 } 11889 11890 if (SE.getTypeSizeInBits(CurBECount->getType()) > 11891 SE.getTypeSizeInBits(NewBECount->getType())) 11892 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); 11893 else if (SE.getTypeSizeInBits(CurBECount->getType()) < 11894 SE.getTypeSizeInBits(NewBECount->getType())) 11895 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); 11896 11897 const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount); 11898 11899 // Unless VerifySCEVStrict is set, we only compare constant deltas. 11900 if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) { 11901 dbgs() << "Trip Count for " << *L << " Changed!\n"; 11902 dbgs() << "Old: " << *CurBECount << "\n"; 11903 dbgs() << "New: " << *NewBECount << "\n"; 11904 dbgs() << "Delta: " << *Delta << "\n"; 11905 std::abort(); 11906 } 11907 } 11908 } 11909 11910 bool ScalarEvolution::invalidate( 11911 Function &F, const PreservedAnalyses &PA, 11912 FunctionAnalysisManager::Invalidator &Inv) { 11913 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 11914 // of its dependencies is invalidated. 11915 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 11916 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 11917 Inv.invalidate<AssumptionAnalysis>(F, PA) || 11918 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 11919 Inv.invalidate<LoopAnalysis>(F, PA); 11920 } 11921 11922 AnalysisKey ScalarEvolutionAnalysis::Key; 11923 11924 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 11925 FunctionAnalysisManager &AM) { 11926 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 11927 AM.getResult<AssumptionAnalysis>(F), 11928 AM.getResult<DominatorTreeAnalysis>(F), 11929 AM.getResult<LoopAnalysis>(F)); 11930 } 11931 11932 PreservedAnalyses 11933 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 11934 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 11935 return PreservedAnalyses::all(); 11936 } 11937 11938 PreservedAnalyses 11939 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 11940 // For compatibility with opt's -analyze feature under legacy pass manager 11941 // which was not ported to NPM. This keeps tests using 11942 // update_analyze_test_checks.py working. 11943 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 11944 << F.getName() << "':\n"; 11945 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 11946 return PreservedAnalyses::all(); 11947 } 11948 11949 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 11950 "Scalar Evolution Analysis", false, true) 11951 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 11952 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 11953 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 11954 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 11955 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 11956 "Scalar Evolution Analysis", false, true) 11957 11958 char ScalarEvolutionWrapperPass::ID = 0; 11959 11960 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 11961 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 11962 } 11963 11964 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 11965 SE.reset(new ScalarEvolution( 11966 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 11967 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 11968 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 11969 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 11970 return false; 11971 } 11972 11973 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 11974 11975 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 11976 SE->print(OS); 11977 } 11978 11979 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 11980 if (!VerifySCEV) 11981 return; 11982 11983 SE->verify(); 11984 } 11985 11986 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 11987 AU.setPreservesAll(); 11988 AU.addRequiredTransitive<AssumptionCacheTracker>(); 11989 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 11990 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 11991 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 11992 } 11993 11994 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 11995 const SCEV *RHS) { 11996 FoldingSetNodeID ID; 11997 assert(LHS->getType() == RHS->getType() && 11998 "Type mismatch between LHS and RHS"); 11999 // Unique this node based on the arguments 12000 ID.AddInteger(SCEVPredicate::P_Equal); 12001 ID.AddPointer(LHS); 12002 ID.AddPointer(RHS); 12003 void *IP = nullptr; 12004 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12005 return S; 12006 SCEVEqualPredicate *Eq = new (SCEVAllocator) 12007 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 12008 UniquePreds.InsertNode(Eq, IP); 12009 return Eq; 12010 } 12011 12012 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 12013 const SCEVAddRecExpr *AR, 12014 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12015 FoldingSetNodeID ID; 12016 // Unique this node based on the arguments 12017 ID.AddInteger(SCEVPredicate::P_Wrap); 12018 ID.AddPointer(AR); 12019 ID.AddInteger(AddedFlags); 12020 void *IP = nullptr; 12021 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12022 return S; 12023 auto *OF = new (SCEVAllocator) 12024 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 12025 UniquePreds.InsertNode(OF, IP); 12026 return OF; 12027 } 12028 12029 namespace { 12030 12031 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 12032 public: 12033 12034 /// Rewrites \p S in the context of a loop L and the SCEV predication 12035 /// infrastructure. 12036 /// 12037 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 12038 /// equivalences present in \p Pred. 12039 /// 12040 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 12041 /// \p NewPreds such that the result will be an AddRecExpr. 12042 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 12043 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12044 SCEVUnionPredicate *Pred) { 12045 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 12046 return Rewriter.visit(S); 12047 } 12048 12049 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12050 if (Pred) { 12051 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 12052 for (auto *Pred : ExprPreds) 12053 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 12054 if (IPred->getLHS() == Expr) 12055 return IPred->getRHS(); 12056 } 12057 return convertToAddRecWithPreds(Expr); 12058 } 12059 12060 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 12061 const SCEV *Operand = visit(Expr->getOperand()); 12062 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12063 if (AR && AR->getLoop() == L && AR->isAffine()) { 12064 // This couldn't be folded because the operand didn't have the nuw 12065 // flag. Add the nusw flag as an assumption that we could make. 12066 const SCEV *Step = AR->getStepRecurrence(SE); 12067 Type *Ty = Expr->getType(); 12068 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 12069 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 12070 SE.getSignExtendExpr(Step, Ty), L, 12071 AR->getNoWrapFlags()); 12072 } 12073 return SE.getZeroExtendExpr(Operand, Expr->getType()); 12074 } 12075 12076 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 12077 const SCEV *Operand = visit(Expr->getOperand()); 12078 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12079 if (AR && AR->getLoop() == L && AR->isAffine()) { 12080 // This couldn't be folded because the operand didn't have the nsw 12081 // flag. Add the nssw flag as an assumption that we could make. 12082 const SCEV *Step = AR->getStepRecurrence(SE); 12083 Type *Ty = Expr->getType(); 12084 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 12085 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 12086 SE.getSignExtendExpr(Step, Ty), L, 12087 AR->getNoWrapFlags()); 12088 } 12089 return SE.getSignExtendExpr(Operand, Expr->getType()); 12090 } 12091 12092 private: 12093 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 12094 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12095 SCEVUnionPredicate *Pred) 12096 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 12097 12098 bool addOverflowAssumption(const SCEVPredicate *P) { 12099 if (!NewPreds) { 12100 // Check if we've already made this assumption. 12101 return Pred && Pred->implies(P); 12102 } 12103 NewPreds->insert(P); 12104 return true; 12105 } 12106 12107 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 12108 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12109 auto *A = SE.getWrapPredicate(AR, AddedFlags); 12110 return addOverflowAssumption(A); 12111 } 12112 12113 // If \p Expr represents a PHINode, we try to see if it can be represented 12114 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 12115 // to add this predicate as a runtime overflow check, we return the AddRec. 12116 // If \p Expr does not meet these conditions (is not a PHI node, or we 12117 // couldn't create an AddRec for it, or couldn't add the predicate), we just 12118 // return \p Expr. 12119 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 12120 if (!isa<PHINode>(Expr->getValue())) 12121 return Expr; 12122 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 12123 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 12124 if (!PredicatedRewrite) 12125 return Expr; 12126 for (auto *P : PredicatedRewrite->second){ 12127 // Wrap predicates from outer loops are not supported. 12128 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 12129 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 12130 if (L != AR->getLoop()) 12131 return Expr; 12132 } 12133 if (!addOverflowAssumption(P)) 12134 return Expr; 12135 } 12136 return PredicatedRewrite->first; 12137 } 12138 12139 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 12140 SCEVUnionPredicate *Pred; 12141 const Loop *L; 12142 }; 12143 12144 } // end anonymous namespace 12145 12146 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 12147 SCEVUnionPredicate &Preds) { 12148 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 12149 } 12150 12151 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 12152 const SCEV *S, const Loop *L, 12153 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 12154 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 12155 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 12156 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 12157 12158 if (!AddRec) 12159 return nullptr; 12160 12161 // Since the transformation was successful, we can now transfer the SCEV 12162 // predicates. 12163 for (auto *P : TransformPreds) 12164 Preds.insert(P); 12165 12166 return AddRec; 12167 } 12168 12169 /// SCEV predicates 12170 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 12171 SCEVPredicateKind Kind) 12172 : FastID(ID), Kind(Kind) {} 12173 12174 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 12175 const SCEV *LHS, const SCEV *RHS) 12176 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 12177 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 12178 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 12179 } 12180 12181 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 12182 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 12183 12184 if (!Op) 12185 return false; 12186 12187 return Op->LHS == LHS && Op->RHS == RHS; 12188 } 12189 12190 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 12191 12192 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 12193 12194 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 12195 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 12196 } 12197 12198 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 12199 const SCEVAddRecExpr *AR, 12200 IncrementWrapFlags Flags) 12201 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 12202 12203 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 12204 12205 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 12206 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 12207 12208 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 12209 } 12210 12211 bool SCEVWrapPredicate::isAlwaysTrue() const { 12212 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 12213 IncrementWrapFlags IFlags = Flags; 12214 12215 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 12216 IFlags = clearFlags(IFlags, IncrementNSSW); 12217 12218 return IFlags == IncrementAnyWrap; 12219 } 12220 12221 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 12222 OS.indent(Depth) << *getExpr() << " Added Flags: "; 12223 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 12224 OS << "<nusw>"; 12225 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 12226 OS << "<nssw>"; 12227 OS << "\n"; 12228 } 12229 12230 SCEVWrapPredicate::IncrementWrapFlags 12231 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 12232 ScalarEvolution &SE) { 12233 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 12234 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 12235 12236 // We can safely transfer the NSW flag as NSSW. 12237 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 12238 ImpliedFlags = IncrementNSSW; 12239 12240 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 12241 // If the increment is positive, the SCEV NUW flag will also imply the 12242 // WrapPredicate NUSW flag. 12243 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 12244 if (Step->getValue()->getValue().isNonNegative()) 12245 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 12246 } 12247 12248 return ImpliedFlags; 12249 } 12250 12251 /// Union predicates don't get cached so create a dummy set ID for it. 12252 SCEVUnionPredicate::SCEVUnionPredicate() 12253 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 12254 12255 bool SCEVUnionPredicate::isAlwaysTrue() const { 12256 return all_of(Preds, 12257 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 12258 } 12259 12260 ArrayRef<const SCEVPredicate *> 12261 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 12262 auto I = SCEVToPreds.find(Expr); 12263 if (I == SCEVToPreds.end()) 12264 return ArrayRef<const SCEVPredicate *>(); 12265 return I->second; 12266 } 12267 12268 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 12269 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 12270 return all_of(Set->Preds, 12271 [this](const SCEVPredicate *I) { return this->implies(I); }); 12272 12273 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 12274 if (ScevPredsIt == SCEVToPreds.end()) 12275 return false; 12276 auto &SCEVPreds = ScevPredsIt->second; 12277 12278 return any_of(SCEVPreds, 12279 [N](const SCEVPredicate *I) { return I->implies(N); }); 12280 } 12281 12282 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 12283 12284 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 12285 for (auto Pred : Preds) 12286 Pred->print(OS, Depth); 12287 } 12288 12289 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 12290 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 12291 for (auto Pred : Set->Preds) 12292 add(Pred); 12293 return; 12294 } 12295 12296 if (implies(N)) 12297 return; 12298 12299 const SCEV *Key = N->getExpr(); 12300 assert(Key && "Only SCEVUnionPredicate doesn't have an " 12301 " associated expression!"); 12302 12303 SCEVToPreds[Key].push_back(N); 12304 Preds.push_back(N); 12305 } 12306 12307 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 12308 Loop &L) 12309 : SE(SE), L(L) {} 12310 12311 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 12312 const SCEV *Expr = SE.getSCEV(V); 12313 RewriteEntry &Entry = RewriteMap[Expr]; 12314 12315 // If we already have an entry and the version matches, return it. 12316 if (Entry.second && Generation == Entry.first) 12317 return Entry.second; 12318 12319 // We found an entry but it's stale. Rewrite the stale entry 12320 // according to the current predicate. 12321 if (Entry.second) 12322 Expr = Entry.second; 12323 12324 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 12325 Entry = {Generation, NewSCEV}; 12326 12327 return NewSCEV; 12328 } 12329 12330 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 12331 if (!BackedgeCount) { 12332 SCEVUnionPredicate BackedgePred; 12333 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 12334 addPredicate(BackedgePred); 12335 } 12336 return BackedgeCount; 12337 } 12338 12339 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 12340 if (Preds.implies(&Pred)) 12341 return; 12342 Preds.add(&Pred); 12343 updateGeneration(); 12344 } 12345 12346 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 12347 return Preds; 12348 } 12349 12350 void PredicatedScalarEvolution::updateGeneration() { 12351 // If the generation number wrapped recompute everything. 12352 if (++Generation == 0) { 12353 for (auto &II : RewriteMap) { 12354 const SCEV *Rewritten = II.second.second; 12355 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 12356 } 12357 } 12358 } 12359 12360 void PredicatedScalarEvolution::setNoOverflow( 12361 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 12362 const SCEV *Expr = getSCEV(V); 12363 const auto *AR = cast<SCEVAddRecExpr>(Expr); 12364 12365 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 12366 12367 // Clear the statically implied flags. 12368 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 12369 addPredicate(*SE.getWrapPredicate(AR, Flags)); 12370 12371 auto II = FlagsMap.insert({V, Flags}); 12372 if (!II.second) 12373 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 12374 } 12375 12376 bool PredicatedScalarEvolution::hasNoOverflow( 12377 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 12378 const SCEV *Expr = getSCEV(V); 12379 const auto *AR = cast<SCEVAddRecExpr>(Expr); 12380 12381 Flags = SCEVWrapPredicate::clearFlags( 12382 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 12383 12384 auto II = FlagsMap.find(V); 12385 12386 if (II != FlagsMap.end()) 12387 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 12388 12389 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 12390 } 12391 12392 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 12393 const SCEV *Expr = this->getSCEV(V); 12394 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 12395 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 12396 12397 if (!New) 12398 return nullptr; 12399 12400 for (auto *P : NewPreds) 12401 Preds.add(P); 12402 12403 updateGeneration(); 12404 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 12405 return New; 12406 } 12407 12408 PredicatedScalarEvolution::PredicatedScalarEvolution( 12409 const PredicatedScalarEvolution &Init) 12410 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 12411 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 12412 for (auto I : Init.FlagsMap) 12413 FlagsMap.insert(I); 12414 } 12415 12416 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 12417 // For each block. 12418 for (auto *BB : L.getBlocks()) 12419 for (auto &I : *BB) { 12420 if (!SE.isSCEVable(I.getType())) 12421 continue; 12422 12423 auto *Expr = SE.getSCEV(&I); 12424 auto II = RewriteMap.find(Expr); 12425 12426 if (II == RewriteMap.end()) 12427 continue; 12428 12429 // Don't print things that are not interesting. 12430 if (II->second.second == Expr) 12431 continue; 12432 12433 OS.indent(Depth) << "[PSE]" << I << ":\n"; 12434 OS.indent(Depth + 2) << *Expr << "\n"; 12435 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 12436 } 12437 } 12438 12439 // Match the mathematical pattern A - (A / B) * B, where A and B can be 12440 // arbitrary expressions. 12441 // It's not always easy, as A and B can be folded (imagine A is X / 2, and B is 12442 // 4, A / B becomes X / 8). 12443 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS, 12444 const SCEV *&RHS) { 12445 const auto *Add = dyn_cast<SCEVAddExpr>(Expr); 12446 if (Add == nullptr || Add->getNumOperands() != 2) 12447 return false; 12448 12449 const SCEV *A = Add->getOperand(1); 12450 const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0)); 12451 12452 if (Mul == nullptr) 12453 return false; 12454 12455 const auto MatchURemWithDivisor = [&](const SCEV *B) { 12456 // (SomeExpr + (-(SomeExpr / B) * B)). 12457 if (Expr == getURemExpr(A, B)) { 12458 LHS = A; 12459 RHS = B; 12460 return true; 12461 } 12462 return false; 12463 }; 12464 12465 // (SomeExpr + (-1 * (SomeExpr / B) * B)). 12466 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0))) 12467 return MatchURemWithDivisor(Mul->getOperand(1)) || 12468 MatchURemWithDivisor(Mul->getOperand(2)); 12469 12470 // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)). 12471 if (Mul->getNumOperands() == 2) 12472 return MatchURemWithDivisor(Mul->getOperand(1)) || 12473 MatchURemWithDivisor(Mul->getOperand(0)) || 12474 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) || 12475 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0))); 12476 return false; 12477 } 12478