//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
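
// Illustrative example (ours, not from the references above): for a loop
// such as
//
//   for (i = 0; i != n; ++i)
//     ... = A[3*i + 7];
//
// the induction variable i is modeled as the recurrence {0,+,1}<%loop>
// (start 0, step 1), and the index expression 3*i + 7 folds to
// {7,+,3}<%loop>; trip counts and closed-form values fall out of folding
// such recurrences, as implemented below.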

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
cl::desc("Maximum number of iterations SCEV will " 154 "symbolically execute a constant " 155 "derived loop"), 156 cl::init(100)); 157 158 // FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean. 159 static cl::opt<bool> VerifySCEV( 160 "verify-scev", cl::Hidden, 161 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); 162 static cl::opt<bool> VerifySCEVStrict( 163 "verify-scev-strict", cl::Hidden, 164 cl::desc("Enable stricter verification with -verify-scev is passed")); 165 static cl::opt<bool> 166 VerifySCEVMap("verify-scev-maps", cl::Hidden, 167 cl::desc("Verify no dangling value in ScalarEvolution's " 168 "ExprValueMap (slow)")); 169 170 static cl::opt<bool> VerifyIR( 171 "scev-verify-ir", cl::Hidden, 172 cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), 173 cl::init(false)); 174 175 static cl::opt<unsigned> MulOpsInlineThreshold( 176 "scev-mulops-inline-threshold", cl::Hidden, 177 cl::desc("Threshold for inlining multiplication operands into a SCEV"), 178 cl::init(32)); 179 180 static cl::opt<unsigned> AddOpsInlineThreshold( 181 "scev-addops-inline-threshold", cl::Hidden, 182 cl::desc("Threshold for inlining addition operands into a SCEV"), 183 cl::init(500)); 184 185 static cl::opt<unsigned> MaxSCEVCompareDepth( 186 "scalar-evolution-max-scev-compare-depth", cl::Hidden, 187 cl::desc("Maximum depth of recursive SCEV complexity comparisons"), 188 cl::init(32)); 189 190 static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth( 191 "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, 192 cl::desc("Maximum depth of recursive SCEV operations implication analysis"), 193 cl::init(2)); 194 195 static cl::opt<unsigned> MaxValueCompareDepth( 196 "scalar-evolution-max-value-compare-depth", cl::Hidden, 197 cl::desc("Maximum depth of recursive value complexity comparisons"), 198 cl::init(2)); 199 200 static cl::opt<unsigned> 201 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, 202 cl::desc("Maximum depth of recursive arithmetics"), 203 cl::init(32)); 204 205 static cl::opt<unsigned> MaxConstantEvolvingDepth( 206 "scalar-evolution-max-constant-evolving-depth", cl::Hidden, 207 cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); 208 209 static cl::opt<unsigned> 210 MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, 211 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), 212 cl::init(8)); 213 214 static cl::opt<unsigned> 215 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, 216 cl::desc("Max coefficients in AddRec during evolving"), 217 cl::init(8)); 218 219 static cl::opt<unsigned> 220 HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, 221 cl::desc("Size of the expression which is considered huge"), 222 cl::init(4096)); 223 224 static cl::opt<bool> 225 ClassifyExpressions("scalar-evolution-classify-expressions", 226 cl::Hidden, cl::init(true), 227 cl::desc("When printing analysis, include information on every instruction")); 228 229 static cl::opt<bool> UseExpensiveRangeSharpening( 230 "scalar-evolution-use-expensive-range-sharpening", cl::Hidden, 231 cl::init(false), 232 cl::desc("Use more powerful methods of sharpening expression ranges. 
May " 233 "be costly in terms of compile time")); 234 235 //===----------------------------------------------------------------------===// 236 // SCEV class definitions 237 //===----------------------------------------------------------------------===// 238 239 //===----------------------------------------------------------------------===// 240 // Implementation of the SCEV class. 241 // 242 243 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 244 LLVM_DUMP_METHOD void SCEV::dump() const { 245 print(dbgs()); 246 dbgs() << '\n'; 247 } 248 #endif 249 250 void SCEV::print(raw_ostream &OS) const { 251 switch (getSCEVType()) { 252 case scConstant: 253 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false); 254 return; 255 case scPtrToInt: { 256 const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this); 257 const SCEV *Op = PtrToInt->getOperand(); 258 OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to " 259 << *PtrToInt->getType() << ")"; 260 return; 261 } 262 case scTruncate: { 263 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this); 264 const SCEV *Op = Trunc->getOperand(); 265 OS << "(trunc " << *Op->getType() << " " << *Op << " to " 266 << *Trunc->getType() << ")"; 267 return; 268 } 269 case scZeroExtend: { 270 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this); 271 const SCEV *Op = ZExt->getOperand(); 272 OS << "(zext " << *Op->getType() << " " << *Op << " to " 273 << *ZExt->getType() << ")"; 274 return; 275 } 276 case scSignExtend: { 277 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this); 278 const SCEV *Op = SExt->getOperand(); 279 OS << "(sext " << *Op->getType() << " " << *Op << " to " 280 << *SExt->getType() << ")"; 281 return; 282 } 283 case scAddRecExpr: { 284 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this); 285 OS << "{" << *AR->getOperand(0); 286 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) 287 OS << ",+," << *AR->getOperand(i); 288 OS << "}<"; 289 if (AR->hasNoUnsignedWrap()) 290 OS << "nuw><"; 291 if (AR->hasNoSignedWrap()) 292 OS << "nsw><"; 293 if (AR->hasNoSelfWrap() && 294 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW))) 295 OS << "nw><"; 296 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false); 297 OS << ">"; 298 return; 299 } 300 case scAddExpr: 301 case scMulExpr: 302 case scUMaxExpr: 303 case scSMaxExpr: 304 case scUMinExpr: 305 case scSMinExpr: { 306 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this); 307 const char *OpStr = nullptr; 308 switch (NAry->getSCEVType()) { 309 case scAddExpr: OpStr = " + "; break; 310 case scMulExpr: OpStr = " * "; break; 311 case scUMaxExpr: OpStr = " umax "; break; 312 case scSMaxExpr: OpStr = " smax "; break; 313 case scUMinExpr: 314 OpStr = " umin "; 315 break; 316 case scSMinExpr: 317 OpStr = " smin "; 318 break; 319 default: 320 llvm_unreachable("There are no other nary expression types."); 321 } 322 OS << "("; 323 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); 324 I != E; ++I) { 325 OS << **I; 326 if (std::next(I) != E) 327 OS << OpStr; 328 } 329 OS << ")"; 330 switch (NAry->getSCEVType()) { 331 case scAddExpr: 332 case scMulExpr: 333 if (NAry->hasNoUnsignedWrap()) 334 OS << "<nuw>"; 335 if (NAry->hasNoSignedWrap()) 336 OS << "<nsw>"; 337 break; 338 default: 339 // Nothing to print for other nary expressions. 
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                           SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
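///
/// Illustration (hypothetical operands): if Ops is {%a, 2, %b, 2}, where 2 is
/// a SCEVConstant and %a, %b are SCEVUnknowns, the result is {2, 2, %a, %b}:
/// constants have the lowest getSCEVType() and therefore sort first, and the
/// duplicate 2s end up adjacent, so callers can fold them with a linear scan.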
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula only requires less than W + K bits. Also, the first
  // formula requires a division step, whereas this formula only requires
  // multiplies and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
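  //
  // Worked example (for illustration; K and W here are arbitrary choices):
  // with K = 3 and W = 8, K! = 6 = 2^1 * 3, so T = 1 and K!/2^T = 3. The
  // multiplicative inverse of 3 mod 2^8 is 171 (3 * 171 = 513 = 2*256 + 1).
  // For It = 5: the product 5 * 4 * 3 = 60 is computed at W + T = 9 bits,
  // the shift by T gives 30, and 30 * 171 mod 2^8 = 10, which is indeed
  // BC(5, 3) = 10.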

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying when calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
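///
/// For instance (illustrative): {0,+,1,+,1} takes the values 0, 1, 3, 6, ...
/// and evaluates at iteration It to 0 + 1*It + 1*It*(It-1)/2; at It = 3 this
/// gives 3 + 3 = 6, matching the sequence above.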
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");
  assert(Depth <= 1 && "getPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return getTruncateOrZeroExtend(Op, Ty);

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return getTruncateOrZeroExtend(S, Ty);

  // If not, is this expression something we can't reduce any further?
  if (isa<SCEVUnknown>(Op)) {
    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());
    assert(getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(
               Op->getType())) == getDataLayout().getTypeSizeInBits(IntPtrTy) &&
           "We can only model ptrtoint if SCEV's effective (integer) type is "
           "sufficiently wide to represent all possible pointer values.");
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return getTruncateOrZeroExtend(S, Ty);
  }

  assert(Depth == 0 &&
         "getPtrToIntExpr() should not self-recurse for non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      Type *ExprPtrTy = Expr->getType();
      assert(ExprPtrTy->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      Type *ExprIntPtrTy = SE.getDataLayout().getIntPtrType(ExprPtrTy);
      return SE.getPtrToIntExpr(Expr, ExprIntPtrTy, /*Depth=*/1);
    }
  };

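  // For example (illustrative): given a pointer-typed expression such as
  // (4 + %ptr), the rewriter keeps the integer operand 4 as-is and replaces
  // the pointer-typed SCEVUnknown %ptr with (ptrtoint %ptr), producing
  // (4 + (ptrtoint %ptr)), which is integer-typed throughout.
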
  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that ID was inserted into the cache by the recursive calls and
    // modifications above. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

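  // Illustration (hypothetical values): trunc i64 ((zext i32 %x to i64) +
  // (sext i32 %y to i64)) to i32 distributes to (%x + %y) with zero new
  // truncates, so it is folded; trunc i64 (%a * %b) to i32 would need two
  // new truncates and is kept as an explicit SCEVTruncateExpr instead.
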
  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}
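
// Worked example (for illustration; the width and range are arbitrary): at
// bit width 8 with Step known to lie in [1, 3], the signed limit is
// INT8_MIN - 3 == 125 (mod 2^8) with predicate SLT: any value s< 125 can be
// incremented by at most 3 without exceeding INT8_MAX. Likewise the unsigned
// limit is 0 - 3 == 253 with predicate ULT.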

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
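//
// For instance (illustrative, with Step == 1): {zext(%s + 1),+,1} becomes
// {1 + zext(%s),+,1} once we can show that %s + 1 does not unsigned-overflow,
// which exposes zext(%s) as the pre-increment start value.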
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

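// Worked example (for illustration; the numbers are arbitrary): with
// C = 13 (0b1101) and x known to have at least two trailing zero bits,
// TZ = 2 and D = C.trunc(2).zext(W) = 1. Then C - D = 12 (0b1100) keeps two
// trailing zeros, and the top-level (1 + (12 + x)) cannot wrap because D is
// smaller than 2^TZ and the low TZ bits of (12 + x) are zero.
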
1518 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1519 const APInt &ConstantStart, 1520 const SCEV *Step) { 1521 const unsigned BitWidth = ConstantStart.getBitWidth(); 1522 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1523 if (TZ) 1524 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1525 : ConstantStart; 1526 return APInt(BitWidth, 0); 1527 } 1528 1529 const SCEV * 1530 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1531 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1532 "This is not an extending conversion!"); 1533 assert(isSCEVable(Ty) && 1534 "This is not a conversion to a SCEVable type!"); 1535 Ty = getEffectiveSCEVType(Ty); 1536 1537 // Fold if the operand is constant. 1538 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1539 return getConstant( 1540 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1541 1542 // zext(zext(x)) --> zext(x) 1543 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1544 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1545 1546 // Before doing any expensive analysis, check to see if we've already 1547 // computed a SCEV for this Op and Ty. 1548 FoldingSetNodeID ID; 1549 ID.AddInteger(scZeroExtend); 1550 ID.AddPointer(Op); 1551 ID.AddPointer(Ty); 1552 void *IP = nullptr; 1553 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1554 if (Depth > MaxCastDepth) { 1555 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1556 Op, Ty); 1557 UniqueSCEVs.InsertNode(S, IP); 1558 addToLoopUseLists(S); 1559 return S; 1560 } 1561 1562 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1563 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1564 // It's possible the bits taken off by the truncate were all zero bits. If 1565 // so, we should be able to simplify this further. 1566 const SCEV *X = ST->getOperand(); 1567 ConstantRange CR = getUnsignedRange(X); 1568 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1569 unsigned NewBits = getTypeSizeInBits(Ty); 1570 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1571 CR.zextOrTrunc(NewBits))) 1572 return getTruncateOrZeroExtend(X, Ty, Depth); 1573 } 1574 1575 // If the input value is a chrec scev, and we can prove that the value 1576 // did not overflow the old, smaller, value, we can zero extend all of the 1577 // operands (often constants). This allows analysis of something like 1578 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } 1579 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1580 if (AR->isAffine()) { 1581 const SCEV *Start = AR->getStart(); 1582 const SCEV *Step = AR->getStepRecurrence(*this); 1583 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1584 const Loop *L = AR->getLoop(); 1585 1586 if (!AR->hasNoUnsignedWrap()) { 1587 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1588 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1589 } 1590 1591 // If we have special knowledge that this addrec won't overflow, 1592 // we don't need to do any further analysis. 1593 if (AR->hasNoUnsignedWrap()) 1594 return getAddRecExpr( 1595 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1596 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1597 1598 // Check whether the backedge-taken count is SCEVCouldNotCompute. 
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.
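      // For instance (illustrative): a loop carrying a llvm.assume that
      // bounds its induction variable may have no computable max
      // backedge-taken count, yet the assumption can still let the
      // induction-based proof below establish <nuw>.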
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {

        auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
        if (AR->hasNoUnsignedWrap()) {
          // Same as the nuw case above - duplicated here to avoid a compile
          // time issue. It's not clear that the order of checks matters, but
          // reordering them is one of two possible causes of a change which
          // was reverted. Be conservative for the moment.
          return getAddRecExpr(
              getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                       Depth + 1),
              getZeroExtendExpr(Step, Ty, Depth + 1), L,
              AR->getNoWrapFlags());
        }

        // For a negative step, we can extend the operands iff doing so only
        // traverses values in the range zext([0,UINT_MAX]).
        if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec. Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsigned-overflow then we can, by
      // definition, commute the zero extension with the addition operation.
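      // For example: zext((%a + 7)<nuw> to i64) == (zext(%a to i64) + 7)<nuw>,
      // since <nuw> promises the narrow sum never wraps past the top of the
      // narrow type, so widening each operand first loses nothing.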
1756 SmallVector<const SCEV *, 4> Ops; 1757 for (const auto *Op : SA->operands()) 1758 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1759 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1760 } 1761 1762 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...)) 1763 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap 1764 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1765 // 1766 // Often address arithmetics contain expressions like 1767 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). 1768 // This transformation is useful while proving that such expressions are 1769 // equal or differ by a small constant amount, see LoadStoreVectorizer pass. 1770 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1771 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1772 if (D != 0) { 1773 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1774 const SCEV *SResidual = 1775 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1776 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1777 return getAddExpr(SZExtD, SZExtR, 1778 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1779 Depth + 1); 1780 } 1781 } 1782 } 1783 1784 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1785 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1786 if (SM->hasNoUnsignedWrap()) { 1787 // If the multiply does not unsign overflow then we can, by definition, 1788 // commute the zero extension with the multiply operation. 1789 SmallVector<const SCEV *, 4> Ops; 1790 for (const auto *Op : SM->operands()) 1791 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1792 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1793 } 1794 1795 // zext(2^K * (trunc X to iN)) to iM -> 1796 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1797 // 1798 // Proof: 1799 // 1800 // zext(2^K * (trunc X to iN)) to iM 1801 // = zext((trunc X to iN) << K) to iM 1802 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1803 // (because shl removes the top K bits) 1804 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1805 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1806 // 1807 if (SM->getNumOperands() == 2) 1808 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1809 if (MulLHS->getAPInt().isPowerOf2()) 1810 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1811 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1812 MulLHS->getAPInt().logBase2(); 1813 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1814 return getMulExpr( 1815 getZeroExtendExpr(MulLHS, Ty), 1816 getZeroExtendExpr( 1817 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1818 SCEV::FlagNUW, Depth + 1); 1819 } 1820 } 1821 1822 // The cast wasn't folded; create an explicit cast node. 1823 // Recompute the insert position, as it may have been invalidated. 
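  // (The recursive simplification calls above may have inserted new nodes
  // into UniqueSCEVs, which can invalidate the insertion point IP computed
  // by the earlier FindNodeOrInsertPos call.)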
1824 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1825 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1826 Op, Ty); 1827 UniqueSCEVs.InsertNode(S, IP); 1828 addToLoopUseLists(S); 1829 return S; 1830 } 1831 1832 const SCEV * 1833 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1834 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1835 "This is not an extending conversion!"); 1836 assert(isSCEVable(Ty) && 1837 "This is not a conversion to a SCEVable type!"); 1838 Ty = getEffectiveSCEVType(Ty); 1839 1840 // Fold if the operand is constant. 1841 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1842 return getConstant( 1843 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1844 1845 // sext(sext(x)) --> sext(x) 1846 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1847 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1848 1849 // sext(zext(x)) --> zext(x) 1850 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1851 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1852 1853 // Before doing any expensive analysis, check to see if we've already 1854 // computed a SCEV for this Op and Ty. 1855 FoldingSetNodeID ID; 1856 ID.AddInteger(scSignExtend); 1857 ID.AddPointer(Op); 1858 ID.AddPointer(Ty); 1859 void *IP = nullptr; 1860 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1861 // Limit recursion depth. 1862 if (Depth > MaxCastDepth) { 1863 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1864 Op, Ty); 1865 UniqueSCEVs.InsertNode(S, IP); 1866 addToLoopUseLists(S); 1867 return S; 1868 } 1869 1870 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1871 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1872 // It's possible the bits taken off by the truncate were all sign bits. If 1873 // so, we should be able to simplify this further. 1874 const SCEV *X = ST->getOperand(); 1875 ConstantRange CR = getSignedRange(X); 1876 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1877 unsigned NewBits = getTypeSizeInBits(Ty); 1878 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1879 CR.sextOrTrunc(NewBits))) 1880 return getTruncateOrSignExtend(X, Ty, Depth); 1881 } 1882 1883 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1884 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1885 if (SA->hasNoSignedWrap()) { 1886 // If the addition does not sign overflow then we can, by definition, 1887 // commute the sign extension with the addition operation. 1888 SmallVector<const SCEV *, 4> Ops; 1889 for (const auto *Op : SA->operands()) 1890 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1891 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1892 } 1893 1894 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1895 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1896 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 
    //
    // For instance, this will bring two seemingly different expressions:
    //   1 + sext(5 + 20 * %x + 24 * %y)  and
    //       sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //   2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
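          // The check below compares sext(Start + Step*MaxBECount) computed
          // in the narrow type against the same sum computed with operands
          // first extended to a type twice as wide; if the two agree, the
          // narrow computation cannot have signed-wrapped.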
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            //    => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      auto NewFlags = proveNoSignedWrapViaInduction(AR);
      setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      if (AR->hasNoSignedWrap()) {
        // Same as the nsw case above - duplicated here to avoid a compile
        // time issue. It's not clear that the order of checks matters, but
        // reordering them is one of two possible causes of a change which
        // was reverted. Be conservative for the moment.
2022 return getAddRecExpr( 2023 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2024 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2025 } 2026 2027 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw> 2028 // if D + (C - D + Step * n) could be proven to not signed wrap 2029 // where D maximizes the number of trailing zeros of (C - D + Step * n) 2030 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 2031 const APInt &C = SC->getAPInt(); 2032 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 2033 if (D != 0) { 2034 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 2035 const SCEV *SResidual = 2036 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 2037 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 2038 return getAddExpr(SSExtD, SSExtR, 2039 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 2040 Depth + 1); 2041 } 2042 } 2043 2044 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 2045 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); 2046 return getAddRecExpr( 2047 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2048 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2049 } 2050 } 2051 2052 // If the input value is provably positive and we could not simplify 2053 // away the sext build a zext instead. 2054 if (isKnownNonNegative(Op)) 2055 return getZeroExtendExpr(Op, Ty, Depth + 1); 2056 2057 // The cast wasn't folded; create an explicit cast node. 2058 // Recompute the insert position, as it may have been invalidated. 2059 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2060 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 2061 Op, Ty); 2062 UniqueSCEVs.InsertNode(S, IP); 2063 addToLoopUseLists(S); 2064 return S; 2065 } 2066 2067 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 2068 /// unspecified bits out to the given type. 2069 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 2070 Type *Ty) { 2071 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 2072 "This is not an extending conversion!"); 2073 assert(isSCEVable(Ty) && 2074 "This is not a conversion to a SCEVable type!"); 2075 Ty = getEffectiveSCEVType(Ty); 2076 2077 // Sign-extend negative constants. 2078 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 2079 if (SC->getAPInt().isNegative()) 2080 return getSignExtendExpr(Op, Ty); 2081 2082 // Peel off a truncate cast. 2083 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 2084 const SCEV *NewOp = T->getOperand(); 2085 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 2086 return getAnyExtendExpr(NewOp, Ty); 2087 return getTruncateOrNoop(NewOp, Ty); 2088 } 2089 2090 // Next try a zext cast. If the cast is folded, use it. 2091 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 2092 if (!isa<SCEVZeroExtendExpr>(ZExt)) 2093 return ZExt; 2094 2095 // Next try a sext cast. If the cast is folded, use it. 2096 const SCEV *SExt = getSignExtendExpr(Op, Ty); 2097 if (!isa<SCEVSignExtendExpr>(SExt)) 2098 return SExt; 2099 2100 // Force the cast to be folded into the operands of an addrec. 
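  // For example (illustrative): any_ext({0,+,3}<L> to i64) becomes
  // {any_ext(0 to i64),+,any_ext(3 to i64)}<nw><L>.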
2101 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 2102 SmallVector<const SCEV *, 4> Ops; 2103 for (const SCEV *Op : AR->operands()) 2104 Ops.push_back(getAnyExtendExpr(Op, Ty)); 2105 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 2106 } 2107 2108 // If the expression is obviously signed, use the sext cast value. 2109 if (isa<SCEVSMaxExpr>(Op)) 2110 return SExt; 2111 2112 // Absent any other information, use the zext cast value. 2113 return ZExt; 2114 } 2115 2116 /// Process the given Ops list, which is a list of operands to be added under 2117 /// the given scale, update the given map. This is a helper function for 2118 /// getAddRecExpr. As an example of what it does, given a sequence of operands 2119 /// that would form an add expression like this: 2120 /// 2121 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 2122 /// 2123 /// where A and B are constants, update the map with these values: 2124 /// 2125 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2126 /// 2127 /// and add 13 + A*B*29 to AccumulatedConstant. 2128 /// This will allow getAddRecExpr to produce this: 2129 /// 2130 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2131 /// 2132 /// This form often exposes folding opportunities that are hidden in 2133 /// the original operand list. 2134 /// 2135 /// Return true iff it appears that any interesting folding opportunities 2136 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 2137 /// the common case where no interesting opportunities are present, and 2138 /// is also used as a check to avoid infinite recursion. 2139 static bool 2140 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2141 SmallVectorImpl<const SCEV *> &NewOps, 2142 APInt &AccumulatedConstant, 2143 const SCEV *const *Ops, size_t NumOperands, 2144 const APInt &Scale, 2145 ScalarEvolution &SE) { 2146 bool Interesting = false; 2147 2148 // Iterate over the add operands. They are sorted, with constants first. 2149 unsigned i = 0; 2150 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2151 ++i; 2152 // Pull a buried constant out to the outside. 2153 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2154 Interesting = true; 2155 AccumulatedConstant += Scale * C->getAPInt(); 2156 } 2157 2158 // Next comes everything else. We're especially interested in multiplies 2159 // here, but they're in the middle, so just visit the rest with one loop. 2160 for (; i != NumOperands; ++i) { 2161 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2162 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2163 APInt NewScale = 2164 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2165 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2166 // A multiplication of a constant with another add; recurse. 2167 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2168 Interesting |= 2169 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2170 Add->op_begin(), Add->getNumOperands(), 2171 NewScale, SE); 2172 } else { 2173 // A multiplication of a constant with some other value. Update 2174 // the map. 
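        // For example (illustrative): visiting (6 * %x) under Scale == 2
        // records the entry (%x, 12); if %x is already present, its
        // accumulated scale is bumped instead, which signals a folding
        // opportunity.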
2175 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2176 const SCEV *Key = SE.getMulExpr(MulOps); 2177 auto Pair = M.insert({Key, NewScale}); 2178 if (Pair.second) { 2179 NewOps.push_back(Pair.first->first); 2180 } else { 2181 Pair.first->second += NewScale; 2182 // The map already had an entry for this value, which may indicate 2183 // a folding opportunity. 2184 Interesting = true; 2185 } 2186 } 2187 } else { 2188 // An ordinary operand. Update the map. 2189 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2190 M.insert({Ops[i], Scale}); 2191 if (Pair.second) { 2192 NewOps.push_back(Pair.first->first); 2193 } else { 2194 Pair.first->second += Scale; 2195 // The map already had an entry for this value, which may indicate 2196 // a folding opportunity. 2197 Interesting = true; 2198 } 2199 } 2200 } 2201 2202 return Interesting; 2203 } 2204 2205 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2206 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2207 // can't-overflow flags for the operation if possible. 2208 static SCEV::NoWrapFlags 2209 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2210 const ArrayRef<const SCEV *> Ops, 2211 SCEV::NoWrapFlags Flags) { 2212 using namespace std::placeholders; 2213 2214 using OBO = OverflowingBinaryOperator; 2215 2216 bool CanAnalyze = 2217 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2218 (void)CanAnalyze; 2219 assert(CanAnalyze && "don't call from other places!"); 2220 2221 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2222 SCEV::NoWrapFlags SignOrUnsignWrap = 2223 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2224 2225 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2226 auto IsKnownNonNegative = [&](const SCEV *S) { 2227 return SE->isKnownNonNegative(S); 2228 }; 2229 2230 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2231 Flags = 2232 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2233 2234 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2235 2236 if (SignOrUnsignWrap != SignOrUnsignMask && 2237 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2238 isa<SCEVConstant>(Ops[0])) { 2239 2240 auto Opcode = [&] { 2241 switch (Type) { 2242 case scAddExpr: 2243 return Instruction::Add; 2244 case scMulExpr: 2245 return Instruction::Mul; 2246 default: 2247 llvm_unreachable("Unexpected SCEV op."); 2248 } 2249 }(); 2250 2251 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2252 2253 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2254 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2255 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2256 Opcode, C, OBO::NoSignedWrap); 2257 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2258 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2259 } 2260 2261 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 
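    // For example (illustrative): for (%x + 40) over i32, FlagNUW can be
    // inferred whenever the unsigned range of %x lies within [0, 2^32 - 40),
    // since the addition then cannot carry past UINT32_MAX.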
2262 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2263 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2264 Opcode, C, OBO::NoUnsignedWrap); 2265 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2266 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2267 } 2268 } 2269 2270 return Flags; 2271 } 2272 2273 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2274 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2275 } 2276 2277 /// Get a canonical add expression, or something simpler if possible. 2278 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2279 SCEV::NoWrapFlags OrigFlags, 2280 unsigned Depth) { 2281 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2282 "only nuw or nsw allowed"); 2283 assert(!Ops.empty() && "Cannot get empty add!"); 2284 if (Ops.size() == 1) return Ops[0]; 2285 #ifndef NDEBUG 2286 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2287 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2288 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2289 "SCEVAddExpr operand types don't match!"); 2290 #endif 2291 2292 // Sort by complexity, this groups all similar expression types together. 2293 GroupByComplexity(Ops, &LI, DT); 2294 2295 // If there are any constants, fold them together. 2296 unsigned Idx = 0; 2297 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2298 ++Idx; 2299 assert(Idx < Ops.size()); 2300 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2301 // We found two constants, fold them together! 2302 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2303 if (Ops.size() == 2) return Ops[0]; 2304 Ops.erase(Ops.begin()+1); // Erase the folded element 2305 LHSC = cast<SCEVConstant>(Ops[0]); 2306 } 2307 2308 // If we are left with a constant zero being added, strip it off. 2309 if (LHSC->getValue()->isZero()) { 2310 Ops.erase(Ops.begin()); 2311 --Idx; 2312 } 2313 2314 if (Ops.size() == 1) return Ops[0]; 2315 } 2316 2317 // Delay expensive flag strengthening until necessary. 2318 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { 2319 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags); 2320 }; 2321 2322 // Limit recursion calls depth. 2323 if (Depth > MaxArithDepth || hasHugeExpression(Ops)) 2324 return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); 2325 2326 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) { 2327 // Don't strengthen flags if we have no new information. 2328 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S); 2329 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags) 2330 Add->setNoWrapFlags(ComputeFlags(Ops)); 2331 return S; 2332 } 2333 2334 // Okay, check to see if the same value occurs in the operand list more than 2335 // once. If so, merge them together into an multiply expression. Since we 2336 // sorted the list, these values are required to be adjacent. 2337 Type *Ty = Ops[0]->getType(); 2338 bool FoundMatch = false; 2339 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 2340 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 2341 // Scan ahead to count how many equal operands there are. 2342 unsigned Count = 2; 2343 while (i+Count != e && Ops[i+Count] == Ops[i]) 2344 ++Count; 2345 // Merge the values into a multiply. 
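      // For example: %a + %b + %b + %b becomes %a + (3 * %b); here Count == 3.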
    const SCEV *Scale = getConstant(Ty, Count);
    const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
    if (Ops.size() == Count)
      return Mul;
    Ops[i] = Mul;
    Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
    --i; e -= Count - 1;
    FoundMatch = true;
  }
  if (FoundMatch)
    return getAddExpr(Ops, OrigFlags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap,
                                        Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
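  // For example (illustrative): ((%a + %b) + %c) carries a nested add
  // operand; inlining it yields the flat list (%a, %b, %c), which the
  // recursive getAddExpr call below re-sorts and re-simplifies.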
2428 if (Idx < Ops.size()) { 2429 bool DeletedAdd = false; 2430 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2431 if (Ops.size() > AddOpsInlineThreshold || 2432 Add->getNumOperands() > AddOpsInlineThreshold) 2433 break; 2434 // If we have an add, expand the add operands onto the end of the operands 2435 // list. 2436 Ops.erase(Ops.begin()+Idx); 2437 Ops.append(Add->op_begin(), Add->op_end()); 2438 DeletedAdd = true; 2439 } 2440 2441 // If we deleted at least one add, we added operands to the end of the list, 2442 // and they are not necessarily sorted. Recurse to resort and resimplify 2443 // any operands we just acquired. 2444 if (DeletedAdd) 2445 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2446 } 2447 2448 // Skip over the add expression until we get to a multiply. 2449 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2450 ++Idx; 2451 2452 // Check to see if there are any folding opportunities present with 2453 // operands multiplied by constant values. 2454 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { 2455 uint64_t BitWidth = getTypeSizeInBits(Ty); 2456 DenseMap<const SCEV *, APInt> M; 2457 SmallVector<const SCEV *, 8> NewOps; 2458 APInt AccumulatedConstant(BitWidth, 0); 2459 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2460 Ops.data(), Ops.size(), 2461 APInt(BitWidth, 1), *this)) { 2462 struct APIntCompare { 2463 bool operator()(const APInt &LHS, const APInt &RHS) const { 2464 return LHS.ult(RHS); 2465 } 2466 }; 2467 2468 // Some interesting folding opportunity is present, so its worthwhile to 2469 // re-generate the operands list. Group the operands by constant scale, 2470 // to avoid multiplying by the same constant scale multiple times. 2471 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2472 for (const SCEV *NewOp : NewOps) 2473 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2474 // Re-generate the operands list. 2475 Ops.clear(); 2476 if (AccumulatedConstant != 0) 2477 Ops.push_back(getConstant(AccumulatedConstant)); 2478 for (auto &MulOp : MulOpLists) 2479 if (MulOp.first != 0) 2480 Ops.push_back(getMulExpr( 2481 getConstant(MulOp.first), 2482 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2483 SCEV::FlagAnyWrap, Depth + 1)); 2484 if (Ops.empty()) 2485 return getZero(Ty); 2486 if (Ops.size() == 1) 2487 return Ops[0]; 2488 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2489 } 2490 } 2491 2492 // If we are adding something to a multiply expression, make sure the 2493 // something is not already an operand of the multiply. If so, merge it into 2494 // the multiply. 2495 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2496 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2497 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2498 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2499 if (isa<SCEVConstant>(MulOpSCEV)) 2500 continue; 2501 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2502 if (MulOpSCEV == Ops[AddOp]) { 2503 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2504 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2505 if (Mul->getNumOperands() != 2) { 2506 // If the multiply has more than two operands, we must get the 2507 // Y*Z term. 
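            // For example (illustrative): with Mul == (X * Y * Z) and MulOp
            // pointing at X, the remaining operands form InnerMul == (Y * Z).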
2508 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2509 Mul->op_begin()+MulOp); 2510 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2511 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2512 } 2513 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2514 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2515 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2516 SCEV::FlagAnyWrap, Depth + 1); 2517 if (Ops.size() == 2) return OuterMul; 2518 if (AddOp < Idx) { 2519 Ops.erase(Ops.begin()+AddOp); 2520 Ops.erase(Ops.begin()+Idx-1); 2521 } else { 2522 Ops.erase(Ops.begin()+Idx); 2523 Ops.erase(Ops.begin()+AddOp-1); 2524 } 2525 Ops.push_back(OuterMul); 2526 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2527 } 2528 2529 // Check this multiply against other multiplies being added together. 2530 for (unsigned OtherMulIdx = Idx+1; 2531 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2532 ++OtherMulIdx) { 2533 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2534 // If MulOp occurs in OtherMul, we can fold the two multiplies 2535 // together. 2536 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2537 OMulOp != e; ++OMulOp) 2538 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2539 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2540 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2541 if (Mul->getNumOperands() != 2) { 2542 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2543 Mul->op_begin()+MulOp); 2544 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2545 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2546 } 2547 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2548 if (OtherMul->getNumOperands() != 2) { 2549 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2550 OtherMul->op_begin()+OMulOp); 2551 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2552 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2553 } 2554 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2555 const SCEV *InnerMulSum = 2556 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2557 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2558 SCEV::FlagAnyWrap, Depth + 1); 2559 if (Ops.size() == 2) return OuterMul; 2560 Ops.erase(Ops.begin()+Idx); 2561 Ops.erase(Ops.begin()+OtherMulIdx-1); 2562 Ops.push_back(OuterMul); 2563 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2564 } 2565 } 2566 } 2567 } 2568 2569 // If there are any add recurrences in the operands list, see if any other 2570 // added values are loop invariant. If so, we can fold them into the 2571 // recurrence. 2572 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2573 ++Idx; 2574 2575 // Scan over all recurrences, trying to fold loop invariants into them. 2576 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2577 // Scan all of the other operands to this add and add them to the vector if 2578 // they are loop invariant w.r.t. the recurrence. 2579 SmallVector<const SCEV *, 8> LIOps; 2580 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2581 const Loop *AddRecLoop = AddRec->getLoop(); 2582 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2583 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2584 LIOps.push_back(Ops[i]); 2585 Ops.erase(Ops.begin()+i); 2586 --i; --e; 2587 } 2588 2589 // If we found some loop invariants, fold them into the recurrence. 
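    // For example (illustrative): if %inv is invariant in L, then
    // %inv + {0,+,1}<L> folds to {%inv,+,1}<L>.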
2590 if (!LIOps.empty()) { 2591 // Compute nowrap flags for the addition of the loop-invariant ops and 2592 // the addrec. Temporarily push it as an operand for that purpose. 2593 LIOps.push_back(AddRec); 2594 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); 2595 LIOps.pop_back(); 2596 2597 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2598 LIOps.push_back(AddRec->getStart()); 2599 2600 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2601 AddRec->op_end()); 2602 // This follows from the fact that the no-wrap flags on the outer add 2603 // expression are applicable on the 0th iteration, when the add recurrence 2604 // will be equal to its start value. 2605 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2606 2607 // Build the new addrec. Propagate the NUW and NSW flags if both the 2608 // outer add and the inner addrec are guaranteed to have no overflow. 2609 // Always propagate NW. 2610 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2611 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2612 2613 // If all of the other operands were loop invariant, we are done. 2614 if (Ops.size() == 1) return NewRec; 2615 2616 // Otherwise, add the folded AddRec by the non-invariant parts. 2617 for (unsigned i = 0;; ++i) 2618 if (Ops[i] == AddRec) { 2619 Ops[i] = NewRec; 2620 break; 2621 } 2622 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2623 } 2624 2625 // Okay, if there weren't any loop invariants to be folded, check to see if 2626 // there are multiple AddRec's with the same loop induction variable being 2627 // added together. If so, we can fold them. 2628 for (unsigned OtherIdx = Idx+1; 2629 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2630 ++OtherIdx) { 2631 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2632 // so that the 1st found AddRecExpr is dominated by all others. 2633 assert(DT.dominates( 2634 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2635 AddRec->getLoop()->getHeader()) && 2636 "AddRecExprs are not sorted in reverse dominance order?"); 2637 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2638 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2639 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2640 AddRec->op_end()); 2641 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2642 ++OtherIdx) { 2643 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2644 if (OtherAddRec->getLoop() == AddRecLoop) { 2645 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2646 i != e; ++i) { 2647 if (i >= AddRecOps.size()) { 2648 AddRecOps.append(OtherAddRec->op_begin()+i, 2649 OtherAddRec->op_end()); 2650 break; 2651 } 2652 SmallVector<const SCEV *, 2> TwoOps = { 2653 AddRecOps[i], OtherAddRec->getOperand(i)}; 2654 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2655 } 2656 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2657 } 2658 } 2659 // Step size has changed, so we cannot guarantee no self-wraparound. 2660 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2661 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2662 } 2663 } 2664 2665 // Otherwise couldn't fold anything into this recurrence. Move onto the 2666 // next one. 2667 } 2668 2669 // Okay, it looks like we really DO need an add expr. Check to see if we 2670 // already have one, otherwise create a new one. 
2671 return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); 2672 } 2673 2674 const SCEV * 2675 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops, 2676 SCEV::NoWrapFlags Flags) { 2677 FoldingSetNodeID ID; 2678 ID.AddInteger(scAddExpr); 2679 for (const SCEV *Op : Ops) 2680 ID.AddPointer(Op); 2681 void *IP = nullptr; 2682 SCEVAddExpr *S = 2683 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2684 if (!S) { 2685 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2686 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2687 S = new (SCEVAllocator) 2688 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); 2689 UniqueSCEVs.InsertNode(S, IP); 2690 addToLoopUseLists(S); 2691 } 2692 S->setNoWrapFlags(Flags); 2693 return S; 2694 } 2695 2696 const SCEV * 2697 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops, 2698 const Loop *L, SCEV::NoWrapFlags Flags) { 2699 FoldingSetNodeID ID; 2700 ID.AddInteger(scAddRecExpr); 2701 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2702 ID.AddPointer(Ops[i]); 2703 ID.AddPointer(L); 2704 void *IP = nullptr; 2705 SCEVAddRecExpr *S = 2706 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2707 if (!S) { 2708 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2709 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2710 S = new (SCEVAllocator) 2711 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); 2712 UniqueSCEVs.InsertNode(S, IP); 2713 addToLoopUseLists(S); 2714 } 2715 setNoWrapFlags(S, Flags); 2716 return S; 2717 } 2718 2719 const SCEV * 2720 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops, 2721 SCEV::NoWrapFlags Flags) { 2722 FoldingSetNodeID ID; 2723 ID.AddInteger(scMulExpr); 2724 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2725 ID.AddPointer(Ops[i]); 2726 void *IP = nullptr; 2727 SCEVMulExpr *S = 2728 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2729 if (!S) { 2730 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2731 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2732 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), 2733 O, Ops.size()); 2734 UniqueSCEVs.InsertNode(S, IP); 2735 addToLoopUseLists(S); 2736 } 2737 S->setNoWrapFlags(Flags); 2738 return S; 2739 } 2740 2741 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { 2742 uint64_t k = i*j; 2743 if (j > 1 && k / j != i) Overflow = true; 2744 return k; 2745 } 2746 2747 /// Compute the result of "n choose k", the binomial coefficient. If an 2748 /// intermediate computation overflows, Overflow will be set and the return will 2749 /// be garbage. Overflow is not cleared on absence of overflow. 2750 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { 2751 // We use the multiplicative formula: 2752 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . 2753 // At each iteration, we take the n-th term of the numeral and divide by the 2754 // (k-n)th term of the denominator. This division will always produce an 2755 // integral result, and helps reduce the chance of overflow in the 2756 // intermediate computations. However, we can still overflow even when the 2757 // final result would fit. 
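//
// For example: Choose(5, 2) computes r = 1 * 5 / 1 == 5, then
// r = 5 * 4 / 2 == 10, i.e. C(5,2), without ever materializing the full
// factorial 5! == 120.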
2758 2759 if (n == 0 || n == k) return 1; 2760 if (k > n) return 0; 2761 2762 if (k > n/2) 2763 k = n-k; 2764 2765 uint64_t r = 1; 2766 for (uint64_t i = 1; i <= k; ++i) { 2767 r = umul_ov(r, n-(i-1), Overflow); 2768 r /= i; 2769 } 2770 return r; 2771 } 2772 2773 /// Determine if any of the operands in this SCEV are a constant or if 2774 /// any of the add or multiply expressions in this SCEV contain a constant. 2775 static bool containsConstantInAddMulChain(const SCEV *StartExpr) { 2776 struct FindConstantInAddMulChain { 2777 bool FoundConstant = false; 2778 2779 bool follow(const SCEV *S) { 2780 FoundConstant |= isa<SCEVConstant>(S); 2781 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); 2782 } 2783 2784 bool isDone() const { 2785 return FoundConstant; 2786 } 2787 }; 2788 2789 FindConstantInAddMulChain F; 2790 SCEVTraversal<FindConstantInAddMulChain> ST(F); 2791 ST.visitAll(StartExpr); 2792 return F.FoundConstant; 2793 } 2794 2795 /// Get a canonical multiply expression, or something simpler if possible. 2796 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2797 SCEV::NoWrapFlags OrigFlags, 2798 unsigned Depth) { 2799 assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) && 2800 "only nuw or nsw allowed"); 2801 assert(!Ops.empty() && "Cannot get empty mul!"); 2802 if (Ops.size() == 1) return Ops[0]; 2803 #ifndef NDEBUG 2804 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2805 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2806 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2807 "SCEVMulExpr operand types don't match!"); 2808 #endif 2809 2810 // Sort by complexity, this groups all similar expression types together. 2811 GroupByComplexity(Ops, &LI, DT); 2812 2813 // If there are any constants, fold them together. 2814 unsigned Idx = 0; 2815 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2816 ++Idx; 2817 assert(Idx < Ops.size()); 2818 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2819 // We found two constants, fold them together! 2820 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt()); 2821 if (Ops.size() == 2) return Ops[0]; 2822 Ops.erase(Ops.begin()+1); // Erase the folded element 2823 LHSC = cast<SCEVConstant>(Ops[0]); 2824 } 2825 2826 // If we have a multiply of zero, it will always be zero. 2827 if (LHSC->getValue()->isZero()) 2828 return LHSC; 2829 2830 // If we are left with a constant one being multiplied, strip it off. 2831 if (LHSC->getValue()->isOne()) { 2832 Ops.erase(Ops.begin()); 2833 --Idx; 2834 } 2835 2836 if (Ops.size() == 1) 2837 return Ops[0]; 2838 } 2839 2840 // Delay expensive flag strengthening until necessary. 2841 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { 2842 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags); 2843 }; 2844 2845 // Limit recursion calls depth. 2846 if (Depth > MaxArithDepth || hasHugeExpression(Ops)) 2847 return getOrCreateMulExpr(Ops, ComputeFlags(Ops)); 2848 2849 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) { 2850 // Don't strengthen flags if we have no new information. 
2851 SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S); 2852 if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags) 2853 Mul->setNoWrapFlags(ComputeFlags(Ops)); 2854 return S; 2855 } 2856 2857 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2858 if (Ops.size() == 2) { 2859 // C1*(C2+V) -> C1*C2 + C1*V 2860 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2861 // If any of Add's ops are Adds or Muls with a constant, apply this 2862 // transformation as well. 2863 // 2864 // TODO: There are some cases where this transformation is not 2865 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of 2866 // this transformation should be narrowed down. 2867 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) 2868 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2869 SCEV::FlagAnyWrap, Depth + 1), 2870 getMulExpr(LHSC, Add->getOperand(1), 2871 SCEV::FlagAnyWrap, Depth + 1), 2872 SCEV::FlagAnyWrap, Depth + 1); 2873 2874 if (Ops[0]->isAllOnesValue()) { 2875 // If we have a mul by -1 of an add, try distributing the -1 among the 2876 // add operands. 2877 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2878 SmallVector<const SCEV *, 4> NewOps; 2879 bool AnyFolded = false; 2880 for (const SCEV *AddOp : Add->operands()) { 2881 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2882 Depth + 1); 2883 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2884 NewOps.push_back(Mul); 2885 } 2886 if (AnyFolded) 2887 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 2888 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2889 // Negation preserves a recurrence's no self-wrap property. 2890 SmallVector<const SCEV *, 4> Operands; 2891 for (const SCEV *AddRecOp : AddRec->operands()) 2892 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, 2893 Depth + 1)); 2894 2895 return getAddRecExpr(Operands, AddRec->getLoop(), 2896 AddRec->getNoWrapFlags(SCEV::FlagNW)); 2897 } 2898 } 2899 } 2900 } 2901 2902 // Skip over the add expression until we get to a multiply. 2903 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2904 ++Idx; 2905 2906 // If there are mul operands inline them all into this expression. 2907 if (Idx < Ops.size()) { 2908 bool DeletedMul = false; 2909 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2910 if (Ops.size() > MulOpsInlineThreshold) 2911 break; 2912 // If we have an mul, expand the mul operands onto the end of the 2913 // operands list. 2914 Ops.erase(Ops.begin()+Idx); 2915 Ops.append(Mul->op_begin(), Mul->op_end()); 2916 DeletedMul = true; 2917 } 2918 2919 // If we deleted at least one mul, we added operands to the end of the 2920 // list, and they are not necessarily sorted. Recurse to resort and 2921 // resimplify any operands we just acquired. 2922 if (DeletedMul) 2923 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2924 } 2925 2926 // If there are any add recurrences in the operands list, see if any other 2927 // added values are loop invariant. If so, we can fold them into the 2928 // recurrence. 2929 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2930 ++Idx; 2931 2932 // Scan over all recurrences, trying to fold loop invariants into them. 2933 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2934 // Scan all of the other operands to this mul and add them to the vector 2935 // if they are loop invariant w.r.t. the recurrence. 
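    // For example (illustrative): in 4 * {1,+,2}<L>, the constant 4 is loop
    // invariant and distributes over the recurrence to give {4,+,8}<L>.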
2936 SmallVector<const SCEV *, 8> LIOps; 2937 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2938 const Loop *AddRecLoop = AddRec->getLoop(); 2939 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2940 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2941 LIOps.push_back(Ops[i]); 2942 Ops.erase(Ops.begin()+i); 2943 --i; --e; 2944 } 2945 2946 // If we found some loop invariants, fold them into the recurrence. 2947 if (!LIOps.empty()) { 2948 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 2949 SmallVector<const SCEV *, 4> NewOps; 2950 NewOps.reserve(AddRec->getNumOperands()); 2951 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 2952 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 2953 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 2954 SCEV::FlagAnyWrap, Depth + 1)); 2955 2956 // Build the new addrec. Propagate the NUW and NSW flags if both the 2957 // outer mul and the inner addrec are guaranteed to have no overflow. 2958 // 2959 // No self-wrap cannot be guaranteed after changing the step size, but 2960 // will be inferred if either NUW or NSW is true. 2961 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); 2962 const SCEV *NewRec = getAddRecExpr( 2963 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); 2964 2965 // If all of the other operands were loop invariant, we are done. 2966 if (Ops.size() == 1) return NewRec; 2967 2968 // Otherwise, multiply the folded AddRec by the non-invariant parts. 2969 for (unsigned i = 0;; ++i) 2970 if (Ops[i] == AddRec) { 2971 Ops[i] = NewRec; 2972 break; 2973 } 2974 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2975 } 2976 2977 // Okay, if there weren't any loop invariants to be folded, check to see 2978 // if there are multiple AddRec's with the same loop induction variable 2979 // being multiplied together. If so, we can fold them. 2980 2981 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 2982 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 2983 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 2984 // ]]],+,...up to x=2n}. 2985 // Note that the arguments to choose() are always integers with values 2986 // known at compile time, never SCEV objects. 2987 // 2988 // The implementation avoids pointless extra computations when the two 2989 // addrec's are of different length (mathematically, it's equivalent to 2990 // an infinite stream of zeros on the right). 2991 bool OpsModified = false; 2992 for (unsigned OtherIdx = Idx+1; 2993 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2994 ++OtherIdx) { 2995 const SCEVAddRecExpr *OtherAddRec = 2996 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2997 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 2998 continue; 2999 3000 // Limit max number of arguments to avoid creation of unreasonably big 3001 // SCEVAddRecs with very complex operands. 
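      // For reference, the smallest instance of the formula above: two affine
      // recurrences over the same loop multiply to a quadratic one,
      // {A,+,B}<L> * {C,+,D}<L> = {A*C,+,A*D+B*C+B*D,+,2*B*D}<L>.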
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV *, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        SmallVector<const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise we couldn't fold anything into this recurrence. Move on to
    // the next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one; otherwise create a new one.
  return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
}

/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back on %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y).
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
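/// For example, X udiv 1 folds to X, constant operands fold outright, and
/// {X,+,N}<L> /u C folds to {X/C,+,N/C}<L> when C divides the step evenly and
/// the zero-extension check below shows no bits can be lost.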
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS; // X udiv 1 --> X
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of the LHS
      // expression.
      // TODO: Generalize this to non-constants by using known-bits
      // information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence:
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0) {
              const SCEV *NewLHS =
                  getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                AR->getLoop(), SCEV::FlagNW);
              if (LHS != NewLHS) {
                LHS = NewLHS;

                // Reset the ID to include the new LHS, and check if it is
                // already cached.
                ID.clear();
                ID.AddInteger(scUDivExpr);
                ID.AddPointer(LHS);
                ID.AddPointer(RHS);
                IP = nullptr;
                if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
                  return S;
              }
            }
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
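      // For example, (4 * %x) /u 2 folds to 2 * %x once the zero-extension
      // check below proves the product cannot wrap; otherwise the udiv is
      // kept intact.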
3170 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3171 SmallVector<const SCEV *, 4> Operands; 3172 for (const SCEV *Op : M->operands()) 3173 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3174 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3175 // Find an operand that's safely divisible. 3176 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3177 const SCEV *Op = M->getOperand(i); 3178 const SCEV *Div = getUDivExpr(Op, RHSC); 3179 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3180 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3181 M->op_end()); 3182 Operands[i] = Div; 3183 return getMulExpr(Operands); 3184 } 3185 } 3186 } 3187 3188 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3189 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3190 if (auto *DivisorConstant = 3191 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3192 bool Overflow = false; 3193 APInt NewRHS = 3194 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3195 if (Overflow) { 3196 return getConstant(RHSC->getType(), 0, false); 3197 } 3198 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3199 } 3200 } 3201 3202 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3203 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3204 SmallVector<const SCEV *, 4> Operands; 3205 for (const SCEV *Op : A->operands()) 3206 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3207 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3208 Operands.clear(); 3209 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3210 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3211 if (isa<SCEVUDivExpr>(Op) || 3212 getMulExpr(Op, RHS) != A->getOperand(i)) 3213 break; 3214 Operands.push_back(Op); 3215 } 3216 if (Operands.size() == A->getNumOperands()) 3217 return getAddExpr(Operands); 3218 } 3219 } 3220 3221 // Fold if both operands are constant. 3222 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3223 Constant *LHSCV = LHSC->getValue(); 3224 Constant *RHSCV = RHSC->getValue(); 3225 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3226 RHSCV))); 3227 } 3228 } 3229 } 3230 3231 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3232 // changes). Make sure we get a new one. 3233 IP = nullptr; 3234 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3235 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3236 LHS, RHS); 3237 UniqueSCEVs.InsertNode(S, IP); 3238 addToLoopUseLists(S); 3239 return S; 3240 } 3241 3242 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3243 APInt A = C1->getAPInt().abs(); 3244 APInt B = C2->getAPInt().abs(); 3245 uint32_t ABW = A.getBitWidth(); 3246 uint32_t BBW = B.getBitWidth(); 3247 3248 if (ABW > BBW) 3249 B = B.zext(ABW); 3250 else if (ABW < BBW) 3251 A = A.zext(BBW); 3252 3253 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3254 } 3255 3256 /// Get a canonical unsigned division expression, or something simpler if 3257 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3258 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3259 /// it's not exact because the udiv may be clearing bits. 
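/// For example, given a <nuw> multiply, (4 * %x) /u 4 folds to %x, and
/// (8 * %x) /u 4 folds to 2 * %x by first dividing both sides by
/// gcd(8, 4) = 4.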
3260 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3261 const SCEV *RHS) { 3262 // TODO: we could try to find factors in all sorts of things, but for now we 3263 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3264 // end of this file for inspiration. 3265 3266 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3267 if (!Mul || !Mul->hasNoUnsignedWrap()) 3268 return getUDivExpr(LHS, RHS); 3269 3270 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3271 // If the mulexpr multiplies by a constant, then that constant must be the 3272 // first element of the mulexpr. 3273 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3274 if (LHSCst == RHSCst) { 3275 SmallVector<const SCEV *, 2> Operands; 3276 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3277 return getMulExpr(Operands); 3278 } 3279 3280 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3281 // that there's a factor provided by one of the other terms. We need to 3282 // check. 3283 APInt Factor = gcd(LHSCst, RHSCst); 3284 if (!Factor.isIntN(1)) { 3285 LHSCst = 3286 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3287 RHSCst = 3288 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3289 SmallVector<const SCEV *, 2> Operands; 3290 Operands.push_back(LHSCst); 3291 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3292 LHS = getMulExpr(Operands); 3293 RHS = RHSCst; 3294 Mul = dyn_cast<SCEVMulExpr>(LHS); 3295 if (!Mul) 3296 return getUDivExactExpr(LHS, RHS); 3297 } 3298 } 3299 } 3300 3301 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3302 if (Mul->getOperand(i) == RHS) { 3303 SmallVector<const SCEV *, 2> Operands; 3304 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3305 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3306 return getMulExpr(Operands); 3307 } 3308 } 3309 3310 return getUDivExpr(LHS, RHS); 3311 } 3312 3313 /// Get an add recurrence expression for the specified loop. Simplify the 3314 /// expression as much as possible. 3315 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 3316 const Loop *L, 3317 SCEV::NoWrapFlags Flags) { 3318 SmallVector<const SCEV *, 4> Operands; 3319 Operands.push_back(Start); 3320 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 3321 if (StepChrec->getLoop() == L) { 3322 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 3323 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 3324 } 3325 3326 Operands.push_back(Step); 3327 return getAddRecExpr(Operands, L, Flags); 3328 } 3329 3330 /// Get an add recurrence expression for the specified loop. Simplify the 3331 /// expression as much as possible. 
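/// For example, a trailing zero step is stripped ({X,+,0}<L> folds to X), and
/// nested recurrences are reordered by loop depth when all operands stay
/// loop-invariant.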
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands to be loop-invariant with respect to
      // their loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one; otherwise create a new one.
3405 return getOrCreateAddRecExpr(Operands, L, Flags); 3406 } 3407 3408 const SCEV * 3409 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3410 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3411 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3412 // getSCEV(Base)->getType() has the same address space as Base->getType() 3413 // because SCEV::getType() preserves the address space. 3414 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType()); 3415 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3416 // instruction to its SCEV, because the Instruction may be guarded by control 3417 // flow and the no-overflow bits may not be valid for the expression in any 3418 // context. This can be fixed similarly to how these flags are handled for 3419 // adds. 3420 SCEV::NoWrapFlags OffsetWrap = 3421 GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 3422 3423 Type *CurTy = GEP->getType(); 3424 bool FirstIter = true; 3425 SmallVector<const SCEV *, 4> Offsets; 3426 for (const SCEV *IndexExpr : IndexExprs) { 3427 // Compute the (potentially symbolic) offset in bytes for this index. 3428 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3429 // For a struct, add the member offset. 3430 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3431 unsigned FieldNo = Index->getZExtValue(); 3432 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3433 Offsets.push_back(FieldOffset); 3434 3435 // Update CurTy to the type of the field at Index. 3436 CurTy = STy->getTypeAtIndex(Index); 3437 } else { 3438 // Update CurTy to its element type. 3439 if (FirstIter) { 3440 assert(isa<PointerType>(CurTy) && 3441 "The first index of a GEP indexes a pointer"); 3442 CurTy = GEP->getSourceElementType(); 3443 FirstIter = false; 3444 } else { 3445 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3446 } 3447 // For an array, add the element offset, explicitly scaled. 3448 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3449 // Getelementptr indices are signed. 3450 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3451 3452 // Multiply the index by the element size to compute the element offset. 3453 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); 3454 Offsets.push_back(LocalOffset); 3455 } 3456 } 3457 3458 // Handle degenerate case of GEP without offsets. 3459 if (Offsets.empty()) 3460 return BaseExpr; 3461 3462 // Add the offsets together, assuming nsw if inbounds. 3463 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); 3464 // Add the base address and the offset. We cannot use the nsw flag, as the 3465 // base address is unsigned. However, if we know that the offset is 3466 // non-negative, we can use nuw. 3467 SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset) 3468 ? SCEV::FlagNUW : SCEV::FlagAnyWrap; 3469 return getAddExpr(BaseExpr, Offset, BaseWrap); 3470 } 3471 3472 std::tuple<SCEV *, FoldingSetNodeID, void *> 3473 ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, 3474 ArrayRef<const SCEV *> Ops) { 3475 FoldingSetNodeID ID; 3476 void *IP = nullptr; 3477 ID.AddInteger(SCEVType); 3478 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3479 ID.AddPointer(Ops[i]); 3480 return std::tuple<SCEV *, FoldingSetNodeID, void *>( 3481 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); 3482 } 3483 3484 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { 3485 SCEV::NoWrapFlags Flags = IsNSW ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap; 3486 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); 3487 } 3488 3489 const SCEV *ScalarEvolution::getSignumExpr(const SCEV *Op) { 3490 Type *Ty = Op->getType(); 3491 return getSMinExpr(getSMaxExpr(Op, getMinusOne(Ty)), getOne(Ty)); 3492 } 3493 3494 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, 3495 SmallVectorImpl<const SCEV *> &Ops) { 3496 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3497 if (Ops.size() == 1) return Ops[0]; 3498 #ifndef NDEBUG 3499 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3500 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3501 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3502 "Operand types don't match!"); 3503 #endif 3504 3505 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3506 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3507 3508 // Sort by complexity, this groups all similar expression types together. 3509 GroupByComplexity(Ops, &LI, DT); 3510 3511 // Check if we have created the same expression before. 3512 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) { 3513 return S; 3514 } 3515 3516 // If there are any constants, fold them together. 3517 unsigned Idx = 0; 3518 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3519 ++Idx; 3520 assert(Idx < Ops.size()); 3521 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3522 if (Kind == scSMaxExpr) 3523 return APIntOps::smax(LHS, RHS); 3524 else if (Kind == scSMinExpr) 3525 return APIntOps::smin(LHS, RHS); 3526 else if (Kind == scUMaxExpr) 3527 return APIntOps::umax(LHS, RHS); 3528 else if (Kind == scUMinExpr) 3529 return APIntOps::umin(LHS, RHS); 3530 llvm_unreachable("Unknown SCEV min/max opcode"); 3531 }; 3532 3533 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3534 // We found two constants, fold them together! 3535 ConstantInt *Fold = ConstantInt::get( 3536 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3537 Ops[0] = getConstant(Fold); 3538 Ops.erase(Ops.begin()+1); // Erase the folded element 3539 if (Ops.size() == 1) return Ops[0]; 3540 LHSC = cast<SCEVConstant>(Ops[0]); 3541 } 3542 3543 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3544 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3545 3546 if (IsMax ? IsMinV : IsMaxV) { 3547 // If we are left with a constant minimum(/maximum)-int, strip it off. 3548 Ops.erase(Ops.begin()); 3549 --Idx; 3550 } else if (IsMax ? IsMaxV : IsMinV) { 3551 // If we have a max(/min) with a constant maximum(/minimum)-int, 3552 // it will always be the extremum. 3553 return LHSC; 3554 } 3555 3556 if (Ops.size() == 1) return Ops[0]; 3557 } 3558 3559 // Find the first operation of the same kind 3560 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3561 ++Idx; 3562 3563 // Check to see if one of the operands is of the same kind. If so, expand its 3564 // operands onto our operand list, and recurse to simplify. 3565 if (Idx < Ops.size()) { 3566 bool DeletedAny = false; 3567 while (Ops[Idx]->getSCEVType() == Kind) { 3568 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3569 Ops.erase(Ops.begin()+Idx); 3570 Ops.append(SMME->op_begin(), SMME->op_end()); 3571 DeletedAny = true; 3572 } 3573 3574 if (DeletedAny) 3575 return getMinMaxExpr(Kind, Ops); 3576 } 3577 3578 // Okay, check to see if the same value occurs in the operand list twice. If 3579 // so, delete one. Since we sorted the list, these values are required to 3580 // be adjacent. 
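  // For example, smax(x, x, y) folds to smax(x, y); and smax(x, y) folds to
  // just x when x >=s y can be proven.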
  llvm::CmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  llvm::CmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
  llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
    if (Ops[i] == Ops[i + 1] ||
        isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
      //  X op Y op Y  -->  X op Y
      //  X op Y       -->  X, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
      --i;
      --e;
    } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
                                               Ops[i + 1])) {
      //  X op Y  -->  Y, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
      --i;
      --e;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced min/max down to nothing!");

  // Okay, it looks like we really DO need an expr. Check to see if we
  // already have one; otherwise create a new one.
  const SCEV *ExistingSCEV;
  FoldingSetNodeID ID;
  void *IP;
  std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
  if (ExistingSCEV)
    return ExistingSCEV;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
      SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());

  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMinExpr, Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinExpr(Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMinExpr, Ops);
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  if (isa<ScalableVectorType>(AllocTy)) {
    Constant *NullPtr = Constant::getNullValue(AllocTy->getPointerTo());
    Constant *One = ConstantInt::get(IntTy, 1);
    Constant *GEP = ConstantExpr::getGetElementPtr(AllocTy, NullPtr, One);
    // Note that the expression we created is the final expression; we don't
    // want to simplify it any further. Also, if we call a normal getSCEV(),
    // we'll end up in an endless recursion. So just create an SCEVUnknown.
    return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
  }
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer index sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is a pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIndexType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ?
T1 : T2; 3750 } 3751 3752 const SCEV *ScalarEvolution::getCouldNotCompute() { 3753 return CouldNotCompute.get(); 3754 } 3755 3756 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3757 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3758 auto *SU = dyn_cast<SCEVUnknown>(S); 3759 return SU && SU->getValue() == nullptr; 3760 }); 3761 3762 return !ContainsNulls; 3763 } 3764 3765 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3766 HasRecMapType::iterator I = HasRecMap.find(S); 3767 if (I != HasRecMap.end()) 3768 return I->second; 3769 3770 bool FoundAddRec = 3771 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); 3772 HasRecMap.insert({S, FoundAddRec}); 3773 return FoundAddRec; 3774 } 3775 3776 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3777 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3778 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3779 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3780 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3781 if (!Add) 3782 return {S, nullptr}; 3783 3784 if (Add->getNumOperands() != 2) 3785 return {S, nullptr}; 3786 3787 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3788 if (!ConstOp) 3789 return {S, nullptr}; 3790 3791 return {Add->getOperand(1), ConstOp->getValue()}; 3792 } 3793 3794 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3795 /// by the value and offset from any ValueOffsetPair in the set. 3796 SetVector<ScalarEvolution::ValueOffsetPair> * 3797 ScalarEvolution::getSCEVValues(const SCEV *S) { 3798 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3799 if (SI == ExprValueMap.end()) 3800 return nullptr; 3801 #ifndef NDEBUG 3802 if (VerifySCEVMap) { 3803 // Check there is no dangling Value in the set returned. 3804 for (const auto &VE : SI->second) 3805 assert(ValueExprMap.count(VE.first)); 3806 } 3807 #endif 3808 return &SI->second; 3809 } 3810 3811 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3812 /// cannot be used separately. eraseValueFromMap should be used to remove 3813 /// V from ValueExprMap and ExprValueMap at the same time. 3814 void ScalarEvolution::eraseValueFromMap(Value *V) { 3815 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3816 if (I != ValueExprMap.end()) { 3817 const SCEV *S = I->second; 3818 // Remove {V, 0} from the set of ExprValueMap[S] 3819 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S)) 3820 SV->remove({V, nullptr}); 3821 3822 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3823 const SCEV *Stripped; 3824 ConstantInt *Offset; 3825 std::tie(Stripped, Offset) = splitAddExpr(S); 3826 if (Offset != nullptr) { 3827 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped)) 3828 SV->remove({V, Offset}); 3829 } 3830 ValueExprMap.erase(V); 3831 } 3832 } 3833 3834 /// Check whether value has nuw/nsw/exact set but SCEV does not. 3835 /// TODO: In reality it is better to check the poison recursively 3836 /// but this is better than nothing. 
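/// For example, %v = add nsw i32 %x, 1 is poison when the add wraps, while a
/// SCEVAddExpr without <nsw> is not; reusing %v to materialize that SCEV could
/// therefore introduce poison, so such values are not registered for reuse.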
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check that V->S has been inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
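      // For example, if S is (4 + %x), splitAddExpr yields Stripped = %x and
      // Offset = 4, which lets the expander materialize %x as (V - 4).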
3878 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3879 !isa<GetElementPtrInst>(V)) 3880 ExprValueMap[Stripped].insert({V, Offset}); 3881 } 3882 } 3883 return S; 3884 } 3885 3886 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3887 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3888 3889 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3890 if (I != ValueExprMap.end()) { 3891 const SCEV *S = I->second; 3892 if (checkValidity(S)) 3893 return S; 3894 eraseValueFromMap(V); 3895 forgetMemoizedResults(S); 3896 } 3897 return nullptr; 3898 } 3899 3900 /// Return a SCEV corresponding to -V = -1*V 3901 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3902 SCEV::NoWrapFlags Flags) { 3903 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3904 return getConstant( 3905 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3906 3907 Type *Ty = V->getType(); 3908 Ty = getEffectiveSCEVType(Ty); 3909 return getMulExpr(V, getMinusOne(Ty), Flags); 3910 } 3911 3912 /// If Expr computes ~A, return A else return nullptr 3913 static const SCEV *MatchNotExpr(const SCEV *Expr) { 3914 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 3915 if (!Add || Add->getNumOperands() != 2 || 3916 !Add->getOperand(0)->isAllOnesValue()) 3917 return nullptr; 3918 3919 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 3920 if (!AddRHS || AddRHS->getNumOperands() != 2 || 3921 !AddRHS->getOperand(0)->isAllOnesValue()) 3922 return nullptr; 3923 3924 return AddRHS->getOperand(1); 3925 } 3926 3927 /// Return a SCEV corresponding to ~V = -1-V 3928 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3929 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3930 return getConstant( 3931 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3932 3933 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 3934 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 3935 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 3936 SmallVector<const SCEV *, 2> MatchedOperands; 3937 for (const SCEV *Operand : MME->operands()) { 3938 const SCEV *Matched = MatchNotExpr(Operand); 3939 if (!Matched) 3940 return (const SCEV *)nullptr; 3941 MatchedOperands.push_back(Matched); 3942 } 3943 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), 3944 MatchedOperands); 3945 }; 3946 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 3947 return Replaced; 3948 } 3949 3950 Type *Ty = V->getType(); 3951 Ty = getEffectiveSCEVType(Ty); 3952 return getMinusSCEV(getMinusOne(Ty), V); 3953 } 3954 3955 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3956 SCEV::NoWrapFlags Flags, 3957 unsigned Depth) { 3958 // Fast path: X - X --> 0. 3959 if (LHS == RHS) 3960 return getZero(LHS->getType()); 3961 3962 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3963 // makes it so that we cannot make much use of NUW. 3964 auto AddFlags = SCEV::FlagAnyWrap; 3965 const bool RHSIsNotMinSigned = 3966 !getSignedRangeMin(RHS).isMinSignedValue(); 3967 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3968 // Let M be the minimum representable signed value. Then (-1)*RHS 3969 // signed-wraps if and only if RHS is M. That can happen even for 3970 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3971 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3972 // (-1)*RHS, we need to prove that RHS != M. 
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot
truncate or noop with non-integer arguments!"); 4059 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 4060 "getTruncateOrNoop cannot extend!"); 4061 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4062 return V; // No conversion 4063 return getTruncateExpr(V, Ty); 4064 } 4065 4066 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 4067 const SCEV *RHS) { 4068 const SCEV *PromotedLHS = LHS; 4069 const SCEV *PromotedRHS = RHS; 4070 4071 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 4072 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 4073 else 4074 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 4075 4076 return getUMaxExpr(PromotedLHS, PromotedRHS); 4077 } 4078 4079 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4080 const SCEV *RHS) { 4081 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4082 return getUMinFromMismatchedTypes(Ops); 4083 } 4084 4085 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 4086 SmallVectorImpl<const SCEV *> &Ops) { 4087 assert(!Ops.empty() && "At least one operand must be!"); 4088 // Trivial case. 4089 if (Ops.size() == 1) 4090 return Ops[0]; 4091 4092 // Find the max type first. 4093 Type *MaxType = nullptr; 4094 for (auto *S : Ops) 4095 if (MaxType) 4096 MaxType = getWiderType(MaxType, S->getType()); 4097 else 4098 MaxType = S->getType(); 4099 assert(MaxType && "Failed to find maximum type!"); 4100 4101 // Extend all ops to max type. 4102 SmallVector<const SCEV *, 2> PromotedOps; 4103 for (auto *S : Ops) 4104 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4105 4106 // Generate umin. 4107 return getUMinExpr(PromotedOps); 4108 } 4109 4110 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4111 // A pointer operand may evaluate to a nonpointer expression, such as null. 4112 if (!V->getType()->isPointerTy()) 4113 return V; 4114 4115 while (true) { 4116 if (const SCEVIntegralCastExpr *Cast = dyn_cast<SCEVIntegralCastExpr>(V)) { 4117 V = Cast->getOperand(); 4118 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 4119 const SCEV *PtrOp = nullptr; 4120 for (const SCEV *NAryOp : NAry->operands()) { 4121 if (NAryOp->getType()->isPointerTy()) { 4122 // Cannot find the base of an expression with multiple pointer ops. 4123 if (PtrOp) 4124 return V; 4125 PtrOp = NAryOp; 4126 } 4127 } 4128 if (!PtrOp) // All operands were non-pointer. 4129 return V; 4130 V = PtrOp; 4131 } else // Not something we can look further into. 4132 return V; 4133 } 4134 } 4135 4136 /// Push users of the given Instruction onto the given Worklist. 4137 static void 4138 PushDefUseChildren(Instruction *I, 4139 SmallVectorImpl<Instruction *> &Worklist) { 4140 // Push the def-use children onto the Worklist stack. 4141 for (User *U : I->users()) 4142 Worklist.push_back(cast<Instruction>(U)); 4143 } 4144 4145 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 4146 SmallVector<Instruction *, 16> Worklist; 4147 PushDefUseChildren(PN, Worklist); 4148 4149 SmallPtrSet<Instruction *, 8> Visited; 4150 Visited.insert(PN); 4151 while (!Worklist.empty()) { 4152 Instruction *I = Worklist.pop_back_val(); 4153 if (!Visited.insert(I).second) 4154 continue; 4155 4156 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 4157 if (It != ValueExprMap.end()) { 4158 const SCEV *Old = It->second; 4159 4160 // Short-circuit the def-use traversal if the symbolic name 4161 // ceases to appear in expressions. 
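      // (If a user's SCEV does not mention SymName, SCEVs built on top of it
      // cannot either, so its def-use subtree is skipped.)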
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression when its loop is L. When the loop is not L, use the AddRec
/// itself if IgnoreOtherLoops is true; otherwise the rewrite cannot be done.
/// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done either.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only rewrite AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
/// post-increment expression when its loop is L; otherwise use the AddRec
/// itself.
/// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only rewrite AddRecExprs for this loop.
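    // For example, the post-increment form of {A,+,B}<L> is
    // ({A,+,B}<L> + B), i.e. {A+B,+,B}<L>.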
4253 if (Expr->getLoop() == L) 4254 return Expr->getPostIncExpr(SE); 4255 SeenOtherLoops = true; 4256 return Expr; 4257 } 4258 4259 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4260 4261 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4262 4263 private: 4264 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4265 : SCEVRewriteVisitor(SE), L(L) {} 4266 4267 const Loop *L; 4268 bool SeenLoopVariantSCEVUnknown = false; 4269 bool SeenOtherLoops = false; 4270 }; 4271 4272 /// This class evaluates the compare condition by matching it against the 4273 /// condition of loop latch. If there is a match we assume a true value 4274 /// for the condition while building SCEV nodes. 4275 class SCEVBackedgeConditionFolder 4276 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4277 public: 4278 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4279 ScalarEvolution &SE) { 4280 bool IsPosBECond = false; 4281 Value *BECond = nullptr; 4282 if (BasicBlock *Latch = L->getLoopLatch()) { 4283 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4284 if (BI && BI->isConditional()) { 4285 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4286 "Both outgoing branches should not target same header!"); 4287 BECond = BI->getCondition(); 4288 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4289 } else { 4290 return S; 4291 } 4292 } 4293 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4294 return Rewriter.visit(S); 4295 } 4296 4297 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4298 const SCEV *Result = Expr; 4299 bool InvariantF = SE.isLoopInvariant(Expr, L); 4300 4301 if (!InvariantF) { 4302 Instruction *I = cast<Instruction>(Expr->getValue()); 4303 switch (I->getOpcode()) { 4304 case Instruction::Select: { 4305 SelectInst *SI = cast<SelectInst>(I); 4306 Optional<const SCEV *> Res = 4307 compareWithBackedgeCondition(SI->getCondition()); 4308 if (Res.hasValue()) { 4309 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4310 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4311 } 4312 break; 4313 } 4314 default: { 4315 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4316 if (Res.hasValue()) 4317 Result = Res.getValue(); 4318 break; 4319 } 4320 } 4321 } 4322 return Result; 4323 } 4324 4325 private: 4326 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4327 bool IsPosBECond, ScalarEvolution &SE) 4328 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4329 IsPositiveBECond(IsPosBECond) {} 4330 4331 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4332 4333 const Loop *L; 4334 /// Loop back condition. 4335 Value *BackedgeCond = nullptr; 4336 /// Set to true if loop back is on positive branch condition. 4337 bool IsPositiveBECond; 4338 }; 4339 4340 Optional<const SCEV *> 4341 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4342 4343 // If value matches the backedge condition for loop latch, 4344 // then return a constant evolution node based on loopback 4345 // branch taken. 4346 if (BackedgeCond == IC) 4347 return IsPositiveBECond ? 
SE.getOne(Type::getInt1Ty(SE.getContext()))
                           : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only loop-invariant SCEVUnknowns are allowed.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

SCEV::NoWrapFlags
ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoSignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop.
The exceptions are assumptions and 4445 // guards present in the loop -- SCEV is not great at exploiting 4446 // these to compute max backedge taken counts, but can still use 4447 // these to prove lack of overflow. Use this fact to avoid 4448 // doing extra work that may not pay off. 4449 4450 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && 4451 AC.assumptions().empty()) 4452 return Result; 4453 4454 // If the backedge is guarded by a comparison with the pre-inc value the 4455 // addrec is safe. Also, if the entry is guarded by a comparison with the 4456 // start value and the backedge is guarded by a comparison with the post-inc 4457 // value, the addrec is safe. 4458 ICmpInst::Predicate Pred; 4459 const SCEV *OverflowLimit = 4460 getSignedOverflowLimitForStep(Step, &Pred, this); 4461 if (OverflowLimit && 4462 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 4463 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 4464 Result = setFlags(Result, SCEV::FlagNSW); 4465 } 4466 return Result; 4467 } 4468 SCEV::NoWrapFlags 4469 ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) { 4470 SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); 4471 4472 if (AR->hasNoUnsignedWrap()) 4473 return Result; 4474 4475 if (!AR->isAffine()) 4476 return Result; 4477 4478 const SCEV *Step = AR->getStepRecurrence(*this); 4479 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 4480 const Loop *L = AR->getLoop(); 4481 4482 // Check whether the backedge-taken count is SCEVCouldNotCompute. 4483 // Note that this serves two purposes: It filters out loops that are 4484 // simply not analyzable, and it covers the case where this code is 4485 // being called from within backedge-taken count analysis, such that 4486 // attempting to ask for the backedge-taken count would likely result 4487 // in infinite recursion. In the latter case, the analysis code will 4488 // cope with a conservative value, and it will take care to purge 4489 // that value once it has finished. 4490 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 4491 4492 // Normally, in the cases we can prove no-overflow via a 4493 // backedge guarding condition, we can also compute a backedge 4494 // taken count for the loop. The exceptions are assumptions and 4495 // guards present in the loop -- SCEV is not great at exploiting 4496 // these to compute max backedge taken counts, but can still use 4497 // these to prove lack of overflow. Use this fact to avoid 4498 // doing extra work that may not pay off. 4499 4500 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && 4501 AC.assumptions().empty()) 4502 return Result; 4503 4504 // If the backedge is guarded by a comparison with the pre-inc value the 4505 // addrec is safe. Also, if the entry is guarded by a comparison with the 4506 // start value and the backedge is guarded by a comparison with the post-inc 4507 // value, the addrec is safe. 4508 if (isKnownPositive(Step)) { 4509 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 4510 getUnsignedRangeMax(Step)); 4511 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 4512 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 4513 Result = setFlags(Result, SCEV::FlagNUW); 4514 } 4515 } 4516 4517 return Result; 4518 } 4519 4520 namespace { 4521 4522 /// Represents an abstract binary operation. This may exist as a 4523 /// normal instruction or constant expression, or may have been 4524 /// derived from an expression tree.
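// Illustrative example (not part of the original source): the instruction
//   %r = add nsw i32 %a, %b
// would be described as a BinaryOp with Opcode == Instruction::Add,
// LHS == %a, RHS == %b, IsNSW == true, and Op pointing at the add itself,
// whereas a BinaryOp synthesized by pattern matching (see MatchBinaryOp
// below) leaves Op null.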
4525 struct BinaryOp { 4526 unsigned Opcode; 4527 Value *LHS; 4528 Value *RHS; 4529 bool IsNSW = false; 4530 bool IsNUW = false; 4531 bool IsExact = false; 4532 4533 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4534 /// constant expression. 4535 Operator *Op = nullptr; 4536 4537 explicit BinaryOp(Operator *Op) 4538 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), 4539 Op(Op) { 4540 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { 4541 IsNSW = OBO->hasNoSignedWrap(); 4542 IsNUW = OBO->hasNoUnsignedWrap(); 4543 } 4544 if (auto *PEO = dyn_cast<PossiblyExactOperator>(Op)) 4545 IsExact = PEO->isExact(); 4546 } 4547 4548 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, 4549 bool IsNUW = false, bool IsExact = false) 4550 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW), 4551 IsExact(IsExact) {} 4552 }; 4553 4554 } // end anonymous namespace 4555 4556 /// Try to map \p V into a BinaryOp, and return \c None on failure. 4557 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { 4558 auto *Op = dyn_cast<Operator>(V); 4559 if (!Op) 4560 return None; 4561 4562 // Implementation detail: all the cleverness here should happen without 4563 // creating new SCEV expressions -- our caller knows tricks to avoid creating 4564 // SCEV expressions when possible, and we should not break that. 4565 4566 switch (Op->getOpcode()) { 4567 case Instruction::Add: 4568 case Instruction::Sub: 4569 case Instruction::Mul: 4570 case Instruction::UDiv: 4571 case Instruction::URem: 4572 case Instruction::And: 4573 case Instruction::Or: 4574 case Instruction::AShr: 4575 case Instruction::Shl: 4576 return BinaryOp(Op); 4577 4578 case Instruction::Xor: 4579 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) 4580 // If the RHS of the xor is a signmask, then this is just an add. 4581 // Instcombine turns add of signmask into xor as a strength reduction step. 4582 if (RHSC->getValue().isSignMask()) 4583 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); 4584 return BinaryOp(Op); 4585 4586 case Instruction::LShr: 4587 // Turn logical shift right of a constant into an unsigned divide. 4588 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { 4589 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); 4590 4591 // If the shift count is not less than the bitwidth, the result of 4592 // the shift is undefined. Don't try to analyze it, because the 4593 // resolution chosen here may differ from the resolution chosen in 4594 // other parts of the compiler. 4595 if (SA->getValue().ult(BitWidth)) { 4596 Constant *X = 4597 ConstantInt::get(SA->getContext(), 4598 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 4599 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); 4600 } 4601 } 4602 return BinaryOp(Op); 4603 4604 case Instruction::ExtractValue: { 4605 auto *EVI = cast<ExtractValueInst>(Op); 4606 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) 4607 break; 4608 4609 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()); 4610 if (!WO) 4611 break; 4612 4613 Instruction::BinaryOps BinOp = WO->getBinaryOp(); 4614 bool Signed = WO->isSigned(); 4615 // TODO: Should add nuw/nsw flags for mul as well.
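// Illustrative sketch (assumed IR, not from the original source) of the
// pattern handled here:
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %v = extractvalue { i32, i1 } %s, 0
// %v maps to BinaryOp(Add, %a, %b); the nsw/nuw flags are added below only
// when isOverflowIntrinsicNoWrap proves that every use of %v is guarded by
// the overflow check.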
4616 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT)) 4617 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS()); 4618 4619 // Now that we know that all uses of the arithmetic-result component of 4620 // WO are guarded by the overflow check, we can go ahead and pretend 4621 // that the arithmetic is non-overflowing. 4622 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), 4623 /* IsNSW = */ Signed, /* IsNUW = */ !Signed); 4624 } 4625 4626 default: 4627 break; 4628 } 4629 4630 // Recognise the intrinsic loop.decrement.reg; as this has exactly the same 4631 // semantics as a Sub, return a binary sub expression. 4632 if (auto *II = dyn_cast<IntrinsicInst>(V)) 4633 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) 4634 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); 4635 4636 return None; 4637 } 4638 4639 /// Helper function for createAddRecFromPHIWithCasts. We have a phi 4640 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4641 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4642 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4643 /// follows one of the following patterns: 4644 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4645 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4646 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4647 /// we return the type of the truncation operation, and indicate whether the 4648 /// truncated type should be treated as signed/unsigned by setting 4649 /// \p Signed to true/false, respectively. 4650 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4651 bool &Signed, ScalarEvolution &SE) { 4652 // The case where Op == SymbolicPHI (that is, with no type conversions on 4653 // the way) is handled by the regular add recurrence creating logic and 4654 // would have already been triggered in createAddRecForPHI. Reaching it here 4655 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4656 // because one of the other operands of the SCEVAddExpr updating this PHI is 4657 // not invariant). 4658 // 4659 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4660 // this case predicates that allow us to prove that Op == SymbolicPHI will 4661 // be added. 4662 if (Op == SymbolicPHI) 4663 return nullptr; 4664 4665 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4666 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4667 if (SourceBits != NewBits) 4668 return nullptr; 4669 4670 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4671 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4672 if (!SExt && !ZExt) 4673 return nullptr; 4674 const SCEVTruncateExpr *Trunc = 4675 SExt ?
dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4676 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4677 if (!Trunc) 4678 return nullptr; 4679 const SCEV *X = Trunc->getOperand(); 4680 if (X != SymbolicPHI) 4681 return nullptr; 4682 Signed = SExt != nullptr; 4683 return Trunc->getType(); 4684 } 4685 4686 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4687 if (!PN->getType()->isIntegerTy()) 4688 return nullptr; 4689 const Loop *L = LI.getLoopFor(PN->getParent()); 4690 if (!L || L->getHeader() != PN->getParent()) 4691 return nullptr; 4692 return L; 4693 } 4694 4695 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4696 // computation that updates the phi follows the following pattern: 4697 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4698 // which corresponds to a phi->trunc->sext/zext->add->phi update chain. 4699 // If so, try to see if it can be rewritten as an AddRecExpr under some 4700 // Predicates. If successful, return them as a pair. Also cache the results 4701 // of the analysis. 4702 // 4703 // Example usage scenario: 4704 // Say the Rewriter is called for the following SCEV: 4705 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4706 // where: 4707 // %X = phi i64 (%Start, %BEValue) 4708 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4709 // and call this function with %SymbolicPHI = %X. 4710 // 4711 // The analysis will find that the value coming around the backedge has 4712 // the following SCEV: 4713 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4714 // Upon concluding that this matches the desired pattern, the function 4715 // will return the pair {NewAddRec, SmallPredsVec} where: 4716 // NewAddRec = {%Start,+,%Step} 4717 // SmallPredsVec = {P1, P2, P3} as follows: 4718 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> 4719 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) 4720 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) 4721 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec 4722 // under the predicates {P1,P2,P3}. 4723 // This predicated rewrite will be cached in PredicatedSCEVRewrites: 4724 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}} 4725 // 4726 // TODO's: 4727 // 4728 // 1) Extend the Induction descriptor to also support inductions that involve 4729 // casts: When needed (namely, when we are called in the context of the 4730 // vectorizer induction analysis), a Set of cast instructions will be 4731 // populated by this method, and provided back to isInductionPHI. This is 4732 // needed to allow the vectorizer to properly record them to be ignored by 4733 // the cost model and to avoid vectorizing them (otherwise these casts, 4734 // which are redundant under the runtime overflow checks, will be 4735 // vectorized, which can be costly). 4736 // 4737 // 2) Support additional induction/PHISCEV patterns: We also want to support 4738 // inductions where the sext-trunc / zext-trunc operations (partly) occur 4739 // after the induction update operation (the induction increment): 4740 // 4741 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 4742 // which corresponds to a phi->add->trunc->sext/zext->phi update chain. 4743 // 4744 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) 4745 // which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4746 // 4747 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4748 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4749 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4750 SmallVector<const SCEVPredicate *, 3> Predicates; 4751 4752 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4753 // return an AddRec expression under some predicate. 4754 4755 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4756 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4757 assert(L && "Expecting an integer loop header phi"); 4758 4759 // The loop may have multiple entrances or multiple exits; we can analyze 4760 // this phi as an addrec if it has a unique entry value and a unique 4761 // backedge value. 4762 Value *BEValueV = nullptr, *StartValueV = nullptr; 4763 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4764 Value *V = PN->getIncomingValue(i); 4765 if (L->contains(PN->getIncomingBlock(i))) { 4766 if (!BEValueV) { 4767 BEValueV = V; 4768 } else if (BEValueV != V) { 4769 BEValueV = nullptr; 4770 break; 4771 } 4772 } else if (!StartValueV) { 4773 StartValueV = V; 4774 } else if (StartValueV != V) { 4775 StartValueV = nullptr; 4776 break; 4777 } 4778 } 4779 if (!BEValueV || !StartValueV) 4780 return None; 4781 4782 const SCEV *BEValue = getSCEV(BEValueV); 4783 4784 // If the value coming around the backedge is an add with the symbolic 4785 // value we just inserted, possibly with casts that we can ignore under 4786 // an appropriate runtime guard, then we found a simple induction variable! 4787 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4788 if (!Add) 4789 return None; 4790 4791 // If there is a single occurrence of the symbolic value, possibly 4792 // casted, replace it with a recurrence. 4793 unsigned FoundIndex = Add->getNumOperands(); 4794 Type *TruncTy = nullptr; 4795 bool Signed; 4796 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4797 if ((TruncTy = 4798 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4799 if (FoundIndex == e) { 4800 FoundIndex = i; 4801 break; 4802 } 4803 4804 if (FoundIndex == Add->getNumOperands()) 4805 return None; 4806 4807 // Create an add with everything but the specified operand. 4808 SmallVector<const SCEV *, 8> Ops; 4809 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4810 if (i != FoundIndex) 4811 Ops.push_back(Add->getOperand(i)); 4812 const SCEV *Accum = getAddExpr(Ops); 4813 4814 // The runtime checks will not be valid if the step amount is 4815 // varying inside the loop. 4816 if (!isLoopInvariant(Accum, L)) 4817 return None; 4818 4819 // *** Part2: Create the predicates 4820 4821 // Analysis was successful: we have a phi-with-cast pattern for which we 4822 // can return an AddRec expression under the following predicates: 4823 // 4824 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4825 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4826 // P2: An Equal predicate that guarantees that 4827 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4828 // P3: An Equal predicate that guarantees that 4829 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4830 // 4831 // As we next prove, the above predicates guarantee that: 4832 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4833 // 4834 // 4835 // More formally, we want to prove that: 4836 // Expr(i+1) = Start + (i+1) * Accum 4837 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4838 // 4839 // Given that: 4840 // 1) Expr(0) = Start 4841 // 2) Expr(1) = Start + Accum 4842 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4843 // 3) Induction hypothesis (step i): 4844 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4845 // 4846 // Proof: 4847 // Expr(i+1) = 4848 // = Start + (i+1)*Accum 4849 // = (Start + i*Accum) + Accum 4850 // = Expr(i) + Accum 4851 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4852 // :: from step i 4853 // 4854 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4855 // 4856 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4857 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4858 // + Accum :: from P3 4859 // 4860 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4861 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4862 // 4863 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4864 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4865 // 4866 // By induction, the same applies to all iterations 1<=i<n: 4867 // 4868 4869 // Create a truncated addrec for which we will add a no overflow check (P1). 4870 const SCEV *StartVal = getSCEV(StartValueV); 4871 const SCEV *PHISCEV = 4872 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4873 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4874 4875 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4876 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4877 // will be constant. 4878 // 4879 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4880 // add P1. 4881 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4882 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4883 Signed ? SCEVWrapPredicate::IncrementNSSW 4884 : SCEVWrapPredicate::IncrementNUSW; 4885 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4886 Predicates.push_back(AddRecPred); 4887 } 4888 4889 // Create the Equal Predicates P2,P3: 4890 4891 // It is possible that the predicates P2 and/or P3 are computable at 4892 // compile time due to StartVal and/or Accum being constants. 4893 // If either one is, then we can check that now and escape if either P2 4894 // or P3 is false. 4895 4896 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4897 // for each of StartVal and Accum 4898 auto getExtendedExpr = [&](const SCEV *Expr, 4899 bool CreateSignExtend) -> const SCEV * { 4900 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4901 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4902 const SCEV *ExtendedExpr = 4903 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType()) 4904 : getZeroExtendExpr(TruncatedExpr, Expr->getType()); 4905 return ExtendedExpr; 4906 }; 4907 4908 // Given: 4909 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy) 4910 // = getExtendedExpr(Expr) 4911 // Determine whether the predicate P: Expr == ExtendedExpr 4912 // is known to be false at compile time. 4913 auto PredIsKnownFalse = [&](const SCEV *Expr, 4914 const SCEV *ExtendedExpr) -> bool { 4915 return Expr != ExtendedExpr && 4916 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr); 4917 }; 4918 4919 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed); 4920 if (PredIsKnownFalse(StartVal, StartExtended)) { 4921 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";); 4922 return None; 4923 } 4924 4925 // The Step is always Signed (because the overflow checks are either 4926 // NSSW or NUSW) 4927 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true); 4928 if (PredIsKnownFalse(Accum, AccumExtended)) { 4929 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";); 4930 return None; 4931 } 4932 4933 auto AppendPredicate = [&](const SCEV *Expr, 4934 const SCEV *ExtendedExpr) -> void { 4935 if (Expr != ExtendedExpr && 4936 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) { 4937 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr); 4938 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred); 4939 Predicates.push_back(Pred); 4940 } 4941 }; 4942 4943 AppendPredicate(StartVal, StartExtended); 4944 AppendPredicate(Accum, AccumExtended); 4945 4946 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in 4947 // which the casts had been folded away. The caller can rewrite SymbolicPHI 4948 // into NewAR if it will also add the runtime overflow checks specified in 4949 // Predicates. 4950 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap); 4951 4952 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite = 4953 std::make_pair(NewAR, Predicates); 4954 // Remember the result of the analysis for this SCEV at this location. 4955 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite; 4956 return PredRewrite; 4957 } 4958 4959 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4960 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) { 4961 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4962 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4963 if (!L) 4964 return None; 4965 4966 // Check to see if we already analyzed this PHI.
4967 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); 4968 if (I != PredicatedSCEVRewrites.end()) { 4969 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = 4970 I->second; 4971 // Analysis was done before and failed to create an AddRec: 4972 if (Rewrite.first == SymbolicPHI) 4973 return None; 4974 // Analysis was done before and succeeded in creating an AddRec under 4975 // a predicate: 4976 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); 4977 assert(!(Rewrite.second).empty() && "Expected to find Predicates"); 4978 return Rewrite; 4979 } 4980 4981 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4982 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); 4983 4984 // Record in the cache that the analysis failed. 4985 if (!Rewrite) { 4986 SmallVector<const SCEVPredicate *, 3> Predicates; 4987 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; 4988 return None; 4989 } 4990 4991 return Rewrite; 4992 } 4993 4994 // FIXME: This utility is currently required because the Rewriter currently 4995 // does not rewrite this expression: 4996 // {0, +, (sext ix (trunc iy to ix) to iy)} 4997 // into {0, +, %step}, 4998 // even when the following Equal predicate exists: 4999 // "%step == (sext ix (trunc iy to ix) to iy)". 5000 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( 5001 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { 5002 if (AR1 == AR2) 5003 return true; 5004 5005 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { 5006 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) && 5007 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1))) 5008 return false; 5009 return true; 5010 }; 5011 5012 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || 5013 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) 5014 return false; 5015 return true; 5016 } 5017 5018 /// A helper function for createAddRecFromPHI to handle simple cases. 5019 /// 5020 /// This function tries to find an AddRec expression for the simplest (yet most 5021 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 5022 /// If it fails, createAddRecFromPHI will use a more general, but slow, 5023 /// technique for finding the AddRec expression. 5024 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 5025 Value *BEValueV, 5026 Value *StartValueV) { 5027 const Loop *L = LI.getLoopFor(PN->getParent()); 5028 assert(L && L->getHeader() == PN->getParent()); 5029 assert(BEValueV && StartValueV); 5030 5031 auto BO = MatchBinaryOp(BEValueV, DT); 5032 if (!BO) 5033 return nullptr; 5034 5035 if (BO->Opcode != Instruction::Add) 5036 return nullptr; 5037 5038 const SCEV *Accum = nullptr; 5039 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 5040 Accum = getSCEV(BO->RHS); 5041 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 5042 Accum = getSCEV(BO->LHS); 5043 5044 if (!Accum) 5045 return nullptr; 5046 5047 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5048 if (BO->IsNUW) 5049 Flags = setFlags(Flags, SCEV::FlagNUW); 5050 if (BO->IsNSW) 5051 Flags = setFlags(Flags, SCEV::FlagNSW); 5052 5053 const SCEV *StartVal = getSCEV(StartValueV); 5054 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 5055 5056 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 5057 5058 // We can add Flags to the post-inc expression only if we 5059 // know that it is *undefined behavior* for BEValueV to 5060 // overflow.
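// Illustrative example (assumed IR, not from the original source): with
//   %iv.next = add nsw i32 %iv, %step
// executing on every iteration, signed overflow of %iv.next would produce
// poison that is guaranteed to trigger undefined behavior (this is what
// isAddRecNeverPoison checks below), so the post-inc recurrence
// {Start+Step,+,Step} may safely carry the same nsw flag.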
5061 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 5062 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 5063 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 5064 5065 return PHISCEV; 5066 } 5067 5068 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { 5069 const Loop *L = LI.getLoopFor(PN->getParent()); 5070 if (!L || L->getHeader() != PN->getParent()) 5071 return nullptr; 5072 5073 // The loop may have multiple entrances or multiple exits; we can analyze 5074 // this phi as an addrec if it has a unique entry value and a unique 5075 // backedge value. 5076 Value *BEValueV = nullptr, *StartValueV = nullptr; 5077 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 5078 Value *V = PN->getIncomingValue(i); 5079 if (L->contains(PN->getIncomingBlock(i))) { 5080 if (!BEValueV) { 5081 BEValueV = V; 5082 } else if (BEValueV != V) { 5083 BEValueV = nullptr; 5084 break; 5085 } 5086 } else if (!StartValueV) { 5087 StartValueV = V; 5088 } else if (StartValueV != V) { 5089 StartValueV = nullptr; 5090 break; 5091 } 5092 } 5093 if (!BEValueV || !StartValueV) 5094 return nullptr; 5095 5096 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && 5097 "PHI node already processed?"); 5098 5099 // First, try to find an AddRec expression without creating a fictitious 5100 // symbolic value for PN. 5101 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV)) 5102 return S; 5103 5104 // Handle PHI node value symbolically. 5105 const SCEV *SymbolicName = getUnknown(PN); 5106 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName}); 5107 5108 // Using this symbolic name for the PHI, analyze the value coming around 5109 // the back-edge. 5110 const SCEV *BEValue = getSCEV(BEValueV); 5111 5112 // NOTE: If BEValue is loop invariant, we know that the PHI node just 5113 // has a special value for the first iteration of the loop. 5114 5115 // If the value coming around the backedge is an add with the symbolic 5116 // value we just inserted, then we found a simple induction variable! 5117 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 5118 // If there is a single occurrence of the symbolic value, replace it 5119 // with a recurrence. 5120 unsigned FoundIndex = Add->getNumOperands(); 5121 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5122 if (Add->getOperand(i) == SymbolicName) 5123 if (FoundIndex == e) { 5124 FoundIndex = i; 5125 break; 5126 } 5127 5128 if (FoundIndex != Add->getNumOperands()) { 5129 // Create an add with everything but the specified operand. 5130 SmallVector<const SCEV *, 8> Ops; 5131 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5132 if (i != FoundIndex) 5133 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i), 5134 L, *this)); 5135 const SCEV *Accum = getAddExpr(Ops); 5136 5137 // This is not a valid addrec if the step amount is varying each 5138 // loop iteration, but is not itself an addrec in this loop.
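// For example (illustrative, not from the original source): in
//   for (i = 0; ...; i += j) { ...; j = ...; }
// a step j that is recomputed inside the loop makes {0,+,j} meaningless,
// unless j is itself an addrec on this loop (e.g. j = {1,+,1}<L>), in
// which case i becomes the polynomial recurrence {0,+,1,+,1}<L>.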
5139 if (isLoopInvariant(Accum, L) || 5140 (isa<SCEVAddRecExpr>(Accum) && 5141 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 5142 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5143 5144 if (auto BO = MatchBinaryOp(BEValueV, DT)) { 5145 if (BO->Opcode == Instruction::Add && BO->LHS == PN) { 5146 if (BO->IsNUW) 5147 Flags = setFlags(Flags, SCEV::FlagNUW); 5148 if (BO->IsNSW) 5149 Flags = setFlags(Flags, SCEV::FlagNSW); 5150 } 5151 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { 5152 // If the increment is an inbounds GEP, then we know the address 5153 // space cannot be wrapped around. We cannot make any guarantee 5154 // about signed or unsigned overflow because pointers are 5155 // unsigned but we may have a negative index from the base 5156 // pointer. We can guarantee that no unsigned wrap occurs if the 5157 // indices form a positive value. 5158 if (GEP->isInBounds() && GEP->getOperand(0) == PN) { 5159 Flags = setFlags(Flags, SCEV::FlagNW); 5160 5161 const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); 5162 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) 5163 Flags = setFlags(Flags, SCEV::FlagNUW); 5164 } 5165 5166 // We cannot transfer nuw and nsw flags from subtraction 5167 // operations -- sub nuw X, Y is not the same as add nuw X, -Y 5168 // for instance. 5169 } 5170 5171 const SCEV *StartVal = getSCEV(StartValueV); 5172 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 5173 5174 // Okay, for the entire analysis of this edge we assumed the PHI 5175 // to be symbolic. We now need to go back and purge all of the 5176 // entries for the scalars that use the symbolic expression. 5177 forgetSymbolicName(PN, SymbolicName); 5178 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 5179 5180 // We can add Flags to the post-inc expression only if we 5181 // know that it is *undefined behavior* for BEValueV to 5182 // overflow. 5183 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 5184 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 5185 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 5186 5187 return PHISCEV; 5188 } 5189 } 5190 } else { 5191 // Otherwise, this could be a loop like this: 5192 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 5193 // In this case, j = {1,+,1} and BEValue is j. 5194 // Because the other in-value of i (0) fits the evolution of BEValue, 5195 // i really is an addrec evolution. 5196 // 5197 // We can generalize this by saying that i is the shifted value of BEValue 5198 // by one iteration: 5199 // PHI(f(0), f({1,+,1})) --> f({0,+,1}) 5200 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this); 5201 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false); 5202 if (Shifted != getCouldNotCompute() && 5203 Start != getCouldNotCompute()) { 5204 const SCEV *StartVal = getSCEV(StartValueV); 5205 if (Start == StartVal) { 5206 // Okay, for the entire analysis of this edge we assumed the PHI 5207 // to be symbolic. We now need to go back and purge all of the 5208 // entries for the scalars that use the symbolic expression. 5209 forgetSymbolicName(PN, SymbolicName); 5210 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted; 5211 return Shifted; 5212 } 5213 } 5214 } 5215 5216 // Remove the temporary PHI node SCEV that has been inserted while intending 5217 // to create an AddRecExpr for this PHI node. We cannot keep this temporary 5218 // as it will prevent later (possibly simpler) SCEV expressions from being 5219 // added to the ValueExprMap.
5220 eraseValueFromMap(PN); 5221 5222 return nullptr; 5223 } 5224 5225 // Checks if the SCEV S is available at BB. S is considered available at BB 5226 // if S can be materialized at BB without introducing a fault. 5227 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, 5228 BasicBlock *BB) { 5229 struct CheckAvailable { 5230 bool TraversalDone = false; 5231 bool Available = true; 5232 5233 const Loop *L = nullptr; // The loop BB is in (can be nullptr) 5234 BasicBlock *BB = nullptr; 5235 DominatorTree &DT; 5236 5237 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT) 5238 : L(L), BB(BB), DT(DT) {} 5239 5240 bool setUnavailable() { 5241 TraversalDone = true; 5242 Available = false; 5243 return false; 5244 } 5245 5246 bool follow(const SCEV *S) { 5247 switch (S->getSCEVType()) { 5248 case scConstant: 5249 case scPtrToInt: 5250 case scTruncate: 5251 case scZeroExtend: 5252 case scSignExtend: 5253 case scAddExpr: 5254 case scMulExpr: 5255 case scUMaxExpr: 5256 case scSMaxExpr: 5257 case scUMinExpr: 5258 case scSMinExpr: 5259 // These expressions are available if their operand(s) is/are. 5260 return true; 5261 5262 case scAddRecExpr: { 5263 // We allow add recurrences that are on the loop that BB is in, or some 5264 // outer loop. This guarantees availability because the value of the 5265 // add recurrence at BB is simply the "current" value of the induction 5266 // variable. We can relax this in the future; for instance an add 5267 // recurrence on a sibling dominating loop is also available at BB. 5268 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop(); 5269 if (L && (ARLoop == L || ARLoop->contains(L))) 5270 return true; 5271 5272 return setUnavailable(); 5273 } 5274 5275 case scUnknown: { 5276 // For SCEVUnknown, we check for simple dominance. 5277 const auto *SU = cast<SCEVUnknown>(S); 5278 Value *V = SU->getValue(); 5279 5280 if (isa<Argument>(V)) 5281 return false; 5282 5283 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB)) 5284 return false; 5285 5286 return setUnavailable(); 5287 } 5288 5289 case scUDivExpr: 5290 case scCouldNotCompute: 5291 // We do not try to be smart about these at all. 5292 return setUnavailable(); 5293 } 5294 llvm_unreachable("Unknown SCEV kind!"); 5295 } 5296 5297 bool isDone() { return TraversalDone; } 5298 }; 5299 5300 CheckAvailable CA(L, BB, DT); 5301 SCEVTraversal<CheckAvailable> ST(CA); 5302 5303 ST.visitAll(S); 5304 return CA.Available; 5305 } 5306 5307 // Try to match a control flow sequence that branches out at BI and merges back 5308 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful 5309 // match.
5310 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5311 Value *&C, Value *&LHS, Value *&RHS) { 5312 C = BI->getCondition(); 5313 5314 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5315 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5316 5317 if (!LeftEdge.isSingleEdge()) 5318 return false; 5319 5320 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5321 5322 Use &LeftUse = Merge->getOperandUse(0); 5323 Use &RightUse = Merge->getOperandUse(1); 5324 5325 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5326 LHS = LeftUse; 5327 RHS = RightUse; 5328 return true; 5329 } 5330 5331 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5332 LHS = RightUse; 5333 RHS = LeftUse; 5334 return true; 5335 } 5336 5337 return false; 5338 } 5339 5340 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5341 auto IsReachable = 5342 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5343 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5344 const Loop *L = LI.getLoopFor(PN->getParent()); 5345 5346 // We don't want to break LCSSA, even in a SCEV expression tree. 5347 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5348 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5349 return nullptr; 5350 5351 // Try to match 5352 // 5353 // br %cond, label %left, label %right 5354 // left: 5355 // br label %merge 5356 // right: 5357 // br label %merge 5358 // merge: 5359 // V = phi [ %x, %left ], [ %y, %right ] 5360 // 5361 // as "select %cond, %x, %y" 5362 5363 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5364 assert(IDom && "At least the entry block should dominate PN"); 5365 5366 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5367 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5368 5369 if (BI && BI->isConditional() && 5370 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5371 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5372 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5373 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5374 } 5375 5376 return nullptr; 5377 } 5378 5379 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5380 if (const SCEV *S = createAddRecFromPHI(PN)) 5381 return S; 5382 5383 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5384 return S; 5385 5386 // If the PHI has a single incoming value, follow that value, unless the 5387 // PHI's incoming blocks are in a different loop, in which case doing so 5388 // risks breaking LCSSA form. Instcombine would normally zap these, but 5389 // it doesn't have DominatorTree information, so it may miss cases. 5390 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5391 if (LI.replacementPreservesLCSSAForm(PN, V)) 5392 return getSCEV(V); 5393 5394 // If it's not a loop phi, we can't handle it yet. 5395 return getUnknown(PN); 5396 } 5397 5398 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5399 Value *Cond, 5400 Value *TrueVal, 5401 Value *FalseVal) { 5402 // Handle "constant" branch or select. This can occur for instance when a 5403 // loop pass transforms an inner loop and moves on to process the outer loop. 5404 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5405 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5406 5407 // Try to match some simple smax or umax patterns. 
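// Illustrative example (not from the original source): for
//   %c = icmp sgt i32 %a, %b
//   %s = select i1 %c, i32 %a1, i32 %b1
// where getSCEV(%a1) - getSCEV(%a) == getSCEV(%b1) - getSCEV(%b) == x, the
// cases below fold the select to smax(%a, %b) + x.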
5408 auto *ICI = dyn_cast<ICmpInst>(Cond); 5409 if (!ICI) 5410 return getUnknown(I); 5411 5412 Value *LHS = ICI->getOperand(0); 5413 Value *RHS = ICI->getOperand(1); 5414 5415 switch (ICI->getPredicate()) { 5416 case ICmpInst::ICMP_SLT: 5417 case ICmpInst::ICMP_SLE: 5418 std::swap(LHS, RHS); 5419 LLVM_FALLTHROUGH; 5420 case ICmpInst::ICMP_SGT: 5421 case ICmpInst::ICMP_SGE: 5422 // a >s b ? a+x : b+x -> smax(a, b)+x 5423 // a >s b ? b+x : a+x -> smin(a, b)+x 5424 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5425 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5426 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5427 const SCEV *LA = getSCEV(TrueVal); 5428 const SCEV *RA = getSCEV(FalseVal); 5429 const SCEV *LDiff = getMinusSCEV(LA, LS); 5430 const SCEV *RDiff = getMinusSCEV(RA, RS); 5431 if (LDiff == RDiff) 5432 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5433 LDiff = getMinusSCEV(LA, RS); 5434 RDiff = getMinusSCEV(RA, LS); 5435 if (LDiff == RDiff) 5436 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5437 } 5438 break; 5439 case ICmpInst::ICMP_ULT: 5440 case ICmpInst::ICMP_ULE: 5441 std::swap(LHS, RHS); 5442 LLVM_FALLTHROUGH; 5443 case ICmpInst::ICMP_UGT: 5444 case ICmpInst::ICMP_UGE: 5445 // a >u b ? a+x : b+x -> umax(a, b)+x 5446 // a >u b ? b+x : a+x -> umin(a, b)+x 5447 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5448 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5449 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5450 const SCEV *LA = getSCEV(TrueVal); 5451 const SCEV *RA = getSCEV(FalseVal); 5452 const SCEV *LDiff = getMinusSCEV(LA, LS); 5453 const SCEV *RDiff = getMinusSCEV(RA, RS); 5454 if (LDiff == RDiff) 5455 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5456 LDiff = getMinusSCEV(LA, RS); 5457 RDiff = getMinusSCEV(RA, LS); 5458 if (LDiff == RDiff) 5459 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5460 } 5461 break; 5462 case ICmpInst::ICMP_NE: 5463 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5464 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5465 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5466 const SCEV *One = getOne(I->getType()); 5467 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5468 const SCEV *LA = getSCEV(TrueVal); 5469 const SCEV *RA = getSCEV(FalseVal); 5470 const SCEV *LDiff = getMinusSCEV(LA, LS); 5471 const SCEV *RDiff = getMinusSCEV(RA, One); 5472 if (LDiff == RDiff) 5473 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5474 } 5475 break; 5476 case ICmpInst::ICMP_EQ: 5477 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5478 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5479 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5480 const SCEV *One = getOne(I->getType()); 5481 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5482 const SCEV *LA = getSCEV(TrueVal); 5483 const SCEV *RA = getSCEV(FalseVal); 5484 const SCEV *LDiff = getMinusSCEV(LA, One); 5485 const SCEV *RDiff = getMinusSCEV(RA, LS); 5486 if (LDiff == RDiff) 5487 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5488 } 5489 break; 5490 default: 5491 break; 5492 } 5493 5494 return getUnknown(I); 5495 } 5496 5497 /// Expand GEP instructions into add and multiply operations. This allows them 5498 /// to be analyzed by regular SCEV code. 5499 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5500 // Don't attempt to analyze GEPs over unsized objects. 
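// Illustrative example (assumption, not from the original source): for
//   %p = getelementptr inbounds i32, i32* %base, i64 %i
// getGEPExpr below produces the equivalent of (%base + (4 * %i)) given a
// 4-byte i32, so the usual add/mul folders can reason about the address.
// That expansion needs the source element type's size, hence the isSized()
// check below.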
5501 if (!GEP->getSourceElementType()->isSized()) 5502 return getUnknown(GEP); 5503 5504 SmallVector<const SCEV *, 4> IndexExprs; 5505 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5506 IndexExprs.push_back(getSCEV(*Index)); 5507 return getGEPExpr(GEP, IndexExprs); 5508 } 5509 5510 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5511 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5512 return C->getAPInt().countTrailingZeros(); 5513 5514 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S)) 5515 return GetMinTrailingZeros(I->getOperand()); 5516 5517 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5518 return std::min(GetMinTrailingZeros(T->getOperand()), 5519 (uint32_t)getTypeSizeInBits(T->getType())); 5520 5521 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5522 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5523 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5524 ? getTypeSizeInBits(E->getType()) 5525 : OpRes; 5526 } 5527 5528 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5529 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5530 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5531 ? getTypeSizeInBits(E->getType()) 5532 : OpRes; 5533 } 5534 5535 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5536 // The result is the min of all operands' results. 5537 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5538 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5539 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5540 return MinOpRes; 5541 } 5542 5543 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5544 // The result is the sum of all operands' results. 5545 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5546 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5547 for (unsigned i = 1, e = M->getNumOperands(); 5548 SumOpRes != BitWidth && i != e; ++i) 5549 SumOpRes = 5550 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5551 return SumOpRes; 5552 } 5553 5554 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5555 // The result is the min of all operands' results. 5556 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5557 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5558 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5559 return MinOpRes; 5560 } 5561 5562 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5563 // The result is the min of all operands' results. 5564 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5565 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5566 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5567 return MinOpRes; 5568 } 5569 5570 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5571 // The result is the min of all operands' results. 5572 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5573 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5574 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5575 return MinOpRes; 5576 } 5577 5578 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5579 // For a SCEVUnknown, ask ValueTracking.
5580 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5581 return Known.countMinTrailingZeros(); 5582 } 5583 5584 // SCEVUDivExpr 5585 return 0; 5586 } 5587 5588 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5589 auto I = MinTrailingZerosCache.find(S); 5590 if (I != MinTrailingZerosCache.end()) 5591 return I->second; 5592 5593 uint32_t Result = GetMinTrailingZerosImpl(S); 5594 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5595 assert(InsertPair.second && "Should insert a new key"); 5596 return InsertPair.first->second; 5597 } 5598 5599 /// Helper method to assign a range to V from metadata present in the IR. 5600 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5601 if (Instruction *I = dyn_cast<Instruction>(V)) 5602 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5603 return getConstantRangeFromMetadata(*MD); 5604 5605 return None; 5606 } 5607 5608 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec, 5609 SCEV::NoWrapFlags Flags) { 5610 if (AddRec->getNoWrapFlags(Flags) != Flags) { 5611 AddRec->setNoWrapFlags(Flags); 5612 UnsignedRanges.erase(AddRec); 5613 SignedRanges.erase(AddRec); 5614 } 5615 } 5616 5617 /// Determine the range for a particular SCEV. If SignHint is 5618 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5619 /// with a "cleaner" unsigned (resp. signed) representation. 5620 const ConstantRange & 5621 ScalarEvolution::getRangeRef(const SCEV *S, 5622 ScalarEvolution::RangeSignHint SignHint) { 5623 DenseMap<const SCEV *, ConstantRange> &Cache = 5624 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5625 : SignedRanges; 5626 ConstantRange::PreferredRangeType RangeType = 5627 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 5628 ? ConstantRange::Unsigned : ConstantRange::Signed; 5629 5630 // See if we've computed this range already. 5631 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5632 if (I != Cache.end()) 5633 return I->second; 5634 5635 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5636 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5637 5638 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5639 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5640 using OBO = OverflowingBinaryOperator; 5641 5642 // If the value has known zeros, the maximum value will have those known zeros 5643 // as well. 
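// Illustrative example (not from the original source): with BitWidth == 8
// and TZ == 2 known trailing zero bits, the unsigned case below computes
// [0, 0b11111100 + 1) == [0, 253), clamping the unsigned maximum to 252,
// the largest 8-bit value with two trailing zeros.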
5644 uint32_t TZ = GetMinTrailingZeros(S); 5645 if (TZ != 0) { 5646 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5647 ConservativeResult = 5648 ConstantRange(APInt::getMinValue(BitWidth), 5649 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5650 else 5651 ConservativeResult = ConstantRange( 5652 APInt::getSignedMinValue(BitWidth), 5653 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5654 } 5655 5656 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5657 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5658 unsigned WrapType = OBO::AnyWrap; 5659 if (Add->hasNoSignedWrap()) 5660 WrapType |= OBO::NoSignedWrap; 5661 if (Add->hasNoUnsignedWrap()) 5662 WrapType |= OBO::NoUnsignedWrap; 5663 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5664 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 5665 WrapType, RangeType); 5666 return setRange(Add, SignHint, 5667 ConservativeResult.intersectWith(X, RangeType)); 5668 } 5669 5670 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5671 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5672 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5673 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5674 return setRange(Mul, SignHint, 5675 ConservativeResult.intersectWith(X, RangeType)); 5676 } 5677 5678 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5679 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5680 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5681 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5682 return setRange(SMax, SignHint, 5683 ConservativeResult.intersectWith(X, RangeType)); 5684 } 5685 5686 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5687 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5688 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5689 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5690 return setRange(UMax, SignHint, 5691 ConservativeResult.intersectWith(X, RangeType)); 5692 } 5693 5694 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) { 5695 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint); 5696 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i) 5697 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint)); 5698 return setRange(SMin, SignHint, 5699 ConservativeResult.intersectWith(X, RangeType)); 5700 } 5701 5702 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) { 5703 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint); 5704 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i) 5705 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint)); 5706 return setRange(UMin, SignHint, 5707 ConservativeResult.intersectWith(X, RangeType)); 5708 } 5709 5710 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5711 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5712 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5713 return setRange(UDiv, SignHint, 5714 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 5715 } 5716 5717 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5718 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5719 return setRange(ZExt, SignHint, 5720 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), 5721 RangeType)); 5722 } 5723 5724 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5725 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5726 return setRange(SExt, 
SignHint, 5727 ConservativeResult.intersectWith(X.signExtend(BitWidth), 5728 RangeType)); 5729 } 5730 5731 if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) { 5732 ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint); 5733 return setRange(PtrToInt, SignHint, X); 5734 } 5735 5736 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5737 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5738 return setRange(Trunc, SignHint, 5739 ConservativeResult.intersectWith(X.truncate(BitWidth), 5740 RangeType)); 5741 } 5742 5743 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5744 // If there's no unsigned wrap, the value will never be less than its 5745 // initial value. 5746 if (AddRec->hasNoUnsignedWrap()) { 5747 APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart()); 5748 if (!UnsignedMinValue.isNullValue()) 5749 ConservativeResult = ConservativeResult.intersectWith( 5750 ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType); 5751 } 5752 5753 // If there's no signed wrap, and all the operands other than the initial 5754 // value have the same sign or are zero, the value will never be: 5755 // 1: smaller than the initial value if the operands are non-negative, 5756 // 2: bigger than the initial value if the operands are non-positive. 5757 // In both cases, the value cannot cross the signed min/max boundary. 5758 if (AddRec->hasNoSignedWrap()) { 5759 bool AllNonNeg = true; 5760 bool AllNonPos = true; 5761 for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) { 5762 if (!isKnownNonNegative(AddRec->getOperand(i))) 5763 AllNonNeg = false; 5764 if (!isKnownNonPositive(AddRec->getOperand(i))) 5765 AllNonPos = false; 5766 } 5767 if (AllNonNeg) 5768 ConservativeResult = ConservativeResult.intersectWith( 5769 ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()), 5770 APInt::getSignedMinValue(BitWidth)), 5771 RangeType); 5772 else if (AllNonPos) 5773 ConservativeResult = ConservativeResult.intersectWith( 5774 ConstantRange::getNonEmpty( 5775 APInt::getSignedMinValue(BitWidth), 5776 getSignedRangeMax(AddRec->getStart()) + 1), 5777 RangeType); 5778 } 5779 5780 // TODO: non-affine addrec 5781 if (AddRec->isAffine()) { 5782 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop()); 5783 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5784 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5785 auto RangeFromAffine = getRangeForAffineAR( 5786 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5787 BitWidth); 5788 ConservativeResult = 5789 ConservativeResult.intersectWith(RangeFromAffine, RangeType); 5790 5791 auto RangeFromFactoring = getRangeViaFactoring( 5792 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5793 BitWidth); 5794 ConservativeResult = 5795 ConservativeResult.intersectWith(RangeFromFactoring, RangeType); 5796 } 5797 5798 // Now try symbolic BE count and more powerful methods.
5799 if (UseExpensiveRangeSharpening) { 5800 const SCEV *SymbolicMaxBECount = 5801 getSymbolicMaxBackedgeTakenCount(AddRec->getLoop()); 5802 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) && 5803 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5804 AddRec->hasNoSelfWrap()) { 5805 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR( 5806 AddRec, SymbolicMaxBECount, BitWidth, SignHint); 5807 ConservativeResult = 5808 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType); 5809 } 5810 } 5811 } 5812 5813 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5814 } 5815 5816 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5817 // Check if the IR explicitly contains !range metadata. 5818 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5819 if (MDRange.hasValue()) 5820 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(), 5821 RangeType); 5822 5823 // Split here to avoid paying the compile-time cost of calling both 5824 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5825 // if needed. 5826 const DataLayout &DL = getDataLayout(); 5827 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5828 // For a SCEVUnknown, ask ValueTracking. 5829 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5830 if (Known.getBitWidth() != BitWidth) 5831 Known = Known.zextOrTrunc(BitWidth); 5832 // If Known does not result in full-set, intersect with it. 5833 if (Known.getMinValue() != Known.getMaxValue() + 1) 5834 ConservativeResult = ConservativeResult.intersectWith( 5835 ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1), 5836 RangeType); 5837 } else { 5838 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 5839 "generalize as needed!"); 5840 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5841 // If the pointer size is larger than the index size type, this can cause 5842 // NS to be larger than BitWidth. So compensate for this. 5843 if (U->getType()->isPointerTy()) { 5844 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType()); 5845 int ptrIdxDiff = ptrSize - BitWidth; 5846 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff) 5847 NS -= ptrIdxDiff; 5848 } 5849 5850 if (NS > 1) 5851 ConservativeResult = ConservativeResult.intersectWith( 5852 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 5853 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1), 5854 RangeType); 5855 } 5856 5857 // The range of a Phi is a subset of the union of the ranges of its inputs. 5858 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) { 5859 // Make sure that we do not recurse endlessly over cyclic Phis. 5860 if (PendingPhiRanges.insert(Phi).second) { 5861 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false); 5862 for (auto &Op : Phi->operands()) { 5863 auto OpRange = getRangeRef(getSCEV(Op), SignHint); 5864 RangeFromOps = RangeFromOps.unionWith(OpRange); 5865 // No point in continuing if we already have a full set.
5866 if (RangeFromOps.isFullSet()) 5867 break; 5868 } 5869 ConservativeResult = 5870 ConservativeResult.intersectWith(RangeFromOps, RangeType); 5871 bool Erased = PendingPhiRanges.erase(Phi); 5872 assert(Erased && "Failed to erase Phi properly?"); 5873 (void) Erased; 5874 } 5875 } 5876 5877 return setRange(U, SignHint, std::move(ConservativeResult)); 5878 } 5879 5880 return setRange(S, SignHint, std::move(ConservativeResult)); 5881 } 5882 5883 // Given a StartRange, Step and MaxBECount for an expression, compute a range of 5884 // values that the expression can take. Initially, the expression has a value 5885 // from StartRange and then is changed by Step up to MaxBECount times. The Signed 5886 // argument defines whether we treat Step as signed or unsigned. 5887 static ConstantRange getRangeForAffineARHelper(APInt Step, 5888 const ConstantRange &StartRange, 5889 const APInt &MaxBECount, 5890 unsigned BitWidth, bool Signed) { 5891 // If either Step or MaxBECount is 0, then the expression won't change, and we 5892 // just need to return the initial range. 5893 if (Step == 0 || MaxBECount == 0) 5894 return StartRange; 5895 5896 // If we don't know anything about the initial value (i.e. StartRange is 5897 // FullRange), then we don't know anything about the final range either. 5898 // Return FullRange. 5899 if (StartRange.isFullSet()) 5900 return ConstantRange::getFull(BitWidth); 5901 5902 // If Step is signed and negative, then we use its absolute value, but we also 5903 // note that we're moving in the opposite direction. 5904 bool Descending = Signed && Step.isNegative(); 5905 5906 if (Signed) 5907 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this: 5908 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128. 5909 // These equations hold true due to the well-defined wrap-around behavior of 5910 // APInt. 5911 Step = Step.abs(); 5912 5913 // Check if Offset is more than the full span of BitWidth. If it is, the 5914 // expression is guaranteed to overflow. 5915 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount)) 5916 return ConstantRange::getFull(BitWidth); 5917 5918 // Offset is by how much the expression can change. The checks above guarantee no 5919 // overflow here. 5920 APInt Offset = Step * MaxBECount; 5921 5922 // The minimum value of the final range will match the minimal value of StartRange 5923 // if the expression is increasing, and will be decreased by Offset otherwise. 5924 // The maximum value of the final range will match the maximal value of StartRange 5925 // if the expression is decreasing, and will be increased by Offset otherwise. 5926 APInt StartLower = StartRange.getLower(); 5927 APInt StartUpper = StartRange.getUpper() - 1; 5928 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset)) 5929 : (StartUpper + std::move(Offset)); 5930 5931 // It's possible that the new minimum/maximum value will fall into the initial 5932 // range (due to wrap-around). This means that the expression can take any 5933 // value in this bitwidth, and we have to return the full range. 5934 if (StartRange.contains(MovedBoundary)) 5935 return ConstantRange::getFull(BitWidth); 5936 5937 APInt NewLower = 5938 Descending ? std::move(MovedBoundary) : std::move(StartLower); 5939 APInt NewUpper = 5940 Descending ? std::move(StartUpper) : std::move(MovedBoundary); 5941 NewUpper += 1; 5942 5943 // No overflow detected, return the [StartLower, StartUpper + Offset + 1) range.
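// Illustrative example (i8, Signed): StartRange = [10, 20), Step = 3 and // MaxBECount = 5 give Offset = 15, StartUpper = 19 and MovedBoundary = 34. // 34 is not contained in [10, 20), so the result is [10, 35).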
5944 return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper)); 5945 } 5946 5947 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 5948 const SCEV *Step, 5949 const SCEV *MaxBECount, 5950 unsigned BitWidth) { 5951 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 5952 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5953 "Precondition!"); 5954 5955 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 5956 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount); 5957 5958 // First, consider step signed. 5959 ConstantRange StartSRange = getSignedRange(Start); 5960 ConstantRange StepSRange = getSignedRange(Step); 5961 5962 // If Step can be both positive and negative, we need to find ranges for the 5963 // maximum absolute step values in both directions and union them. 5964 ConstantRange SR = 5965 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5966 MaxBECountValue, BitWidth, /* Signed = */ true); 5967 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5968 StartSRange, MaxBECountValue, 5969 BitWidth, /* Signed = */ true)); 5970 5971 // Next, consider step unsigned. 5972 ConstantRange UR = getRangeForAffineARHelper( 5973 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5974 MaxBECountValue, BitWidth, /* Signed = */ false); 5975 5976 // Finally, intersect signed and unsigned ranges. 5977 return SR.intersectWith(UR, ConstantRange::Smallest); 5978 } 5979 5980 ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR( 5981 const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth, 5982 ScalarEvolution::RangeSignHint SignHint) { 5983 assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!"); 5984 assert(AddRec->hasNoSelfWrap() && 5985 "This only works for non-self-wrapping AddRecs!"); 5986 const bool IsSigned = SignHint == HINT_RANGE_SIGNED; 5987 const SCEV *Step = AddRec->getStepRecurrence(*this); 5988 // Only deal with constant step to save compile time. 5989 if (!isa<SCEVConstant>(Step)) 5990 return ConstantRange::getFull(BitWidth); 5991 // Let's make sure that we can prove that we do not self-wrap during 5992 // MaxBECount iterations. We need this because MaxBECount is a maximum 5993 // iteration count estimate, and we might infer nw from some exit for which we 5994 // do not know the max exit count (or from other side reasoning). 5995 // TODO: Turn into assert at some point. 5996 MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType()); 5997 const SCEV *RangeWidth = getMinusOne(AddRec->getType()); 5998 const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step)); 5999 const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs); 6000 if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount, 6001 MaxItersWithoutWrap)) 6002 return ConstantRange::getFull(BitWidth); 6003 6004 ICmpInst::Predicate LEPred = 6005 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 6006 ICmpInst::Predicate GEPred = 6007 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 6008 const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this); 6009 6010 // We know that there is no self-wrap. Let's take Start and End values and 6011 // look at all intermediate values V1, V2, ..., Vn that IndVar takes during 6012 // the iteration. They either lie inside the range [Min(Start, End), 6013 // Max(Start, End)] or outside it: 6014 // 6015 // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax; 6016 // Case 2: RangeMin Vk ... V1 Start ... End Vn ...
Vk + 1 RangeMax; 6017 // 6018 // The no-self-wrap flag guarantees that the intermediate values cannot be BOTH 6019 // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that 6020 // knowledge, let's try to prove that we are dealing with Case 1. This is so if 6021 // Start <= End and the step is positive, or Start >= End and the step is negative. 6022 const SCEV *Start = AddRec->getStart(); 6023 ConstantRange StartRange = getRangeRef(Start, SignHint); 6024 ConstantRange EndRange = getRangeRef(End, SignHint); 6025 ConstantRange RangeBetween = StartRange.unionWith(EndRange); 6026 // If they already cover the full iteration space, we will learn nothing useful 6027 // even if we prove what we want to prove. 6028 if (RangeBetween.isFullSet()) 6029 return RangeBetween; 6030 // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax). 6031 bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet() 6032 : RangeBetween.isWrappedSet(); 6033 if (IsWrappedSet) 6034 return ConstantRange::getFull(BitWidth); 6035 6036 if (isKnownPositive(Step) && 6037 isKnownPredicateViaConstantRanges(LEPred, Start, End)) 6038 return RangeBetween; 6039 else if (isKnownNegative(Step) && 6040 isKnownPredicateViaConstantRanges(GEPred, Start, End)) 6041 return RangeBetween; 6042 return ConstantRange::getFull(BitWidth); 6043 } 6044 6045 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 6046 const SCEV *Step, 6047 const SCEV *MaxBECount, 6048 unsigned BitWidth) { 6049 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 6050 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 6051 6052 struct SelectPattern { 6053 Value *Condition = nullptr; 6054 APInt TrueValue; 6055 APInt FalseValue; 6056 6057 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 6058 const SCEV *S) { 6059 Optional<unsigned> CastOp; 6060 APInt Offset(BitWidth, 0); 6061 6062 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 6063 "Should be!"); 6064 6065 // Peel off a constant offset: 6066 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 6067 // In the future we could consider being smarter here and handle 6068 // {Start+Step,+,Step} too.
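// Illustrative walk-through: for S = (5 + (zext i8 (select %c, i8 2, i8 6) // to i32)) we peel Offset = 5 and CastOp = scZeroExtend, match the select, // and after re-applying both end up with TrueValue = 7, FalseValue = 11.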
6069 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 6070 return; 6071 6072 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 6073 S = SA->getOperand(1); 6074 } 6075 6076 // Peel off a cast operation 6077 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 6078 CastOp = SCast->getSCEVType(); 6079 S = SCast->getOperand(); 6080 } 6081 6082 using namespace llvm::PatternMatch; 6083 6084 auto *SU = dyn_cast<SCEVUnknown>(S); 6085 const APInt *TrueVal, *FalseVal; 6086 if (!SU || 6087 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 6088 m_APInt(FalseVal)))) { 6089 Condition = nullptr; 6090 return; 6091 } 6092 6093 TrueValue = *TrueVal; 6094 FalseValue = *FalseVal; 6095 6096 // Re-apply the cast we peeled off earlier 6097 if (CastOp.hasValue()) 6098 switch (*CastOp) { 6099 default: 6100 llvm_unreachable("Unknown SCEV cast type!"); 6101 6102 case scTruncate: 6103 TrueValue = TrueValue.trunc(BitWidth); 6104 FalseValue = FalseValue.trunc(BitWidth); 6105 break; 6106 case scZeroExtend: 6107 TrueValue = TrueValue.zext(BitWidth); 6108 FalseValue = FalseValue.zext(BitWidth); 6109 break; 6110 case scSignExtend: 6111 TrueValue = TrueValue.sext(BitWidth); 6112 FalseValue = FalseValue.sext(BitWidth); 6113 break; 6114 } 6115 6116 // Re-apply the constant offset we peeled off earlier 6117 TrueValue += Offset; 6118 FalseValue += Offset; 6119 } 6120 6121 bool isRecognized() { return Condition != nullptr; } 6122 }; 6123 6124 SelectPattern StartPattern(*this, BitWidth, Start); 6125 if (!StartPattern.isRecognized()) 6126 return ConstantRange::getFull(BitWidth); 6127 6128 SelectPattern StepPattern(*this, BitWidth, Step); 6129 if (!StepPattern.isRecognized()) 6130 return ConstantRange::getFull(BitWidth); 6131 6132 if (StartPattern.Condition != StepPattern.Condition) { 6133 // We don't handle this case today; but we could, by considering four 6134 // possibilities below instead of two. I'm not sure if there are cases where 6135 // that will help over what getRange already does, though. 6136 return ConstantRange::getFull(BitWidth); 6137 } 6138 6139 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 6140 // construct arbitrary general SCEV expressions here. This function is called 6141 // from deep in the call stack, and calling getSCEV (on a sext instruction, 6142 // say) can end up caching a suboptimal value. 6143 6144 // FIXME: without the explicit `this` receiver below, MSVC errors out with 6145 // C2352 and C2512 (otherwise it isn't needed). 6146 6147 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 6148 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 6149 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 6150 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 6151 6152 ConstantRange TrueRange = 6153 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 6154 ConstantRange FalseRange = 6155 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 6156 6157 return TrueRange.unionWith(FalseRange); 6158 } 6159 6160 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 6161 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 6162 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 6163 6164 // Return early if there are no flags to propagate to the SCEV. 
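// E.g. an 'add nuw nsw' instruction contributes FlagNUW and FlagNSW, but // only if isSCEVExprNeverPoison below agrees; other instructions mapping to // the same SCEV might wrap, so unproven flags must be dropped.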
6165 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6166 if (BinOp->hasNoUnsignedWrap()) 6167 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 6168 if (BinOp->hasNoSignedWrap()) 6169 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 6170 if (Flags == SCEV::FlagAnyWrap) 6171 return SCEV::FlagAnyWrap; 6172 6173 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 6174 } 6175 6176 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 6177 // Here we check that I is in the header of the innermost loop containing I, 6178 // since we only deal with instructions in the loop header. The actual loop we 6179 // need to check later will come from an add recurrence, but getting that 6180 // requires computing the SCEV of the operands, which can be expensive. This 6181 // check we can do cheaply to rule out some cases early. 6182 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 6183 if (InnermostContainingLoop == nullptr || 6184 InnermostContainingLoop->getHeader() != I->getParent()) 6185 return false; 6186 6187 // Only proceed if we can prove that I does not yield poison. 6188 if (!programUndefinedIfPoison(I)) 6189 return false; 6190 6191 // At this point we know that if I is executed, then it does not wrap 6192 // according to at least one of NSW or NUW. If I is not executed, then we do 6193 // not know if the calculation that I represents would wrap. Multiple 6194 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 6195 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 6196 // derived from other instructions that map to the same SCEV. We cannot make 6197 // that guarantee for cases where I is not executed. So we need to find the 6198 // loop that I is considered in relation to and prove that I is executed for 6199 // every iteration of that loop. That implies that the value that I 6200 // calculates does not wrap anywhere in the loop, so then we can apply the 6201 // flags to the SCEV. 6202 // 6203 // We check isLoopInvariant to disambiguate in case we are adding recurrences 6204 // from different loops, so that we know which loop to prove that I is 6205 // executed in. 6206 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 6207 // I could be an extractvalue from a call to an overflow intrinsic. 6208 // TODO: We can do better here in some cases. 6209 if (!isSCEVable(I->getOperand(OpIndex)->getType())) 6210 return false; 6211 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 6212 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 6213 bool AllOtherOpsLoopInvariant = true; 6214 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 6215 ++OtherOpIndex) { 6216 if (OtherOpIndex != OpIndex) { 6217 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 6218 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 6219 AllOtherOpsLoopInvariant = false; 6220 break; 6221 } 6222 } 6223 } 6224 if (AllOtherOpsLoopInvariant && 6225 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 6226 return true; 6227 } 6228 } 6229 return false; 6230 } 6231 6232 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 6233 // If we know that \c I can never be poison period, then that's enough. 
6234 if (isSCEVExprNeverPoison(I)) 6235 return true; 6236 6237 // For an add recurrence specifically, we assume that infinite loops without 6238 // side effects are undefined behavior, and then reason as follows: 6239 // 6240 // If the add recurrence is poison in any iteration, it is poison on all 6241 // future iterations (since incrementing poison yields poison). If the result 6242 // of the add recurrence is fed into the loop latch condition and the loop 6243 // does not contain any throws or exiting blocks other than the latch, we now 6244 // have the ability to "choose" whether the backedge is taken or not (by 6245 // choosing a sufficiently evil value for the poison feeding into the branch) 6246 // for every iteration including and after the one in which \p I first became 6247 // poison. There are two possibilities (let K be the iteration in which \p 6248 // I first became poison): 6249 // 6250 // 1. In the set of iterations including and after K, the loop body executes 6251 // no side effects. In this case, executing the backedge an infinite number 6252 // of times will yield undefined behavior. 6253 // 6254 // 2. In the set of iterations including and after K, the loop body executes 6255 // at least one side effect. In this case, that specific instance of side 6256 // effect is control dependent on poison, which also yields undefined 6257 // behavior. 6258 6259 auto *ExitingBB = L->getExitingBlock(); 6260 auto *LatchBB = L->getLoopLatch(); 6261 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB) 6262 return false; 6263 6264 SmallPtrSet<const Instruction *, 16> Pushed; 6265 SmallVector<const Instruction *, 8> PoisonStack; 6266 6267 // We start by assuming \c I, the post-inc add recurrence, is poison. Only 6268 // things that are known to be poison under that assumption go on the 6269 // PoisonStack. 6270 Pushed.insert(I); 6271 PoisonStack.push_back(I); 6272 6273 bool LatchControlDependentOnPoison = false; 6274 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 6275 const Instruction *Poison = PoisonStack.pop_back_val(); 6276 6277 for (auto *PoisonUser : Poison->users()) { 6278 if (propagatesPoison(cast<Operator>(PoisonUser))) { 6279 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 6280 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 6281 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 6282 assert(BI->isConditional() && "Only possibility!"); 6283 if (BI->getParent() == LatchBB) { 6284 LatchControlDependentOnPoison = true; 6285 break; 6286 } 6287 } 6288 } 6289 } 6290 6291 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 6292 } 6293 6294 ScalarEvolution::LoopProperties 6295 ScalarEvolution::getLoopProperties(const Loop *L) { 6296 using LoopProperties = ScalarEvolution::LoopProperties; 6297 6298 auto Itr = LoopPropertiesCache.find(L); 6299 if (Itr == LoopPropertiesCache.end()) { 6300 auto HasSideEffects = [](Instruction *I) { 6301 if (auto *SI = dyn_cast<StoreInst>(I)) 6302 return !SI->isSimple(); 6303 6304 return I->mayHaveSideEffects(); 6305 }; 6306 6307 LoopProperties LP = {/* HasNoAbnormalExits */ true, 6308 /* HasNoSideEffects */ true}; 6309 6310 for (auto *BB : L->getBlocks()) 6311 for (auto &I : *BB) { 6312 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 6313 LP.HasNoAbnormalExits = false; 6314 if (HasSideEffects(&I)) 6315 LP.HasNoSideEffects = false; 6316 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 6317 break; // We're already as pessimistic as we can get.
6318 } 6319 6320 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6321 assert(InsertPair.second && "We just checked!"); 6322 Itr = InsertPair.first; 6323 } 6324 6325 return Itr->second; 6326 } 6327 6328 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6329 if (!isSCEVable(V->getType())) 6330 return getUnknown(V); 6331 6332 if (Instruction *I = dyn_cast<Instruction>(V)) { 6333 // Don't attempt to analyze instructions in blocks that aren't 6334 // reachable. Such instructions don't matter, and they aren't required 6335 // to obey basic rules for definitions dominating uses which this 6336 // analysis depends on. 6337 if (!DT.isReachableFromEntry(I->getParent())) 6338 return getUnknown(UndefValue::get(V->getType())); 6339 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6340 return getConstant(CI); 6341 else if (isa<ConstantPointerNull>(V)) 6342 // FIXME: we shouldn't special-case null pointer constant. 6343 return getZero(V->getType()); 6344 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6345 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6346 else if (!isa<ConstantExpr>(V)) 6347 return getUnknown(V); 6348 6349 Operator *U = cast<Operator>(V); 6350 if (auto BO = MatchBinaryOp(U, DT)) { 6351 switch (BO->Opcode) { 6352 case Instruction::Add: { 6353 // The simple thing to do would be to just call getSCEV on both operands 6354 // and call getAddExpr with the result. However if we're looking at a 6355 // bunch of things all added together, this can be quite inefficient, 6356 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6357 // Instead, gather up all the operands and make a single getAddExpr call. 6358 // LLVM IR canonical form means we need only traverse the left operands. 6359 SmallVector<const SCEV *, 4> AddOps; 6360 do { 6361 if (BO->Op) { 6362 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6363 AddOps.push_back(OpSCEV); 6364 break; 6365 } 6366 6367 // If a NUW or NSW flag can be applied to the SCEV for this 6368 // addition, then compute the SCEV for this addition by itself 6369 // with a separate call to getAddExpr. We need to do that 6370 // instead of pushing the operands of the addition onto AddOps, 6371 // since the flags are only known to apply to this particular 6372 // addition - they may not apply to other additions that can be 6373 // formed with operands from AddOps. 
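// E.g. in ((a +nuw b) + c) the nuw applies only to the inner addition, so we // fold (a +nuw b) with its flags in a separate getAddExpr call rather than // flattening a, b and c into a single flag-carrying add.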
6374 const SCEV *RHS = getSCEV(BO->RHS); 6375 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6376 if (Flags != SCEV::FlagAnyWrap) { 6377 const SCEV *LHS = getSCEV(BO->LHS); 6378 if (BO->Opcode == Instruction::Sub) 6379 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6380 else 6381 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6382 break; 6383 } 6384 } 6385 6386 if (BO->Opcode == Instruction::Sub) 6387 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6388 else 6389 AddOps.push_back(getSCEV(BO->RHS)); 6390 6391 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6392 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6393 NewBO->Opcode != Instruction::Sub)) { 6394 AddOps.push_back(getSCEV(BO->LHS)); 6395 break; 6396 } 6397 BO = NewBO; 6398 } while (true); 6399 6400 return getAddExpr(AddOps); 6401 } 6402 6403 case Instruction::Mul: { 6404 SmallVector<const SCEV *, 4> MulOps; 6405 do { 6406 if (BO->Op) { 6407 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6408 MulOps.push_back(OpSCEV); 6409 break; 6410 } 6411 6412 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6413 if (Flags != SCEV::FlagAnyWrap) { 6414 MulOps.push_back( 6415 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6416 break; 6417 } 6418 } 6419 6420 MulOps.push_back(getSCEV(BO->RHS)); 6421 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6422 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6423 MulOps.push_back(getSCEV(BO->LHS)); 6424 break; 6425 } 6426 BO = NewBO; 6427 } while (true); 6428 6429 return getMulExpr(MulOps); 6430 } 6431 case Instruction::UDiv: 6432 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6433 case Instruction::URem: 6434 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6435 case Instruction::Sub: { 6436 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6437 if (BO->Op) 6438 Flags = getNoWrapFlagsFromUB(BO->Op); 6439 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6440 } 6441 case Instruction::And: 6442 // For an expression like x&255 that merely masks off the high bits, 6443 // use zext(trunc(x)) as the SCEV expression. 6444 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6445 if (CI->isZero()) 6446 return getSCEV(BO->RHS); 6447 if (CI->isMinusOne()) 6448 return getSCEV(BO->LHS); 6449 const APInt &A = CI->getValue(); 6450 6451 // Instcombine's ShrinkDemandedConstant may strip bits out of 6452 // constants, obscuring what would otherwise be a low-bits mask. 6453 // Use computeKnownBits to compute what ShrinkDemandedConstant 6454 // knew about to reconstruct a low-bits mask value. 6455 unsigned LZ = A.countLeadingZeros(); 6456 unsigned TZ = A.countTrailingZeros(); 6457 unsigned BitWidth = A.getBitWidth(); 6458 KnownBits Known(BitWidth); 6459 computeKnownBits(BO->LHS, Known, getDataLayout(), 6460 0, &AC, nullptr, &DT); 6461 6462 APInt EffectiveMask = 6463 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6464 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6465 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6466 const SCEV *LHS = getSCEV(BO->LHS); 6467 const SCEV *ShiftedLHS = nullptr; 6468 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6469 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6470 // For an expression like (x * 8) & 8, simplify the multiply. 
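// Concretely, for i32 (x * 8) & 8: TZ = 3 and MulZeros = 3 give GCD = 3 and // DivAmt = 1, so ShiftedLHS folds back to x and the overall result becomes // (zext (trunc x to i1) to i32) * 8, i.e. the low bit of x scaled by 8.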
6471 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6472 unsigned GCD = std::min(MulZeros, TZ); 6473 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6474 SmallVector<const SCEV*, 4> MulOps; 6475 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6476 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6477 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6478 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6479 } 6480 } 6481 if (!ShiftedLHS) 6482 ShiftedLHS = getUDivExpr(LHS, MulCount); 6483 return getMulExpr( 6484 getZeroExtendExpr( 6485 getTruncateExpr(ShiftedLHS, 6486 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6487 BO->LHS->getType()), 6488 MulCount); 6489 } 6490 } 6491 break; 6492 6493 case Instruction::Or: 6494 // If the RHS of the Or is a constant, we may have something like: 6495 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6496 // optimizations will transparently handle this case. 6497 // 6498 // In order for this transformation to be safe, the LHS must be of the 6499 // form X*(2^n) and the Or constant must be less than 2^n. 6500 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6501 const SCEV *LHS = getSCEV(BO->LHS); 6502 const APInt &CIVal = CI->getValue(); 6503 if (GetMinTrailingZeros(LHS) >= 6504 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6505 // Build a plain add SCEV. 6506 return getAddExpr(LHS, getSCEV(CI), 6507 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 6508 } 6509 } 6510 break; 6511 6512 case Instruction::Xor: 6513 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6514 // If the RHS of xor is -1, then this is a not operation. 6515 if (CI->isMinusOne()) 6516 return getNotSCEV(getSCEV(BO->LHS)); 6517 6518 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6519 // This is a variant of the check for xor with -1, and it handles 6520 // the case where instcombine has trimmed non-demanded bits out 6521 // of an xor with -1. 6522 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6523 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6524 if (LBO->getOpcode() == Instruction::And && 6525 LCI->getValue() == CI->getValue()) 6526 if (const SCEVZeroExtendExpr *Z = 6527 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6528 Type *UTy = BO->LHS->getType(); 6529 const SCEV *Z0 = Z->getOperand(); 6530 Type *Z0Ty = Z0->getType(); 6531 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6532 6533 // If C is a low-bits mask, the zero extend is serving to 6534 // mask off the high bits. Complement the operand and 6535 // re-apply the zext. 6536 if (CI->getValue().isMask(Z0TySize)) 6537 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6538 6539 // If C is a single bit, it may be in the sign-bit position 6540 // before the zero-extend. In this case, represent the xor 6541 // using an add, which is equivalent, and re-apply the zext. 6542 APInt Trunc = CI->getValue().trunc(Z0TySize); 6543 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 6544 Trunc.isSignMask()) 6545 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 6546 UTy); 6547 } 6548 } 6549 break; 6550 6551 case Instruction::Shl: 6552 // Turn shift left of a constant amount into a multiply. 6553 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 6554 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 6555 6556 // If the shift count is not less than the bitwidth, the result of 6557 // the shift is undefined. 
Don't try to analyze it, because the 6558 // resolution chosen here may differ from the resolution chosen in 6559 // other parts of the compiler. 6560 if (SA->getValue().uge(BitWidth)) 6561 break; 6562 6563 // We can safely preserve the nuw flag in all cases. It's also safe to 6564 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation 6565 // requires special handling. It can be preserved as long as we're not 6566 // left shifting by bitwidth - 1. 6567 auto Flags = SCEV::FlagAnyWrap; 6568 if (BO->Op) { 6569 auto MulFlags = getNoWrapFlagsFromUB(BO->Op); 6570 if ((MulFlags & SCEV::FlagNSW) && 6571 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1))) 6572 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW); 6573 if (MulFlags & SCEV::FlagNUW) 6574 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW); 6575 } 6576 6577 Constant *X = ConstantInt::get( 6578 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 6579 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 6580 } 6581 break; 6582 6583 case Instruction::AShr: { 6584 // AShr X, C, where C is a constant. 6585 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 6586 if (!CI) 6587 break; 6588 6589 Type *OuterTy = BO->LHS->getType(); 6590 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 6591 // If the shift count is not less than the bitwidth, the result of 6592 // the shift is undefined. Don't try to analyze it, because the 6593 // resolution chosen here may differ from the resolution chosen in 6594 // other parts of the compiler. 6595 if (CI->getValue().uge(BitWidth)) 6596 break; 6597 6598 if (CI->isZero()) 6599 return getSCEV(BO->LHS); // shift by zero --> noop 6600 6601 uint64_t AShrAmt = CI->getZExtValue(); 6602 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 6603 6604 Operator *L = dyn_cast<Operator>(BO->LHS); 6605 if (L && L->getOpcode() == Instruction::Shl) { 6606 // X = Shl A, n 6607 // Y = AShr X, m 6608 // Both n and m are constant. 6609 6610 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 6611 if (L->getOperand(1) == BO->RHS) 6612 // For a two-shift sext-inreg, i.e. n = m, 6613 // use sext(trunc(x)) as the SCEV expression. 6614 return getSignExtendExpr( 6615 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy); 6616 6617 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1)); 6618 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) { 6619 uint64_t ShlAmt = ShlAmtCI->getZExtValue(); 6620 if (ShlAmt > AShrAmt) { 6621 // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV 6622 // expression. We already checked that ShlAmt < BitWidth, so 6623 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as 6624 // ShlAmt - AShrAmt < BitWidth - AShrAmt.
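// E.g. for i32 with X = shl A, 4 and Y = ashr X, 2: TruncTy is i30, the // multiplier is 1 << (4 - 2) = 4, and the result is // sext((trunc A to i30) * 4) to i32.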
6625 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt, 6626 ShlAmt - AShrAmt); 6627 return getSignExtendExpr( 6628 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy), 6629 getConstant(Mul)), OuterTy); 6630 } 6631 } 6632 } 6633 if (BO->IsExact) { 6634 // Given exact arithmetic in-bounds right-shift by a constant, 6635 // we can lower it into: (abs(x) EXACT/u (1<<C)) * signum(x) 6636 const SCEV *X = getSCEV(BO->LHS); 6637 const SCEV *AbsX = getAbsExpr(X, /*IsNSW=*/false); 6638 APInt Mult = APInt::getOneBitSet(BitWidth, AShrAmt); 6639 const SCEV *Div = getUDivExactExpr(AbsX, getConstant(Mult)); 6640 return getMulExpr(Div, getSignumExpr(X), SCEV::FlagNSW); 6641 } 6642 break; 6643 } 6644 } 6645 } 6646 6647 switch (U->getOpcode()) { 6648 case Instruction::Trunc: 6649 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 6650 6651 case Instruction::ZExt: 6652 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6653 6654 case Instruction::SExt: 6655 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) { 6656 // The NSW flag of a subtract does not always survive the conversion to 6657 // A + (-1)*B. By pushing sign extension onto its operands we are much 6658 // more likely to preserve NSW and allow later AddRec optimisations. 6659 // 6660 // NOTE: This is effectively duplicating this logic from getSignExtend: 6661 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 6662 // but by that point the NSW information has potentially been lost. 6663 if (BO->Opcode == Instruction::Sub && BO->IsNSW) { 6664 Type *Ty = U->getType(); 6665 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); 6666 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); 6667 return getMinusSCEV(V1, V2, SCEV::FlagNSW); 6668 } 6669 } 6670 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6671 6672 case Instruction::BitCast: 6673 // BitCasts are no-op casts so we just eliminate the cast. 6674 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 6675 return getSCEV(U->getOperand(0)); 6676 break; 6677 6678 case Instruction::PtrToInt: { 6679 // Pointer to integer cast is straight-forward, so do model it. 6680 Value *Ptr = U->getOperand(0); 6681 const SCEV *Op = getSCEV(Ptr); 6682 Type *DstIntTy = U->getType(); 6683 // SCEV doesn't have constant pointer expression type, but it supports 6684 // nullptr constant (and only that one), which is modelled in SCEV as a 6685 // zero integer constant. So just skip the ptrtoint cast for constants. 6686 if (isa<SCEVConstant>(Op)) 6687 return getTruncateOrZeroExtend(Op, DstIntTy); 6688 Type *PtrTy = Ptr->getType(); 6689 Type *IntPtrTy = getDataLayout().getIntPtrType(PtrTy); 6690 // But only if effective SCEV (integer) type is wide enough to represent 6691 // all possible pointer values. 6692 if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(PtrTy)) != 6693 getDataLayout().getTypeSizeInBits(IntPtrTy)) 6694 return getUnknown(V); 6695 return getPtrToIntExpr(Op, DstIntTy); 6696 } 6697 case Instruction::IntToPtr: 6698 // Just don't deal with inttoptr casts. 6699 return getUnknown(V); 6700 6701 case Instruction::SDiv: 6702 // If both operands are non-negative, this is just an udiv. 6703 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 6704 isKnownNonNegative(getSCEV(U->getOperand(1)))) 6705 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 6706 break; 6707 6708 case Instruction::SRem: 6709 // If both operands are non-negative, this is just an urem. 
6710 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 6711 isKnownNonNegative(getSCEV(U->getOperand(1)))) 6712 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 6713 break; 6714 6715 case Instruction::GetElementPtr: 6716 return createNodeForGEP(cast<GEPOperator>(U)); 6717 6718 case Instruction::PHI: 6719 return createNodeForPHI(cast<PHINode>(U)); 6720 6721 case Instruction::Select: 6722 // U can also be a select constant expr, which we let fall through. Since 6723 // createNodeForSelect only works for a condition that is an `ICmpInst`, and 6724 // constant expressions cannot have instructions as operands, we'd have 6725 // returned getUnknown for a select constant expression anyway. 6726 if (isa<Instruction>(U)) 6727 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0), 6728 U->getOperand(1), U->getOperand(2)); 6729 break; 6730 6731 case Instruction::Call: 6732 case Instruction::Invoke: 6733 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) 6734 return getSCEV(RV); 6735 6736 if (auto *II = dyn_cast<IntrinsicInst>(U)) { 6737 switch (II->getIntrinsicID()) { 6738 case Intrinsic::abs: 6739 return getAbsExpr( 6740 getSCEV(II->getArgOperand(0)), 6741 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne()); 6742 case Intrinsic::umax: 6743 return getUMaxExpr(getSCEV(II->getArgOperand(0)), 6744 getSCEV(II->getArgOperand(1))); 6745 case Intrinsic::umin: 6746 return getUMinExpr(getSCEV(II->getArgOperand(0)), 6747 getSCEV(II->getArgOperand(1))); 6748 case Intrinsic::smax: 6749 return getSMaxExpr(getSCEV(II->getArgOperand(0)), 6750 getSCEV(II->getArgOperand(1))); 6751 case Intrinsic::smin: 6752 return getSMinExpr(getSCEV(II->getArgOperand(0)), 6753 getSCEV(II->getArgOperand(1))); 6754 case Intrinsic::usub_sat: { 6755 const SCEV *X = getSCEV(II->getArgOperand(0)); 6756 const SCEV *Y = getSCEV(II->getArgOperand(1)); 6757 const SCEV *ClampedY = getUMinExpr(X, Y); 6758 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW); 6759 } 6760 case Intrinsic::uadd_sat: { 6761 const SCEV *X = getSCEV(II->getArgOperand(0)); 6762 const SCEV *Y = getSCEV(II->getArgOperand(1)); 6763 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y)); 6764 return getAddExpr(ClampedX, Y, SCEV::FlagNUW); 6765 } 6766 case Intrinsic::start_loop_iterations: 6767 // A start_loop_iterations is just equivalent to the first operand for 6768 // SCEV purposes. 6769 return getSCEV(II->getArgOperand(0)); 6770 default: 6771 break; 6772 } 6773 } 6774 break; 6775 } 6776 6777 return getUnknown(V); 6778 } 6779 6780 //===----------------------------------------------------------------------===// 6781 // Iteration Count Computation Code 6782 // 6783 6784 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { 6785 if (!ExitCount) 6786 return 0; 6787 6788 ConstantInt *ExitConst = ExitCount->getValue(); 6789 6790 // Guard against huge trip counts. 6791 if (ExitConst->getValue().getActiveBits() > 32) 6792 return 0; 6793 6794 // In case of integer overflow, this returns 0, which is correct. 6795 return ((unsigned)ExitConst->getZExtValue()) + 1; 6796 } 6797 6798 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) { 6799 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6800 return getSmallConstantTripCount(L, ExitingBB); 6801 6802 // No trip count information for multiple exits.
6803 return 0; 6804 } 6805 6806 unsigned 6807 ScalarEvolution::getSmallConstantTripCount(const Loop *L, 6808 const BasicBlock *ExitingBlock) { 6809 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6810 assert(L->isLoopExiting(ExitingBlock) && 6811 "Exiting block must actually branch out of the loop!"); 6812 const SCEVConstant *ExitCount = 6813 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock)); 6814 return getConstantTripCount(ExitCount); 6815 } 6816 6817 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) { 6818 const auto *MaxExitCount = 6819 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L)); 6820 return getConstantTripCount(MaxExitCount); 6821 } 6822 6823 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) { 6824 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6825 return getSmallConstantTripMultiple(L, ExitingBB); 6826 6827 // No trip multiple information for multiple exits. 6828 return 0; 6829 } 6830 6831 /// Returns the largest constant divisor of the trip count of this loop as a 6832 /// normal unsigned value, if possible. This means that the actual trip count is 6833 /// always a multiple of the returned value (don't forget that the trip count could 6834 /// very well be zero!). 6835 /// 6836 /// Returns 1 if the trip count is unknown or not guaranteed to be a 6837 /// multiple of a constant (which is also the case if the trip count is simply 6838 /// constant; use getSmallConstantTripCount for that case). It will also return 1 6839 /// if the trip count is very large (>= 2^32). 6840 /// 6841 /// As explained in the comments for getSmallConstantTripCount, this assumes 6842 /// that control exits the loop via ExitingBlock. 6843 unsigned 6844 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, 6845 const BasicBlock *ExitingBlock) { 6846 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6847 assert(L->isLoopExiting(ExitingBlock) && 6848 "Exiting block must actually branch out of the loop!"); 6849 const SCEV *ExitCount = getExitCount(L, ExitingBlock); 6850 if (ExitCount == getCouldNotCompute()) 6851 return 1; 6852 6853 // Get the trip count from the BE count by adding 1. 6854 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType())); 6855 6856 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr); 6857 if (!TC) 6858 // Attempt to factor more general cases. Returns the greatest power of 6859 // two divisor. If overflow happens, the trip count expression is still 6860 // divisible by the greatest power of 2 divisor returned. 6861 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr)); 6862 6863 ConstantInt *Result = TC->getValue(); 6864 6865 // Guard against huge trip counts (this requires checking 6866 // for zero to handle the case where the trip count == -1 and the 6867 // addition wraps).
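// E.g. if the backedge-taken count is -1 (all ones), TCExpr wraps around to // 0, getActiveBits() is 0, and we conservatively report a trip multiple of 1.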
6868 if (!Result || Result->getValue().getActiveBits() > 32 || 6869 Result->getValue().getActiveBits() == 0) 6870 return 1; 6871 6872 return (unsigned)Result->getZExtValue(); 6873 } 6874 6875 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 6876 const BasicBlock *ExitingBlock, 6877 ExitCountKind Kind) { 6878 switch (Kind) { 6879 case Exact: 6880 case SymbolicMaximum: 6881 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 6882 case ConstantMaximum: 6883 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this); 6884 } 6885 llvm_unreachable("Invalid ExitCountKind!"); 6886 } 6887 6888 const SCEV * 6889 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 6890 SCEVUnionPredicate &Preds) { 6891 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds); 6892 } 6893 6894 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L, 6895 ExitCountKind Kind) { 6896 switch (Kind) { 6897 case Exact: 6898 return getBackedgeTakenInfo(L).getExact(L, this); 6899 case ConstantMaximum: 6900 return getBackedgeTakenInfo(L).getConstantMax(this); 6901 case SymbolicMaximum: 6902 return getBackedgeTakenInfo(L).getSymbolicMax(L, this); 6903 } 6904 llvm_unreachable("Invalid ExitCountKind!"); 6905 } 6906 6907 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 6908 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this); 6909 } 6910 6911 /// Push PHI nodes in the header of the given loop onto the given Worklist. 6912 static void 6913 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 6914 BasicBlock *Header = L->getHeader(); 6915 6916 // Push all Loop-header PHIs onto the Worklist stack. 6917 for (PHINode &PN : Header->phis()) 6918 Worklist.push_back(&PN); 6919 } 6920 6921 const ScalarEvolution::BackedgeTakenInfo & 6922 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 6923 auto &BTI = getBackedgeTakenInfo(L); 6924 if (BTI.hasFullInfo()) 6925 return BTI; 6926 6927 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6928 6929 if (!Pair.second) 6930 return Pair.first->second; 6931 6932 BackedgeTakenInfo Result = 6933 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 6934 6935 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 6936 } 6937 6938 ScalarEvolution::BackedgeTakenInfo & 6939 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 6940 // Initially insert an invalid entry for this loop. If the insertion 6941 // succeeds, proceed to actually compute a backedge-taken count and 6942 // update the value. The temporary CouldNotCompute value tells SCEV 6943 // code elsewhere that it shouldn't attempt to request a new 6944 // backedge-taken count, which could result in infinite recursion. 6945 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 6946 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6947 if (!Pair.second) 6948 return Pair.first->second; 6949 6950 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 6951 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result 6952 // must be cleared in this scope. 6953 BackedgeTakenInfo Result = computeBackedgeTakenCount(L); 6954 6955 // In a product build, there is no usage of statistics.
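// The (void) casts below keep the variables referenced (and the compiler // quiet about them being unused) in such builds.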
6956 (void)NumTripCountsComputed; 6957 (void)NumTripCountsNotComputed; 6958 #if LLVM_ENABLE_STATS || !defined(NDEBUG) 6959 const SCEV *BEExact = Result.getExact(L, this); 6960 if (BEExact != getCouldNotCompute()) { 6961 assert(isLoopInvariant(BEExact, L) && 6962 isLoopInvariant(Result.getConstantMax(this), L) && 6963 "Computed backedge-taken count isn't loop invariant for loop!"); 6964 ++NumTripCountsComputed; 6965 } else if (Result.getConstantMax(this) == getCouldNotCompute() && 6966 isa<PHINode>(L->getHeader()->begin())) { 6967 // Only count loops that have phi nodes as not being computable. 6968 ++NumTripCountsNotComputed; 6969 } 6970 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG) 6971 6972 // Now that we know more about the trip count for this loop, forget any 6973 // existing SCEV values for PHI nodes in this loop since they are only 6974 // conservative estimates made without the benefit of trip count 6975 // information. This is similar to the code in forgetLoop, except that 6976 // it handles SCEVUnknown PHI nodes specially. 6977 if (Result.hasAnyInfo()) { 6978 SmallVector<Instruction *, 16> Worklist; 6979 PushLoopPHIs(L, Worklist); 6980 6981 SmallPtrSet<Instruction *, 8> Discovered; 6982 while (!Worklist.empty()) { 6983 Instruction *I = Worklist.pop_back_val(); 6984 6985 ValueExprMapType::iterator It = 6986 ValueExprMap.find_as(static_cast<Value *>(I)); 6987 if (It != ValueExprMap.end()) { 6988 const SCEV *Old = It->second; 6989 6990 // SCEVUnknown for a PHI either means that it has an unrecognized 6991 // structure, or it's a PHI that's in the process of being computed 6992 // by createNodeForPHI. In the former case, additional loop trip 6993 // count information isn't going to change anything. In the latter 6994 // case, createNodeForPHI will perform the necessary updates on its 6995 // own when it gets to that point. 6996 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) { 6997 eraseValueFromMap(It->first); 6998 forgetMemoizedResults(Old); 6999 } 7000 if (PHINode *PN = dyn_cast<PHINode>(I)) 7001 ConstantEvolutionLoopExitValue.erase(PN); 7002 } 7003 7004 // Since we don't need to invalidate anything for correctness and we're 7005 // only invalidating to make SCEV's results more precise, we get to stop 7006 // early to avoid invalidating too much. This is especially important in 7007 // cases like: 7008 // 7009 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node 7010 // loop0: 7011 // %pn0 = phi 7012 // ... 7013 // loop1: 7014 // %pn1 = phi 7015 // ... 7016 // 7017 // where both loop0's and loop1's backedge taken counts use the SCEV 7018 // expression for %v. If we don't have the early stop below, then in cases 7019 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip 7020 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip 7021 // count for loop1, effectively nullifying SCEV's trip count cache. 7022 for (auto *U : I->users()) 7023 if (auto *I = dyn_cast<Instruction>(U)) { 7024 auto *LoopForUser = LI.getLoopFor(I->getParent()); 7025 if (LoopForUser && L->contains(LoopForUser) && 7026 Discovered.insert(I).second) 7027 Worklist.push_back(I); 7028 } 7029 } 7030 } 7031 7032 // Re-lookup the insert position, since the call to 7033 // computeBackedgeTakenCount above could result in a 7034 // recursive call to getBackedgeTakenInfo (on a different 7035 // loop), which would invalidate the iterator computed 7036 // earlier.
7037 return BackedgeTakenCounts.find(L)->second = std::move(Result); 7038 } 7039 7040 void ScalarEvolution::forgetAllLoops() { 7041 // This method is intended to forget all info about loops. It should 7042 // invalidate caches as if the following happened: 7043 // - The trip counts of all loops have changed arbitrarily 7044 // - Every llvm::Value has been updated in place to produce a different 7045 // result. 7046 BackedgeTakenCounts.clear(); 7047 PredicatedBackedgeTakenCounts.clear(); 7048 LoopPropertiesCache.clear(); 7049 ConstantEvolutionLoopExitValue.clear(); 7050 ValueExprMap.clear(); 7051 ValuesAtScopes.clear(); 7052 LoopDispositions.clear(); 7053 BlockDispositions.clear(); 7054 UnsignedRanges.clear(); 7055 SignedRanges.clear(); 7056 ExprValueMap.clear(); 7057 HasRecMap.clear(); 7058 MinTrailingZerosCache.clear(); 7059 PredicatedSCEVRewrites.clear(); 7060 } 7061 7062 void ScalarEvolution::forgetLoop(const Loop *L) { 7063 // Drop any stored trip count value. 7064 auto RemoveLoopFromBackedgeMap = 7065 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 7066 auto BTCPos = Map.find(L); 7067 if (BTCPos != Map.end()) { 7068 BTCPos->second.clear(); 7069 Map.erase(BTCPos); 7070 } 7071 }; 7072 7073 SmallVector<const Loop *, 16> LoopWorklist(1, L); 7074 SmallVector<Instruction *, 32> Worklist; 7075 SmallPtrSet<Instruction *, 16> Visited; 7076 7077 // Iterate over all the loops and sub-loops to drop SCEV information. 7078 while (!LoopWorklist.empty()) { 7079 auto *CurrL = LoopWorklist.pop_back_val(); 7080 7081 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 7082 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 7083 7084 // Drop information about predicated SCEV rewrites for this loop. 7085 for (auto I = PredicatedSCEVRewrites.begin(); 7086 I != PredicatedSCEVRewrites.end();) { 7087 std::pair<const SCEV *, const Loop *> Entry = I->first; 7088 if (Entry.second == CurrL) 7089 PredicatedSCEVRewrites.erase(I++); 7090 else 7091 ++I; 7092 } 7093 7094 auto LoopUsersItr = LoopUsers.find(CurrL); 7095 if (LoopUsersItr != LoopUsers.end()) { 7096 for (auto *S : LoopUsersItr->second) 7097 forgetMemoizedResults(S); 7098 LoopUsers.erase(LoopUsersItr); 7099 } 7100 7101 // Drop information about expressions based on loop-header PHIs. 7102 PushLoopPHIs(CurrL, Worklist); 7103 7104 while (!Worklist.empty()) { 7105 Instruction *I = Worklist.pop_back_val(); 7106 if (!Visited.insert(I).second) 7107 continue; 7108 7109 ValueExprMapType::iterator It = 7110 ValueExprMap.find_as(static_cast<Value *>(I)); 7111 if (It != ValueExprMap.end()) { 7112 eraseValueFromMap(It->first); 7113 forgetMemoizedResults(It->second); 7114 if (PHINode *PN = dyn_cast<PHINode>(I)) 7115 ConstantEvolutionLoopExitValue.erase(PN); 7116 } 7117 7118 PushDefUseChildren(I, Worklist); 7119 } 7120 7121 LoopPropertiesCache.erase(CurrL); 7122 // Forget all contained loops too, to avoid dangling entries in the 7123 // ValuesAtScopes map. 7124 LoopWorklist.append(CurrL->begin(), CurrL->end()); 7125 } 7126 } 7127 7128 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 7129 while (Loop *Parent = L->getParentLoop()) 7130 L = Parent; 7131 forgetLoop(L); 7132 } 7133 7134 void ScalarEvolution::forgetValue(Value *V) { 7135 Instruction *I = dyn_cast<Instruction>(V); 7136 if (!I) return; 7137 7138 // Drop information about expressions based on loop-header PHIs. 
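// Walk the def-use chain rooted at V, dropping the memoized SCEV of every // transitive user; the Visited set ensures each instruction is processed // only once.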
7139 SmallVector<Instruction *, 16> Worklist; 7140 Worklist.push_back(I); 7141 7142 SmallPtrSet<Instruction *, 8> Visited; 7143 while (!Worklist.empty()) { 7144 I = Worklist.pop_back_val(); 7145 if (!Visited.insert(I).second) 7146 continue; 7147 7148 ValueExprMapType::iterator It = 7149 ValueExprMap.find_as(static_cast<Value *>(I)); 7150 if (It != ValueExprMap.end()) { 7151 eraseValueFromMap(It->first); 7152 forgetMemoizedResults(It->second); 7153 if (PHINode *PN = dyn_cast<PHINode>(I)) 7154 ConstantEvolutionLoopExitValue.erase(PN); 7155 } 7156 7157 PushDefUseChildren(I, Worklist); 7158 } 7159 } 7160 7161 void ScalarEvolution::forgetLoopDispositions(const Loop *L) { 7162 LoopDispositions.clear(); 7163 } 7164 7165 /// Get the exact loop backedge taken count considering all loop exits. A 7166 /// computable result can only be returned for loops with all exiting blocks 7167 /// dominating the latch. howFarToZero assumes that the limit of each loop test 7168 /// is never skipped. This is a valid assumption as long as the loop exits via 7169 /// that test. For precise results, it is the caller's responsibility to specify 7170 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 7171 const SCEV * 7172 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 7173 SCEVUnionPredicate *Preds) const { 7174 // If any exits were not computable, the loop is not computable. 7175 if (!isComplete() || ExitNotTaken.empty()) 7176 return SE->getCouldNotCompute(); 7177 7178 const BasicBlock *Latch = L->getLoopLatch(); 7179 // All exiting blocks we have collected must dominate the only backedge. 7180 if (!Latch) 7181 return SE->getCouldNotCompute(); 7182 7183 // All exiting blocks we have gathered dominate loop's latch, so exact trip 7184 // count is simply a minimum out of all these calculated exit counts. 7185 SmallVector<const SCEV *, 2> Ops; 7186 for (auto &ENT : ExitNotTaken) { 7187 const SCEV *BECount = ENT.ExactNotTaken; 7188 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 7189 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 7190 "We should only have known counts for exiting blocks that dominate " 7191 "latch!"); 7192 7193 Ops.push_back(BECount); 7194 7195 if (Preds && !ENT.hasAlwaysTruePredicate()) 7196 Preds->add(ENT.Predicate.get()); 7197 7198 assert((Preds || ENT.hasAlwaysTruePredicate()) && 7199 "Predicate should be always true!"); 7200 } 7201 7202 return SE->getUMinFromMismatchedTypes(Ops); 7203 } 7204 7205 /// Get the exact not taken count for this loop exit. 7206 const SCEV * 7207 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock, 7208 ScalarEvolution *SE) const { 7209 for (auto &ENT : ExitNotTaken) 7210 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 7211 return ENT.ExactNotTaken; 7212 7213 return SE->getCouldNotCompute(); 7214 } 7215 7216 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax( 7217 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { 7218 for (auto &ENT : ExitNotTaken) 7219 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 7220 return ENT.MaxNotTaken; 7221 7222 return SE->getCouldNotCompute(); 7223 } 7224 7225 /// getConstantMax - Get the constant max backedge taken count for the loop. 
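/// Returns SCEVCouldNotCompute if any exit carries a predicate that is not /// known to always hold.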
7226 const SCEV * 7227 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { 7228 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7229 return !ENT.hasAlwaysTruePredicate(); 7230 }; 7231 7232 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax()) 7233 return SE->getCouldNotCompute(); 7234 7235 assert((isa<SCEVCouldNotCompute>(getConstantMax()) || 7236 isa<SCEVConstant>(getConstantMax())) && 7237 "No point in having a non-constant max backedge taken count!"); 7238 return getConstantMax(); 7239 } 7240 7241 const SCEV * 7242 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, 7243 ScalarEvolution *SE) { 7244 if (!SymbolicMax) 7245 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); 7246 return SymbolicMax; 7247 } 7248 7249 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( 7250 ScalarEvolution *SE) const { 7251 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7252 return !ENT.hasAlwaysTruePredicate(); 7253 }; 7254 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 7255 } 7256 7257 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 7258 ScalarEvolution *SE) const { 7259 if (getConstantMax() && getConstantMax() != SE->getCouldNotCompute() && 7260 SE->hasOperand(getConstantMax(), S)) 7261 return true; 7262 7263 for (auto &ENT : ExitNotTaken) 7264 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 7265 SE->hasOperand(ENT.ExactNotTaken, S)) 7266 return true; 7267 7268 return false; 7269 } 7270 7271 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 7272 : ExactNotTaken(E), MaxNotTaken(E) { 7273 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7274 isa<SCEVConstant>(MaxNotTaken)) && 7275 "No point in having a non-constant max backedge taken count!"); 7276 } 7277 7278 ScalarEvolution::ExitLimit::ExitLimit( 7279 const SCEV *E, const SCEV *M, bool MaxOrZero, 7280 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 7281 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 7282 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 7283 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 7284 "Exact is not allowed to be less precise than Max"); 7285 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7286 isa<SCEVConstant>(MaxNotTaken)) && 7287 "No point in having a non-constant max backedge taken count!"); 7288 for (auto *PredSet : PredSetList) 7289 for (auto *P : *PredSet) 7290 addPredicate(P); 7291 } 7292 7293 ScalarEvolution::ExitLimit::ExitLimit( 7294 const SCEV *E, const SCEV *M, bool MaxOrZero, 7295 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 7296 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 7297 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7298 isa<SCEVConstant>(MaxNotTaken)) && 7299 "No point in having a non-constant max backedge taken count!"); 7300 } 7301 7302 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 7303 bool MaxOrZero) 7304 : ExitLimit(E, M, MaxOrZero, None) { 7305 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7306 isa<SCEVConstant>(MaxNotTaken)) && 7307 "No point in having a non-constant max backedge taken count!"); 7308 } 7309 7310 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 7311 /// computable exit into a persistent ExitNotTakenInfo array. 
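/// Exits whose ExitLimit carries predicates get a freshly allocated /// SCEVUnionPredicate combining them; predicate-free exits store a null /// predicate.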

/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
/// computable exit into a persistent ExitNotTakenInfo array.
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
    bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
    : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                  nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
          isa<SCEVConstant>(ConstantMax)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
  ExitNotTaken.clear();
}

/// Compute the number of times the backedge of the specified loop will
/// execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];

    // We canonicalize untaken exits to br (constant), ignore them so that
    // proving an exit untaken doesn't negatively impact our ability to reason
    // about the loop as a whole.
    if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
      if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
        bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
        if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
          continue;
      }

    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit; otherwise
    // it is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
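    //
    // For illustration only (hypothetical example, not from the original
    // source): with one dominating exit whose MaxNotTaken is 10 and one
    // non-dominating exit whose count is unknown, MaxBECount is 10. With
    // only non-dominating exits, MaxBECount is the umax of their counts,
    // which degrades to CouldNotCompute as soon as any of them is unknown.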
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection
  // with the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  Instruction *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(
        L, BI->getCondition(), ExitIfTrue,
        /*ControlsExit=*/IsOnlyExit, AllowPredicates);
  }
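
  // For illustration only (not from the original source): a switch whose
  // cases branch to two different blocks outside the loop is rejected below;
  // exactly one out-of-loop successor is required for the analysis to apply.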
  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
          return getCouldNotCompute();
        Exit = SBB;
      }
    assert(Exit && "Exiting block must have at least one exit");
    return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
                                                /*ControlsExit=*/IsOnlyExit);
  }

  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
    const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
  return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
                                        ControlsExit, AllowPredicates);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
                                      bool ExitIfTrue, bool ControlsExit,
                                      bool AllowPredicates) {
  (void)this->L;
  (void)this->ExitIfTrue;
  (void)this->AllowPredicates;

  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");
  auto Itr = TripCountMap.find({ExitCond, ControlsExit});
  if (Itr == TripCountMap.end())
    return None;
  return Itr->second;
}

void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
                                             bool ExitIfTrue,
                                             bool ControlsExit,
                                             bool AllowPredicates,
                                             const ExitLimit &EL) {
  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");

  auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
  assert(InsertResult.second && "Expected successful insertion!");
  (void)InsertResult;
  (void)ExitIfTrue;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {

  if (auto MaybeEL =
          Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
    return *MaybeEL;

  ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
                                              ControlsExit, AllowPredicates);
  Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
  return EL;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
    if (BO->getOpcode() == Instruction::And) {
      // Recurse on the operands of the and.
      bool EitherMayExit = !ExitIfTrue;
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      // Be robust against unsimplified IR for the form "and i1 X, true".
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1)))
        return CI->isOne() ? EL0 : EL1;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0)))
        return CI->isOne() ? EL1 : EL0;
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be true for the loop to continue executing.
        // Choose the less conservative count.
        if (EL0.ExactNotTaken == getCouldNotCompute() ||
            EL1.ExactNotTaken == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount =
              getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
        if (EL0.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL1.MaxNotTaken;
        else if (EL1.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL0.MaxNotTaken;
        else
          MaxBECount =
              getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
      } else {
        // Both conditions must be true at the same time for the loop to exit.
        // For now, be conservative.
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }

      // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
      // to be more aggressive when computing BECount than when computing
      // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
      // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and
      // EL1.MaxNotTaken to not.
      if (isa<SCEVCouldNotCompute>(MaxBECount) &&
          !isa<SCEVCouldNotCompute>(BECount))
        MaxBECount = getConstant(getUnsignedRangeMax(BECount));

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
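    // For illustration only (hypothetical example, not from the original
    // source): for "while (i != a && i != b)", which exits when the condition
    // is false, either operand going false ends the loop, so the exact count
    // is the umin of the two operand counts computed above.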
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      bool EitherMayExit = ExitIfTrue;
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      // Be robust against unsimplified IR for the form "or i1 X, true".
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1)))
        return CI->isZero() ? EL0 : EL1;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0)))
        return CI->isZero() ? EL1 : EL0;
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be false for the loop to continue executing.
        // Choose the less conservative count.
        if (EL0.ExactNotTaken == getCouldNotCompute() ||
            EL1.ExactNotTaken == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount =
              getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
        if (EL0.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL1.MaxNotTaken;
        else if (EL1.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL0.MaxNotTaken;
        else
          MaxBECount =
              getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
      } else {
        // Both conditions must be false at the same time for the loop to
        // exit.  For now, be conservative.
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }
      // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
      // to be more aggressive when computing BECount than when computing
      // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
      // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and
      // EL1.MaxNotTaken to not.
      if (isa<SCEVCouldNotCompute>(MaxBECount) &&
          !isa<SCEVCouldNotCompute>(BECount))
        MaxBECount = getConstant(getUnsignedRangeMax(BECount));

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
  }

  // With an icmp, it may be feasible to compute an exact backedge-taken
  // count.  Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
    ExitLimit EL =
        computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
    if (EL.hasFullInfo() || !AllowPredicates)
      return EL;

    // Try again, but use SCEV predicates this time.
    return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
                                    /*AllowPredicates=*/true);
  }

  // Check for a constant condition. These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If it's not an integer or pointer comparison then compute it the hard
  // way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          bool ExitIfTrue,
                                          bool ControlsExit,
                                          bool AllowPredicates) {
  // If the condition was exit on true, convert the condition to exit on
  // false.
  ICmpInst::Predicate Pred;
  if (!ExitIfTrue)
    Pred = ExitCond->getPredicate();
  else
    Pred = ExitCond->getInversePredicate();
  const ICmpInst::Predicate OriginalPred = Pred;

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      ExitLimit ItCnt =
          computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant, force it into the RHS.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
        ConstantRange CompRange =
            ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  switch (Pred) {
  case ICmpInst::ICMP_NE: { // while (X != Y)
    // Convert to: while (X-Y != 0)
    ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
                                AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_EQ: { // while (X == Y)
    // Convert to: while (X-Y == 0)
    ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT: { // while (X < Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SLT;
    ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
                                    AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: { // while (X > Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SGT;
    ExitLimit EL =
        howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
                            AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  default:
    break;
  }

  auto *ExhaustiveCount =
      computeExitCountExhaustively(L, ExitCond, ExitIfTrue);

  if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
    return ExhaustiveCount;

  return computeShiftCompareExitLimit(ExitCond->getOperand(0),
                                      ExitCond->getOperand(1), L,
                                      OriginalPred);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
                                                      SwitchInst *Switch,
                                                      BasicBlock *ExitingBlock,
                                                      bool ControlsExit) {
  assert(!L->contains(ExitingBlock) && "Not an exiting block!");

  // Give up if the exit is the default dest of a switch.
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}

/// Given an exit condition of 'icmp op load X, cst', try to see if we can
/// compute the backedge execution count.
ScalarEvolution::ExitLimit
ScalarEvolution::computeLoadConstantCompareExitLimit(
    LoadInst *LI,
    Constant *RHS,
    const Loop *L,
    ICmpInst::Predicate predicate) {
  if (LI->isVolatile()) return getCouldNotCompute();
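
  // For illustration only (hypothetical example, not from the original
  // source): this handles loops like
  //   for (i = 0; table[i] != 0; ++i) ...
  // where @table is a constant global array; the condition is then
  //   icmp ne (load (gep @table, 0, {0,+,1})), 0
  // and the trip count can be read off the initializer.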
  // Check to see if the loaded pointer is a getelementptr of a global.
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i - 2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break; // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst); // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();
  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and the shift opcode in OutOpCode.
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted
  // in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match.  Return the corresponding PHI node
  // (%iv above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so.  Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value.  We
      // only really care about it being the same *kind* of shift instruction
      // -- that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations.  If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
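    //
    // For illustration only (not from the original source): {8,ashr,1}
    // produces 8, 4, 2, 1, 0, 0, ... and stabilizes to 0, while {-8,ashr,1}
    // produces -8, -4, -2, -1, -1, ... and stabilizes to -1.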
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}

/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I) || isa<ExtractValueInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header
/// phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along
/// the way, but the operands of an operation must either be constants or a
/// value derived from a constant PHI.  If this expression does not fit with
/// these constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal.  If we can't fold this expression for
/// some reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that
  // we weren't given a mapping for, or a value such as a call inside the
  // loop.
  if (!canConstantEvolve(I, L)) return nullptr;
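
  // For illustration only (hypothetical example, not from the original
  // source): with Vals = { %iv -> i32 3 } and V = "%t = mul i32 %iv, 2", the
  // recursion below folds %t to the constant 6.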
  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}

// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  auto I = ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    // Not going to evaluate it.
    return ConstantEvolutionLoopExitValue[PN] = nullptr;

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
         "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN]; // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr; // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if
    // we cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate
    // any iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) { // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute.  We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue; // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      break;
    }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (V->getSCEVType()) {
  case scCouldNotCompute:
  case scAddRecExpr:
    return nullptr;
  case scConstant:
    return cast<SCEVConstant>(V)->getValue();
  case scUnknown:
    return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
  case scSignExtend: {
    const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
      return ConstantExpr::getSExt(CastOp, SS->getType());
    return nullptr;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
      return ConstantExpr::getZExt(CastOp, SZ->getType());
    return nullptr;
  }
  case scPtrToInt: {
    const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
      return ConstantExpr::getPtrToInt(CastOp, P2I->getType());

    return nullptr;
  }
  case scTruncate: {
    const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
      return ConstantExpr::getTrunc(CastOp, ST->getType());
    return nullptr;
  }
  case scAddExpr: {
    const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
      if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
        unsigned AS = PTy->getAddressSpace();
        Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
        C = ConstantExpr::getBitCast(C, DestPtrTy);
      }
      for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
        if (!C2)
          return nullptr;

        // First pointer!
        if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
          unsigned AS = C2->getType()->getPointerAddressSpace();
          std::swap(C, C2);
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          // The offsets have been converted to bytes.  We can add bytes to
          // an i8* by GEP with the byte count in the first index.
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }

        // Don't bother trying to sum two pointers. We probably can't
        // statically compute a load that results from it anyway.
        if (C2->getType()->isPointerTy())
          return nullptr;

        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          if (PTy->getElementType()->isStructTy())
            C2 = ConstantExpr::getIntegerCast(
                C2, Type::getInt32Ty(C->getContext()), true);
          C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
        } else
          C = ConstantExpr::getAdd(C, C2);
      }
      return C;
    }
    return nullptr;
  }
  case scMulExpr: {
    const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
      // Don't bother with pointers at all.
      if (C->getType()->isPointerTy())
        return nullptr;
      for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
        if (!C2 || C2->getType()->isPointerTy())
          return nullptr;
        C = ConstantExpr::getMul(C, C2);
      }
      return C;
    }
    return nullptr;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
    if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
      if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
        if (LHS->getType() == RHS->getType())
          return ConstantExpr::getUDiv(LHS, RHS);
    return nullptr;
  }
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
    return nullptr; // TODO: smax, umax, smin, umin.
  }
  llvm_unreachable("Unknown SCEV kind!");
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      if (PHINode *PN = dyn_cast<PHINode>(I)) {
        const Loop *CurrLoop = this->LI[I->getParent()];
        // Looking for loop exit value.
        if (CurrLoop && CurrLoop->getParentLoop() == L &&
            PN->getParent() == CurrLoop->getHeader()) {
          // Okay, there is no closed form solution for the PHI node.  Check
          // to see if the loop that contains it has a known backedge-taken
          // count.  If so, we may be able to force computation of the exit
          // value.
          const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
          // This trivial case can show up in some degenerate cases where
          // the incoming IR has not yet been fully simplified.
          if (BackedgeTakenCount->isZero()) {
            Value *InitValue = nullptr;
            bool MultipleInitValues = false;
            for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
              if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
                if (!InitValue)
                  InitValue = PN->getIncomingValue(i);
                else if (InitValue != PN->getIncomingValue(i)) {
                  MultipleInitValues = true;
                  break;
                }
              }
            }
            if (!MultipleInitValues && InitValue)
              return getSCEV(InitValue);
          }
          // Do we have a loop invariant value flowing around the backedge
          // for a loop which must execute the backedge?
          if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
              isKnownPositive(BackedgeTakenCount) &&
              PN->getNumIncomingValues() == 2) {

            unsigned InLoopPred =
                CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
            Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
            if (CurrLoop->isLoopInvariant(BackedgeVal))
              return getSCEV(BackedgeVal);
          }
          if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
            // Okay, we know how many times the containing loop executes.  If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV = getConstantEvolutionLoopExitValue(
                PN, BTCC->getAPInt(), CurrLoop);
            if (RV) return getSCEV(RV);
          }
        }

        // If there is a single-input Phi, evaluate it at our scope. If we can
        // prove that this replacement does not break LCSSA form, use the new
        // value.
        if (PN->getNumOperands() == 1) {
          const SCEV *Input = getSCEV(PN->getOperand(0));
          const SCEV *InputAtScope = getSCEVAtScope(Input, L);
          // TODO: We can generalize it using
          // LI.replacementPreservesLCSSAForm; for the simplest case just
          // support constants.
          if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
        }
      }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate
      // the result.  This is particularly useful for computing loop exit
      // values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with scev techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                                Operands[0], Operands[1], DL,
                                                &TLI);
          else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
            if (!Load->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
                                               DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative
        // expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin() + i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMinMaxExpr>(Comm))
          return getMinMaxExpr(Comm->getSCEVType(), NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div; // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded AddRec expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin() + i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
          getAddRecExpr(NewOps, AddRec->getLoop(),
                        AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding.  Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the
      // AddRec loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount =
          getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getPtrToIntExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B.  The signedness
/// of A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2
  // for B is not less than the multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();
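
  // For illustration only (hypothetical numbers, not from the original
  // source): with BW = 8 (N = 256), A = 12 and B = 20, we get Mult2 = 2 and
  // D = 4.  B also has two trailing zeros, so a solution exists: I is the
  // inverse of 3 modulo 64, i.e. 43, and the minimum root is
  // (43 * 20 mod 256) / 4 = 92 / 4 = 23.  Indeed, 12 * 23 = 276 = 20
  // (mod 256).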
8744 // 8745 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8746 // (N / D) in general. The inverse itself always fits into BW bits, though, 8747 // so we immediately truncate it. 8748 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8749 APInt Mod(BW + 1, 0); 8750 Mod.setBit(BW - Mult2); // Mod = N / D 8751 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8752 8753 // 4. Compute the minimum unsigned root of the equation: 8754 // I * (B / D) mod (N / D) 8755 // To simplify the computation, we factor out the divide by D: 8756 // (I * B mod N) / D 8757 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8758 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8759 } 8760 8761 /// For a given quadratic addrec, generate coefficients of the corresponding 8762 /// quadratic equation, multiplied by a common value to ensure that they are 8763 /// integers. 8764 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8765 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8766 /// were multiplied by, and BitWidth is the bit width of the original addrec 8767 /// coefficients. 8768 /// This function returns None if the addrec coefficients are not compile- 8769 /// time constants. 8770 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8771 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8772 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8773 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8774 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8775 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8776 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8777 << *AddRec << '\n'); 8778 8779 // We currently can only solve this if the coefficients are constants. 8780 if (!LC || !MC || !NC) { 8781 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8782 return None; 8783 } 8784 8785 APInt L = LC->getAPInt(); 8786 APInt M = MC->getAPInt(); 8787 APInt N = NC->getAPInt(); 8788 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8789 8790 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8791 unsigned NewWidth = BitWidth + 1; 8792 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8793 << BitWidth << '\n'); 8794 // The sign-extension (as opposed to a zero-extension) here matches the 8795 // extension used in SolveQuadraticEquationWrap (with the same motivation). 8796 N = N.sext(NewWidth); 8797 M = M.sext(NewWidth); 8798 L = L.sext(NewWidth); 8799 8800 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8801 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8802 // L+M, L+2M+N, L+3M+3N, ... 8803 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8804 // 8805 // The equation Acc = 0 is then 8806 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8807 // In a quadratic form it becomes: 8808 // N n^2 + (2M-N) n + 2L = 0. 
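  //
  // For example (illustrative): for the chrec {5,+,3,+,2} (L=5, M=3, N=2),
  // Acc(n) = 5 + 3n + n(n-1) = n^2 + 2n + 5, and the scaled equation is
  // 2n^2 + 4n + 10 = 0, i.e. exactly 2 * Acc(n) = 0 with multiplier T = 2.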
8809
8810   APInt A = N;
8811   APInt B = 2 * M - A;
8812   APInt C = 2 * L;
8813   APInt T = APInt(NewWidth, 2);
8814   LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
8815                     << "x + " << C << ", coeff bw: " << NewWidth
8816                     << ", multiplied by " << T << '\n');
8817   return std::make_tuple(A, B, C, T, BitWidth);
8818 }
8819
8820 /// Helper function to compare optional APInts:
8821 /// (a) if X and Y both exist, return min(X, Y),
8822 /// (b) if neither X nor Y exist, return None,
8823 /// (c) if exactly one of X and Y exists, return that value.
8824 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
8825   if (X.hasValue() && Y.hasValue()) {
8826     unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
8827     APInt XW = X->sextOrSelf(W);
8828     APInt YW = Y->sextOrSelf(W);
8829     return XW.slt(YW) ? *X : *Y;
8830   }
8831   if (!X.hasValue() && !Y.hasValue())
8832     return None;
8833   return X.hasValue() ? *X : *Y;
8834 }
8835
8836 /// Helper function to truncate an optional APInt to a given BitWidth.
8837 /// When solving addrec-related equations, it is preferable to return a value
8838 /// that has the same bit width as the original addrec's coefficients. If the
8839 /// solution fits in the original bit width, truncate it (except for i1).
8840 /// Returning a value of a different bit width may inhibit some optimizations.
8841 ///
8842 /// In general, a solution to a quadratic equation generated from an addrec
8843 /// may require BW+1 bits, where BW is the bit width of the addrec's
8844 /// coefficients. The reason is that the coefficients of the quadratic
8845 /// equation are BW+1 bits wide (to avoid truncation when converting from
8846 /// the addrec to the equation).
8847 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
8848   if (!X.hasValue())
8849     return None;
8850   unsigned W = X->getBitWidth();
8851   if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
8852     return X->trunc(BitWidth);
8853   return X;
8854 }
8855
8856 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
8857 /// iterations. The values L, M, N are assumed to be signed, and they
8858 /// should all have the same bit widths.
8859 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
8860 /// where BW is the bit width of the addrec's coefficients.
8861 /// If the calculated value is a BW-bit integer (for BW > 1), it will be
8862 /// returned as such; otherwise the bit width of the returned value may
8863 /// be greater than BW.
8864 ///
8865 /// This function returns None if
8866 /// (a) the addrec coefficients are not constant, or
8867 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
8868 ///     like x^2 = 5, no integer solutions exist; in other cases an integer
8869 ///     solution may exist, but SolveQuadraticEquationWrap may fail to find it.
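/// For example (illustrative): for the chrec {-4,+,1,+,2} we get
/// c(n) = -4 + n + n(n-1) = n^2 - 4, and the least n >= 0 with c(n) == 0
/// is n == 2.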
8870 static Optional<APInt> 8871 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 8872 APInt A, B, C, M; 8873 unsigned BitWidth; 8874 auto T = GetQuadraticEquation(AddRec); 8875 if (!T.hasValue()) 8876 return None; 8877 8878 std::tie(A, B, C, M, BitWidth) = *T; 8879 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 8880 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 8881 if (!X.hasValue()) 8882 return None; 8883 8884 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 8885 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 8886 if (!V->isZero()) 8887 return None; 8888 8889 return TruncIfPossible(X, BitWidth); 8890 } 8891 8892 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 8893 /// iterations. The values M, N are assumed to be signed, and they 8894 /// should all have the same bit widths. 8895 /// Find the least n such that c(n) does not belong to the given range, 8896 /// while c(n-1) does. 8897 /// 8898 /// This function returns None if 8899 /// (a) the addrec coefficients are not constant, or 8900 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 8901 /// bounds of the range. 8902 static Optional<APInt> 8903 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 8904 const ConstantRange &Range, ScalarEvolution &SE) { 8905 assert(AddRec->getOperand(0)->isZero() && 8906 "Starting value of addrec should be 0"); 8907 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 8908 << Range << ", addrec " << *AddRec << '\n'); 8909 // This case is handled in getNumIterationsInRange. Here we can assume that 8910 // we start in the range. 8911 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 8912 "Addrec's initial value should be in range"); 8913 8914 APInt A, B, C, M; 8915 unsigned BitWidth; 8916 auto T = GetQuadraticEquation(AddRec); 8917 if (!T.hasValue()) 8918 return None; 8919 8920 // Be careful about the return value: there can be two reasons for not 8921 // returning an actual number. First, if no solutions to the equations 8922 // were found, and second, if the solutions don't leave the given range. 8923 // The first case means that the actual solution is "unknown", the second 8924 // means that it's known, but not valid. If the solution is unknown, we 8925 // cannot make any conclusions. 8926 // Return a pair: the optional solution and a flag indicating if the 8927 // solution was found. 8928 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 8929 // Solve for signed overflow and unsigned overflow, pick the lower 8930 // solution. 8931 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 8932 << Bound << " (before multiplying by " << M << ")\n"); 8933 Bound *= M; // The quadratic equation multiplier. 
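    // GetQuadraticEquation scaled the coefficients A, B and C by the common
    // factor M (always 2 here), so the boundary has to be scaled by the same
    // factor before it is used as the constant term of the equation below.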
8934
8935     Optional<APInt> SO = None;
8936     if (BitWidth > 1) {
8937       LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
8938                            "signed overflow\n");
8939       SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
8940     }
8941     LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
8942                          "unsigned overflow\n");
8943     Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
8944                                                               BitWidth+1);
8945
8946     auto LeavesRange = [&] (const APInt &X) {
8947       ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
8948       ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
8949       if (Range.contains(V0->getValue()))
8950         return false;
8951       // X should be at least 1, so X-1 is non-negative.
8952       ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
8953       ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
8954       if (Range.contains(V1->getValue()))
8955         return true;
8956       return false;
8957     };
8958
8959     // If SolveQuadraticEquationWrap returns None, it means that there can
8960     // be a solution, but the function failed to find it. We cannot treat it
8961     // as "no solution".
8962     if (!SO.hasValue() || !UO.hasValue())
8963       return { None, false };
8964
8965     // Check the smaller value first to see if it leaves the range.
8966     // At this point, both SO and UO must have values.
8967     Optional<APInt> Min = MinOptional(SO, UO);
8968     if (LeavesRange(*Min))
8969       return { Min, true };
8970     Optional<APInt> Max = Min == SO ? UO : SO;
8971     if (LeavesRange(*Max))
8972       return { Max, true };
8973
8974     // Solutions were found, but were eliminated, hence the "true".
8975     return { None, true };
8976   };
8977
8978   std::tie(A, B, C, M, BitWidth) = *T;
8979   // Lower bound is inclusive; subtract 1 to represent the exiting value.
8980   APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
8981   APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
8982   auto SL = SolveForBoundary(Lower);
8983   auto SU = SolveForBoundary(Upper);
8984   // If any of the solutions was unknown, no meaningful conclusions can
8985   // be made.
8986   if (!SL.second || !SU.second)
8987     return None;
8988
8989   // Claim: The correct solution is not some value between Min and Max.
8990   //
8991   // Justification: Assuming that Min and Max are different values, one of
8992   // them is when the first signed overflow happens, the other is when the
8993   // first unsigned overflow happens. Crossing the range boundary is only
8994   // possible via an overflow (treating 0 as a special case of it, modeling
8995   // an overflow as crossing k*2^W for some k).
8996   //
8997   // The interesting case here is when Min was eliminated as an invalid
8998   // solution, but Max was not. The argument is that if there was another
8999   // overflow between Min and Max, it would also have been eliminated if
9000   // it was considered.
9001   //
9002   // For a given boundary, it is possible to have two overflows of the same
9003   // type (signed/unsigned) without having the other type in between: this
9004   // can happen when the vertex of the parabola is between the iterations
9005   // corresponding to the overflows. This is only possible when the two
9006   // overflows cross k*2^W for the same k. In such a case, if the second one
9007   // left the range (and was the first one to do so), the first overflow
9008   // would have to enter the range, which would mean that either we had left
9009   // the range before or that we started outside of it. Both of these cases
9010   // are contradictions.
9011   //
9012   // Claim: In the case where SolveForBoundary returns None, the correct
9013   // solution is not some value between the Max for this boundary and the
9014   // Min of the other boundary.
9015   //
9016   // Justification: Assume that we had such Max_A and Min_B corresponding
9017   // to range boundaries A and B and such that Max_A < Min_B. If there was
9018   // a solution between Max_A and Min_B, it would have to be caused by an
9019   // overflow corresponding to either A or B. It cannot correspond to B,
9020   // since Min_B is the first occurrence of such an overflow. If it
9021   // corresponded to A, it would have to be either a signed or an unsigned
9022   // overflow that is larger than both eliminated overflows for A. But
9023   // between the eliminated overflows and this overflow, the values would
9024   // cover the entire value space, thus crossing the other boundary, which
9025   // is a contradiction.
9026
9027   return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
9028 }
9029
9030 ScalarEvolution::ExitLimit
9031 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
9032                               bool AllowPredicates) {
9033
9034   // This is only used for loops with a "x != y" exit test. The exit condition
9035   // is now expressed as a single expression, V = x-y. So the exit test is
9036   // effectively V != 0. We know, and take advantage of, the fact that this
9037   // expression is only used in a comparison-with-zero context.
9038
9039   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
9040   // If the value is a constant
9041   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9042     // If the value is already zero, the branch will execute zero times.
9043     if (C->getValue()->isZero()) return C;
9044     return getCouldNotCompute();  // Otherwise it will loop infinitely.
9045   }
9046
9047   const SCEVAddRecExpr *AddRec =
9048       dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
9049
9050   if (!AddRec && AllowPredicates)
9051     // Try to make this an AddRec using runtime tests, in the first X
9052     // iterations of this loop, where X is the SCEV expression found by the
9053     // algorithm below.
9054     AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
9055
9056   if (!AddRec || AddRec->getLoop() != L)
9057     return getCouldNotCompute();
9058
9059   // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
9060   // the quadratic equation to solve it.
9061   if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
9062     // We can only use this value if the chrec ends up with an exact zero
9063     // value at this index. When solving for "X*X != 5", for example, we
9064     // should not accept a root of 2.
9065     if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
9066       const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
9067       return ExitLimit(R, R, false, Predicates);
9068     }
9069     return getCouldNotCompute();
9070   }
9071
9072   // Otherwise we can only handle this if it is affine.
9073   if (!AddRec->isAffine())
9074     return getCouldNotCompute();
9075
9076   // If this is an affine expression, the execution count of this branch is
9077   // the minimum unsigned root of the following equation:
9078   //
9079   //     Start + Step*N = 0 (mod 2^BW)
9080   //
9081   // equivalent to:
9082   //
9083   //             Step*N = -Start (mod 2^BW)
9084   //
9085   // where BW is the common bit width of Start and Step.
9086
9087   // Get the initial value for the loop.
9088   const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
9089   const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
9090
9091   // For now we handle only constant steps.
9092   //
9093   // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
9094   // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
9095   // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
9096   // We have not yet seen any such cases.
9097   const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
9098   if (!StepC || StepC->getValue()->isZero())
9099     return getCouldNotCompute();
9100
9101   // For positive steps (counting up until unsigned overflow):
9102   //   N = -Start/Step (as unsigned)
9103   // For negative steps (counting down to zero):
9104   //   N = Start/-Step
9105   // First compute the unsigned distance from zero in the direction of Step.
9106   bool CountDown = StepC->getAPInt().isNegative();
9107   const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
9108
9109   // Handle unitary steps, which cannot wrap around.
9110   //   1*N = -Start; -1*N = Start (mod 2^BW), so:
9111   //   N = Distance (as unsigned)
9112   if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
9113     APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
9114     APInt MaxBECountBase = getUnsignedRangeMax(Distance);
9115     if (MaxBECountBase.ult(MaxBECount))
9116       MaxBECount = MaxBECountBase;
9117
9118     // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
9119     // rotated, we end up with a loop whose backedge-taken count is n - 1.
9120     // Detect this case, and see if we can improve the bound.
9121     //
9122     // Explicitly handling this here is necessary because getUnsignedRange
9123     // isn't context-sensitive; it doesn't know that we only care about the
9124     // range inside the loop.
9125     const SCEV *Zero = getZero(Distance->getType());
9126     const SCEV *One = getOne(Distance->getType());
9127     const SCEV *DistancePlusOne = getAddExpr(Distance, One);
9128     if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
9129       // If Distance + 1 doesn't overflow, we can compute the maximum distance
9130       // as "unsigned_max(Distance + 1) - 1".
9131       ConstantRange CR = getUnsignedRange(DistancePlusOne);
9132       MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
9133     }
9134     return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
9135   }
9136
9137   // If the condition controls loop exit (the loop exits only if the expression
9138   // is true) and the addition is no-wrap, we can use unsigned divide to
9139   // compute the backedge count. In this case, the step may not divide the
9140   // distance, but we don't care because if the condition is "missed" the loop
9141   // will have undefined behavior due to wrapping.
9142   if (ControlsExit && AddRec->hasNoSelfWrap() &&
9143       loopHasNoAbnormalExits(AddRec->getLoop())) {
9144     const SCEV *Exact =
9145         getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
9146     const SCEV *Max =
9147         Exact == getCouldNotCompute()
9148             ? Exact
9149             : getConstant(getUnsignedRangeMax(Exact));
9150     return ExitLimit(Exact, Max, false, Predicates);
9151   }
9152
9153   // Solve the general equation.
9154   const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
9155                                                getNegativeSCEV(Start), *this);
9156   const SCEV *M = E == getCouldNotCompute()
9157                       ?
E 9158 : getConstant(getUnsignedRangeMax(E)); 9159 return ExitLimit(E, M, false, Predicates); 9160 } 9161 9162 ScalarEvolution::ExitLimit 9163 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 9164 // Loops that look like: while (X == 0) are very strange indeed. We don't 9165 // handle them yet except for the trivial case. This could be expanded in the 9166 // future as needed. 9167 9168 // If the value is a constant, check to see if it is known to be non-zero 9169 // already. If so, the backedge will execute zero times. 9170 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 9171 if (!C->getValue()->isZero()) 9172 return getZero(C->getType()); 9173 return getCouldNotCompute(); // Otherwise it will loop infinitely. 9174 } 9175 9176 // We could implement others, but I really doubt anyone writes loops like 9177 // this, and if they did, they would already be constant folded. 9178 return getCouldNotCompute(); 9179 } 9180 9181 std::pair<const BasicBlock *, const BasicBlock *> 9182 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) 9183 const { 9184 // If the block has a unique predecessor, then there is no path from the 9185 // predecessor to the block that does not go through the direct edge 9186 // from the predecessor to the block. 9187 if (const BasicBlock *Pred = BB->getSinglePredecessor()) 9188 return {Pred, BB}; 9189 9190 // A loop's header is defined to be a block that dominates the loop. 9191 // If the header has a unique predecessor outside the loop, it must be 9192 // a block that has exactly one successor that can reach the loop. 9193 if (const Loop *L = LI.getLoopFor(BB)) 9194 return {L->getLoopPredecessor(), L->getHeader()}; 9195 9196 return {nullptr, nullptr}; 9197 } 9198 9199 /// SCEV structural equivalence is usually sufficient for testing whether two 9200 /// expressions are equal, however for the purposes of looking for a condition 9201 /// guarding a loop, it can be useful to be a little more general, since a 9202 /// front-end may have replicated the controlling expression. 9203 static bool HasSameValue(const SCEV *A, const SCEV *B) { 9204 // Quick check to see if they are the same SCEV. 9205 if (A == B) return true; 9206 9207 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 9208 // Not all instructions that are "identical" compute the same value. For 9209 // instance, two distinct alloca instructions allocating the same type are 9210 // identical and do not read memory; but compute distinct values. 9211 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 9212 }; 9213 9214 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 9215 // two different instructions with the same value. Check for this case. 9216 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 9217 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 9218 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 9219 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 9220 if (ComputesEqualValues(AI, BI)) 9221 return true; 9222 9223 // Otherwise assume they may have a different value. 9224 return false; 9225 } 9226 9227 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 9228 const SCEV *&LHS, const SCEV *&RHS, 9229 unsigned Depth) { 9230 bool Changed = false; 9231 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 9232 // '0 != 0'. 
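  // For example (illustrative): the constant query (5 u< 3) is folded to
  // '0 != 0' via TrivialCase(false) below, and (3 u< 5) to '0 == 0' via
  // TrivialCase(true).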
9233   auto TrivialCase = [&](bool TriviallyTrue) {
9234     LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
9235     Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
9236     return true;
9237   };
9238   // If we hit the max recursion limit, bail out.
9239   if (Depth >= 3)
9240     return false;
9241
9242   // Canonicalize a constant to the right side.
9243   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
9244     // Check for both operands constant.
9245     if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
9246       if (ConstantExpr::getICmp(Pred,
9247                                 LHSC->getValue(),
9248                                 RHSC->getValue())->isNullValue())
9249         return TrivialCase(false);
9250       else
9251         return TrivialCase(true);
9252     }
9253     // Otherwise swap the operands to put the constant on the right.
9254     std::swap(LHS, RHS);
9255     Pred = ICmpInst::getSwappedPredicate(Pred);
9256     Changed = true;
9257   }
9258
9259   // If we're comparing an addrec with a value which is loop-invariant in the
9260   // addrec's loop, put the addrec on the left. Also make a dominance check,
9261   // as both operands could be addrecs loop-invariant in each other's loop.
9262   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
9263     const Loop *L = AR->getLoop();
9264     if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
9265       std::swap(LHS, RHS);
9266       Pred = ICmpInst::getSwappedPredicate(Pred);
9267       Changed = true;
9268     }
9269   }
9270
9271   // If there's a constant operand, canonicalize comparisons with boundary
9272   // cases, and canonicalize *-or-equal comparisons to regular comparisons.
9273   if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
9274     const APInt &RA = RC->getAPInt();
9275
9276     bool SimplifiedByConstantRange = false;
9277
9278     if (!ICmpInst::isEquality(Pred)) {
9279       ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
9280       if (ExactCR.isFullSet())
9281         return TrivialCase(true);
9282       else if (ExactCR.isEmptySet())
9283         return TrivialCase(false);
9284
9285       APInt NewRHS;
9286       CmpInst::Predicate NewPred;
9287       if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
9288           ICmpInst::isEquality(NewPred)) {
9289         // We were able to convert an inequality to an equality.
9290         Pred = NewPred;
9291         RHS = getConstant(NewRHS);
9292         Changed = SimplifiedByConstantRange = true;
9293       }
9294     }
9295
9296     if (!SimplifiedByConstantRange) {
9297       switch (Pred) {
9298       default:
9299         break;
9300       case ICmpInst::ICMP_EQ:
9301       case ICmpInst::ICMP_NE:
9302         // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
9303         if (!RA)
9304           if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
9305             if (const SCEVMulExpr *ME =
9306                     dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
9307               if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
9308                   ME->getOperand(0)->isAllOnesValue()) {
9309                 RHS = AE->getOperand(1);
9310                 LHS = ME->getOperand(1);
9311                 Changed = true;
9312               }
9313         break;
9314
9315
9316       // The "Should have been caught earlier!" messages below refer to the
9317       // fact that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
9318       // should have fired on the corresponding cases and canonicalized the
9319       // check to a trivial case.
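      // For instance (illustrative), the exact region for "X u>= 0" is the
      // full set, so that query was already folded to TrivialCase(true)
      // above; hence the ICMP_UGE case below may assert that RA is not the
      // minimum value.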
9320 9321 case ICmpInst::ICMP_UGE: 9322 assert(!RA.isMinValue() && "Should have been caught earlier!"); 9323 Pred = ICmpInst::ICMP_UGT; 9324 RHS = getConstant(RA - 1); 9325 Changed = true; 9326 break; 9327 case ICmpInst::ICMP_ULE: 9328 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 9329 Pred = ICmpInst::ICMP_ULT; 9330 RHS = getConstant(RA + 1); 9331 Changed = true; 9332 break; 9333 case ICmpInst::ICMP_SGE: 9334 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 9335 Pred = ICmpInst::ICMP_SGT; 9336 RHS = getConstant(RA - 1); 9337 Changed = true; 9338 break; 9339 case ICmpInst::ICMP_SLE: 9340 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 9341 Pred = ICmpInst::ICMP_SLT; 9342 RHS = getConstant(RA + 1); 9343 Changed = true; 9344 break; 9345 } 9346 } 9347 } 9348 9349 // Check for obvious equality. 9350 if (HasSameValue(LHS, RHS)) { 9351 if (ICmpInst::isTrueWhenEqual(Pred)) 9352 return TrivialCase(true); 9353 if (ICmpInst::isFalseWhenEqual(Pred)) 9354 return TrivialCase(false); 9355 } 9356 9357 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 9358 // adding or subtracting 1 from one of the operands. 9359 switch (Pred) { 9360 case ICmpInst::ICMP_SLE: 9361 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 9362 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9363 SCEV::FlagNSW); 9364 Pred = ICmpInst::ICMP_SLT; 9365 Changed = true; 9366 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 9367 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 9368 SCEV::FlagNSW); 9369 Pred = ICmpInst::ICMP_SLT; 9370 Changed = true; 9371 } 9372 break; 9373 case ICmpInst::ICMP_SGE: 9374 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 9375 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 9376 SCEV::FlagNSW); 9377 Pred = ICmpInst::ICMP_SGT; 9378 Changed = true; 9379 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 9380 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9381 SCEV::FlagNSW); 9382 Pred = ICmpInst::ICMP_SGT; 9383 Changed = true; 9384 } 9385 break; 9386 case ICmpInst::ICMP_ULE: 9387 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 9388 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9389 SCEV::FlagNUW); 9390 Pred = ICmpInst::ICMP_ULT; 9391 Changed = true; 9392 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 9393 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 9394 Pred = ICmpInst::ICMP_ULT; 9395 Changed = true; 9396 } 9397 break; 9398 case ICmpInst::ICMP_UGE: 9399 if (!getUnsignedRangeMin(RHS).isMinValue()) { 9400 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 9401 Pred = ICmpInst::ICMP_UGT; 9402 Changed = true; 9403 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 9404 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9405 SCEV::FlagNUW); 9406 Pred = ICmpInst::ICMP_UGT; 9407 Changed = true; 9408 } 9409 break; 9410 default: 9411 break; 9412 } 9413 9414 // TODO: More simplifications are possible here. 9415 9416 // Recursively simplify until we either hit a recursion limit or nothing 9417 // changes. 
9418 if (Changed) 9419 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 9420 9421 return Changed; 9422 } 9423 9424 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9425 return getSignedRangeMax(S).isNegative(); 9426 } 9427 9428 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9429 return getSignedRangeMin(S).isStrictlyPositive(); 9430 } 9431 9432 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9433 return !getSignedRangeMin(S).isNegative(); 9434 } 9435 9436 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9437 return !getSignedRangeMax(S).isStrictlyPositive(); 9438 } 9439 9440 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9441 return isKnownNegative(S) || isKnownPositive(S); 9442 } 9443 9444 std::pair<const SCEV *, const SCEV *> 9445 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9446 // Compute SCEV on entry of loop L. 9447 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9448 if (Start == getCouldNotCompute()) 9449 return { Start, Start }; 9450 // Compute post increment SCEV for loop L. 9451 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 9452 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 9453 return { Start, PostInc }; 9454 } 9455 9456 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 9457 const SCEV *LHS, const SCEV *RHS) { 9458 // First collect all loops. 9459 SmallPtrSet<const Loop *, 8> LoopsUsed; 9460 getUsedLoops(LHS, LoopsUsed); 9461 getUsedLoops(RHS, LoopsUsed); 9462 9463 if (LoopsUsed.empty()) 9464 return false; 9465 9466 // Domination relationship must be a linear order on collected loops. 9467 #ifndef NDEBUG 9468 for (auto *L1 : LoopsUsed) 9469 for (auto *L2 : LoopsUsed) 9470 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 9471 DT.dominates(L2->getHeader(), L1->getHeader())) && 9472 "Domination relationship is not a linear order"); 9473 #endif 9474 9475 const Loop *MDL = 9476 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 9477 [&](const Loop *L1, const Loop *L2) { 9478 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 9479 }); 9480 9481 // Get init and post increment value for LHS. 9482 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 9483 // if LHS contains unknown non-invariant SCEV then bail out. 9484 if (SplitLHS.first == getCouldNotCompute()) 9485 return false; 9486 assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 9487 // Get init and post increment value for RHS. 9488 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 9489 // if RHS contains unknown non-invariant SCEV then bail out. 9490 if (SplitRHS.first == getCouldNotCompute()) 9491 return false; 9492 assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 9493 // It is possible that init SCEV contains an invariant load but it does 9494 // not dominate MDL and is not available at MDL loop entry, so we should 9495 // check it here. 
9496   if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
9497       !isAvailableAtLoopEntry(SplitRHS.first, MDL))
9498     return false;
9499
9500   // The backedge guard check seems to be faster than the entry guard check,
9501   // so trying it first can short-circuit the whole estimation in some cases.
9502   return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
9503                                      SplitRHS.second) &&
9504          isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
9505 }
9506
9507 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
9508                                        const SCEV *LHS, const SCEV *RHS) {
9509   // Canonicalize the inputs first.
9510   (void)SimplifyICmpOperands(Pred, LHS, RHS);
9511
9512   if (isKnownViaInduction(Pred, LHS, RHS))
9513     return true;
9514
9515   if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
9516     return true;
9517
9518   // Otherwise see what can be done with some simple reasoning.
9519   return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
9520 }
9521
9522 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
9523                                          const SCEV *LHS, const SCEV *RHS,
9524                                          const Instruction *Context) {
9525   // TODO: Analyze guards and assumes from Context's block.
9526   return isKnownPredicate(Pred, LHS, RHS) ||
9527          isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS);
9528 }
9529
9530 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
9531                                               const SCEVAddRecExpr *LHS,
9532                                               const SCEV *RHS) {
9533   const Loop *L = LHS->getLoop();
9534   return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
9535          isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
9536 }
9537
9538 Optional<ScalarEvolution::MonotonicPredicateType>
9539 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
9540                                            ICmpInst::Predicate Pred,
9541                                            Optional<const SCEV *> NumIter,
9542                                            const Instruction *Context) {
9543   assert((!NumIter || !isa<SCEVCouldNotCompute>(*NumIter)) &&
9544          "provided number of iterations must be computable!");
9545   auto Result = getMonotonicPredicateTypeImpl(LHS, Pred, NumIter, Context);
9546
9547 #ifndef NDEBUG
9548   // Verify an invariant: swapping the predicate should turn a monotonically
9549   // increasing predicate into a monotonically decreasing one, and vice versa.
9550   if (Result) {
9551     auto ResultSwapped = getMonotonicPredicateTypeImpl(
9552         LHS, ICmpInst::getSwappedPredicate(Pred), NumIter, Context);
9553
9554     assert(ResultSwapped.hasValue() && "should be able to analyze both!");
9555     assert(ResultSwapped.getValue() != Result.getValue() &&
9556            "monotonicity should flip as we flip the predicate");
9557   }
9558 #endif
9559
9560   return Result;
9561 }
9562
9563 Optional<ScalarEvolution::MonotonicPredicateType>
9564 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
9565                                                ICmpInst::Predicate Pred,
9566                                                Optional<const SCEV *> NumIter,
9567                                                const Instruction *Context) {
9568   // A zero step value for LHS means the induction variable is essentially a
9569   // loop-invariant value. We don't really depend on the predicate actually
9570   // flipping from false to true (for increasing predicates, and the other way
9571   // around for decreasing predicates); all we care about is that *if* the
9572   // predicate changes then it only changes from false to true.
9573   //
9574   // A zero step value in itself is not very useful, but there may be places
9575   // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
9576   // as general as possible.
9577
9578   // Only handle LE/LT/GE/GT predicates.
9579 if (!ICmpInst::isRelational(Pred)) 9580 return None; 9581 9582 bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred); 9583 assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) && 9584 "Should be greater or less!"); 9585 9586 bool IsUnsigned = ICmpInst::isUnsigned(Pred); 9587 assert((IsUnsigned || ICmpInst::isSigned(Pred)) && 9588 "Should be either signed or unsigned!"); 9589 // Check if we can prove no-wrap in the relevant range. 9590 9591 const SCEV *Step = LHS->getStepRecurrence(*this); 9592 bool IsStepNonNegative = isKnownNonNegative(Step); 9593 bool IsStepNonPositive = isKnownNonPositive(Step); 9594 // We need to know which direction the iteration is going. 9595 if (!IsStepNonNegative && !IsStepNonPositive) 9596 return None; 9597 9598 auto ProvedNoWrap = [&]() { 9599 // If the AddRec already has the flag, we are done. 9600 if (IsUnsigned ? LHS->hasNoUnsignedWrap() : LHS->hasNoSignedWrap()) 9601 return true; 9602 9603 if (!NumIter) 9604 return false; 9605 // We could not prove no-wrap on all iteration space. Can we prove it for 9606 // first iterations? In order to achieve it, check that: 9607 // 1. The addrec does not self-wrap; 9608 // 2. start <= end for non-negative step and start >= end for non-positive 9609 // step. 9610 bool HasNoSelfWrap = LHS->hasNoSelfWrap(); 9611 if (!HasNoSelfWrap) 9612 // If num iter has same type as the AddRec, and step is +/- 1, even max 9613 // possible number of iterations is not enough to self-wrap. 9614 if (NumIter.getValue()->getType() == LHS->getType()) 9615 if (Step == getOne(LHS->getType()) || 9616 Step == getMinusOne(LHS->getType())) 9617 HasNoSelfWrap = true; 9618 if (!HasNoSelfWrap) 9619 return false; 9620 const SCEV *Start = LHS->getStart(); 9621 const SCEV *End = LHS->evaluateAtIteration(*NumIter, *this); 9622 ICmpInst::Predicate NoOverflowPred = 9623 IsStepNonNegative ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_SGE; 9624 if (IsUnsigned) 9625 NoOverflowPred = ICmpInst::getUnsignedPredicate(NoOverflowPred); 9626 return isKnownPredicateAt(NoOverflowPred, Start, End, Context); 9627 }; 9628 9629 // If nothing worked, bail. 9630 if (!ProvedNoWrap()) 9631 return None; 9632 9633 if (IsUnsigned) 9634 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 9635 else { 9636 if (IsStepNonNegative) 9637 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 9638 9639 if (IsStepNonPositive) 9640 return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 9641 9642 return None; 9643 } 9644 } 9645 9646 Optional<ScalarEvolution::LoopInvariantPredicate> 9647 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred, 9648 const SCEV *LHS, const SCEV *RHS, 9649 const Loop *L) { 9650 9651 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 
9652 if (!isLoopInvariant(RHS, L)) { 9653 if (!isLoopInvariant(LHS, L)) 9654 return None; 9655 9656 std::swap(LHS, RHS); 9657 Pred = ICmpInst::getSwappedPredicate(Pred); 9658 } 9659 9660 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9661 if (!ArLHS || ArLHS->getLoop() != L) 9662 return None; 9663 9664 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred); 9665 if (!MonotonicType) 9666 return None; 9667 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 9668 // true as the loop iterates, and the backedge is control dependent on 9669 // "ArLHS `Pred` RHS" == true then we can reason as follows: 9670 // 9671 // * if the predicate was false in the first iteration then the predicate 9672 // is never evaluated again, since the loop exits without taking the 9673 // backedge. 9674 // * if the predicate was true in the first iteration then it will 9675 // continue to be true for all future iterations since it is 9676 // monotonically increasing. 9677 // 9678 // For both the above possibilities, we can replace the loop varying 9679 // predicate with its value on the first iteration of the loop (which is 9680 // loop invariant). 9681 // 9682 // A similar reasoning applies for a monotonically decreasing predicate, by 9683 // replacing true with false and false with true in the above two bullets. 9684 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing; 9685 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 9686 9687 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 9688 return None; 9689 9690 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS); 9691 } 9692 9693 Optional<ScalarEvolution::LoopInvariantPredicate> 9694 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations( 9695 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 9696 const Instruction *Context, const SCEV *MaxIter) { 9697 // Try to prove the following set of facts: 9698 // - The predicate is monotonic in the iteration space. 9699 // - If the check does not fail on the 1st iteration: 9700 // - It will not fail on the MaxIter'th iteration. 9701 // If the check does fail on the 1st iteration, we leave the loop and no 9702 // other checks matter. 9703 9704 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 9705 if (!isLoopInvariant(RHS, L)) { 9706 if (!isLoopInvariant(LHS, L)) 9707 return None; 9708 9709 std::swap(LHS, RHS); 9710 Pred = ICmpInst::getSwappedPredicate(Pred); 9711 } 9712 9713 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS); 9714 if (!AR || AR->getLoop() != L) 9715 return None; 9716 9717 if (!getMonotonicPredicateType(AR, Pred, MaxIter, Context)) 9718 return None; 9719 9720 // Value of IV on suggested last iteration. 9721 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this); 9722 // Does it still meet the requirement? 9723 if (!isKnownPredicateAt(Pred, Last, RHS, Context)) 9724 return None; 9725 9726 // Everything is fine. 9727 return ScalarEvolution::LoopInvariantPredicate(Pred, AR->getStart(), RHS); 9728 } 9729 9730 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 9731 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 9732 if (HasSameValue(LHS, RHS)) 9733 return ICmpInst::isTrueWhenEqual(Pred); 9734 9735 // This code is split out from isKnownPredicate because it is called from 9736 // within isLoopEntryGuardedByCond. 
9737 9738 auto CheckRanges = 9739 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 9740 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 9741 .contains(RangeLHS); 9742 }; 9743 9744 // The check at the top of the function catches the case where the values are 9745 // known to be equal. 9746 if (Pred == CmpInst::ICMP_EQ) 9747 return false; 9748 9749 if (Pred == CmpInst::ICMP_NE) 9750 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 9751 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 9752 isKnownNonZero(getMinusSCEV(LHS, RHS)); 9753 9754 if (CmpInst::isSigned(Pred)) 9755 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 9756 9757 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 9758 } 9759 9760 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 9761 const SCEV *LHS, 9762 const SCEV *RHS) { 9763 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 9764 // Return Y via OutY. 9765 auto MatchBinaryAddToConst = 9766 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 9767 SCEV::NoWrapFlags ExpectedFlags) { 9768 const SCEV *NonConstOp, *ConstOp; 9769 SCEV::NoWrapFlags FlagsPresent; 9770 9771 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 9772 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 9773 return false; 9774 9775 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 9776 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 9777 }; 9778 9779 APInt C; 9780 9781 switch (Pred) { 9782 default: 9783 break; 9784 9785 case ICmpInst::ICMP_SGE: 9786 std::swap(LHS, RHS); 9787 LLVM_FALLTHROUGH; 9788 case ICmpInst::ICMP_SLE: 9789 // X s<= (X + C)<nsw> if C >= 0 9790 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 9791 return true; 9792 9793 // (X + C)<nsw> s<= X if C <= 0 9794 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 9795 !C.isStrictlyPositive()) 9796 return true; 9797 break; 9798 9799 case ICmpInst::ICMP_SGT: 9800 std::swap(LHS, RHS); 9801 LLVM_FALLTHROUGH; 9802 case ICmpInst::ICMP_SLT: 9803 // X s< (X + C)<nsw> if C > 0 9804 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 9805 C.isStrictlyPositive()) 9806 return true; 9807 9808 // (X + C)<nsw> s< X if C < 0 9809 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 9810 return true; 9811 break; 9812 9813 case ICmpInst::ICMP_UGE: 9814 std::swap(LHS, RHS); 9815 LLVM_FALLTHROUGH; 9816 case ICmpInst::ICMP_ULE: 9817 // X u<= (X + C)<nuw> for any C 9818 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW)) 9819 return true; 9820 break; 9821 9822 case ICmpInst::ICMP_UGT: 9823 std::swap(LHS, RHS); 9824 LLVM_FALLTHROUGH; 9825 case ICmpInst::ICMP_ULT: 9826 // X u< (X + C)<nuw> if C != 0 9827 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW) && !C.isNullValue()) 9828 return true; 9829 break; 9830 } 9831 9832 return false; 9833 } 9834 9835 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 9836 const SCEV *LHS, 9837 const SCEV *RHS) { 9838 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 9839 return false; 9840 9841 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 9842 // the stack can result in exponential time complexity. 9843 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 9844 9845 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 9846 // 9847 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 9848 // isKnownPredicate. 
isKnownPredicate is more powerful, but also more
9849 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
9850 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
9851 // use isKnownPredicate later if needed.
9852   return isKnownNonNegative(RHS) &&
9853          isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
9854          isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
9855 }
9856
9857 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
9858                                         ICmpInst::Predicate Pred,
9859                                         const SCEV *LHS, const SCEV *RHS) {
9860   // No need to even try if we know the module has no guards.
9861   if (!HasGuards)
9862     return false;
9863
9864   return any_of(*BB, [&](const Instruction &I) {
9865     using namespace llvm::PatternMatch;
9866
9867     Value *Condition;
9868     return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
9869                          m_Value(Condition))) &&
9870            isImpliedCond(Pred, LHS, RHS, Condition, false);
9871   });
9872 }
9873
9874 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
9875 /// protected by a conditional between LHS and RHS. This is used to
9876 /// eliminate casts.
9877 bool
9878 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
9879                                              ICmpInst::Predicate Pred,
9880                                              const SCEV *LHS, const SCEV *RHS) {
9881   // Interpret a null as meaning no loop, where there is obviously no guard
9882   // (interprocedural conditions notwithstanding).
9883   if (!L) return true;
9884
9885   if (VerifyIR)
9886     assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
9887            "This cannot be done on broken IR!");
9888
9889
9890   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
9891     return true;
9892
9893   BasicBlock *Latch = L->getLoopLatch();
9894   if (!Latch)
9895     return false;
9896
9897   BranchInst *LoopContinuePredicate =
9898       dyn_cast<BranchInst>(Latch->getTerminator());
9899   if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
9900       isImpliedCond(Pred, LHS, RHS,
9901                     LoopContinuePredicate->getCondition(),
9902                     LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
9903     return true;
9904
9905   // We don't want more than one activation of the following loops on the stack
9906   // -- that can lead to O(n!) time complexity.
9907   if (WalkingBEDominatingConds)
9908     return false;
9909
9910   SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
9911
9912   // See if we can exploit a trip count to prove the predicate.
9913   const auto &BETakenInfo = getBackedgeTakenInfo(L);
9914   const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
9915   if (LatchBECount != getCouldNotCompute()) {
9916     // We know that Latch branches back to the loop header exactly
9917     // LatchBECount times. This means the backedge condition at Latch is
9918     // equivalent to "{0,+,1} u< LatchBECount".
9919     Type *Ty = LatchBECount->getType();
9920     auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
9921     const SCEV *LoopCounter =
9922         getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
9923     if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
9924                       LatchBECount))
9925       return true;
9926   }
9927
9928   // Check conditions due to any @llvm.assume intrinsics.
9929 for (auto &AssumeVH : AC.assumptions()) { 9930 if (!AssumeVH) 9931 continue; 9932 auto *CI = cast<CallInst>(AssumeVH); 9933 if (!DT.dominates(CI, Latch->getTerminator())) 9934 continue; 9935 9936 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 9937 return true; 9938 } 9939 9940 // If the loop is not reachable from the entry block, we risk running into an 9941 // infinite loop as we walk up into the dom tree. These loops do not matter 9942 // anyway, so we just return a conservative answer when we see them. 9943 if (!DT.isReachableFromEntry(L->getHeader())) 9944 return false; 9945 9946 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 9947 return true; 9948 9949 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 9950 DTN != HeaderDTN; DTN = DTN->getIDom()) { 9951 assert(DTN && "should reach the loop header before reaching the root!"); 9952 9953 BasicBlock *BB = DTN->getBlock(); 9954 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 9955 return true; 9956 9957 BasicBlock *PBB = BB->getSinglePredecessor(); 9958 if (!PBB) 9959 continue; 9960 9961 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 9962 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 9963 continue; 9964 9965 Value *Condition = ContinuePredicate->getCondition(); 9966 9967 // If we have an edge `E` within the loop body that dominates the only 9968 // latch, the condition guarding `E` also guards the backedge. This 9969 // reasoning works only for loops with a single latch. 9970 9971 BasicBlockEdge DominatingEdge(PBB, BB); 9972 if (DominatingEdge.isSingleEdge()) { 9973 // We're constructively (and conservatively) enumerating edges within the 9974 // loop body that dominate the latch. The dominator tree better agree 9975 // with us on this: 9976 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 9977 9978 if (isImpliedCond(Pred, LHS, RHS, Condition, 9979 BB != ContinuePredicate->getSuccessor(0))) 9980 return true; 9981 } 9982 } 9983 9984 return false; 9985 } 9986 9987 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 9988 ICmpInst::Predicate Pred, 9989 const SCEV *LHS, 9990 const SCEV *RHS) { 9991 if (VerifyIR) 9992 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 9993 "This cannot be done on broken IR!"); 9994 9995 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9996 return true; 9997 9998 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 9999 // the facts (a >= b && a != b) separately. A typical situation is when the 10000 // non-strict comparison is known from ranges and non-equality is known from 10001 // dominating predicates. If we are proving strict comparison, we always try 10002 // to prove non-equality and non-strict comparison separately. 10003 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 10004 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 10005 bool ProvedNonStrictComparison = false; 10006 bool ProvedNonEquality = false; 10007 10008 if (ProvingStrictComparison) { 10009 ProvedNonStrictComparison = 10010 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS); 10011 ProvedNonEquality = 10012 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS); 10013 if (ProvedNonStrictComparison && ProvedNonEquality) 10014 return true; 10015 } 10016 10017 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 
10018   auto ProveViaGuard = [&](const BasicBlock *Block) {
10019     if (isImpliedViaGuard(Block, Pred, LHS, RHS))
10020       return true;
10021     if (ProvingStrictComparison) {
10022       if (!ProvedNonStrictComparison)
10023         ProvedNonStrictComparison =
10024             isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
10025       if (!ProvedNonEquality)
10026         ProvedNonEquality =
10027             isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS);
10028       if (ProvedNonStrictComparison && ProvedNonEquality)
10029         return true;
10030     }
10031     return false;
10032   };
10033
10034   // Try to prove (Pred, LHS, RHS) using isImpliedCond.
10035   auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
10036     const Instruction *Context = &BB->front();
10037     if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context))
10038       return true;
10039     if (ProvingStrictComparison) {
10040       if (!ProvedNonStrictComparison)
10041         ProvedNonStrictComparison = isImpliedCond(NonStrictPredicate, LHS, RHS,
10042                                                   Condition, Inverse, Context);
10043       if (!ProvedNonEquality)
10044         ProvedNonEquality = isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS,
10045                                           Condition, Inverse, Context);
10046       if (ProvedNonStrictComparison && ProvedNonEquality)
10047         return true;
10048     }
10049     return false;
10050   };
10051
10052   // Starting at the block's predecessor, climb up the predecessor chain as
10053   // long as we can find predecessors that have unique successors leading back
10054   // to the original block.
10055   const Loop *ContainingLoop = LI.getLoopFor(BB);
10056   const BasicBlock *PredBB;
10057   if (ContainingLoop && ContainingLoop->getHeader() == BB)
10058     PredBB = ContainingLoop->getLoopPredecessor();
10059   else
10060     PredBB = BB->getSinglePredecessor();
10061   for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
10062        Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
10063     if (ProveViaGuard(Pair.first))
10064       return true;
10065
10066     const BranchInst *LoopEntryPredicate =
10067         dyn_cast<BranchInst>(Pair.first->getTerminator());
10068     if (!LoopEntryPredicate ||
10069         LoopEntryPredicate->isUnconditional())
10070       continue;
10071
10072     if (ProveViaCond(LoopEntryPredicate->getCondition(),
10073                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
10074       return true;
10075   }
10076
10077   // Check conditions due to any @llvm.assume intrinsics.
10078   for (auto &AssumeVH : AC.assumptions()) {
10079     if (!AssumeVH)
10080       continue;
10081     auto *CI = cast<CallInst>(AssumeVH);
10082     if (!DT.dominates(CI, BB))
10083       continue;
10084
10085     if (ProveViaCond(CI->getArgOperand(0), false))
10086       return true;
10087   }
10088
10089   return false;
10090 }
10091
10092 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
10093                                                ICmpInst::Predicate Pred,
10094                                                const SCEV *LHS,
10095                                                const SCEV *RHS) {
10096   // Interpret a null as meaning no loop, where there is obviously no guard
10097   // (interprocedural conditions notwithstanding).
10098   if (!L)
10099     return false;
10100
10101   // Both LHS and RHS must be available at loop entry.
10102   assert(isAvailableAtLoopEntry(LHS, L) &&
10103          "LHS is not available at Loop Entry");
10104   assert(isAvailableAtLoopEntry(RHS, L) &&
10105          "RHS is not available at Loop Entry");
10106   return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
10107 }
10108
10109 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10110                                     const SCEV *RHS,
10111                                     const Value *FoundCondValue, bool Inverse,
10112                                     const Instruction *Context) {
10113   if (!PendingLoopPredicates.insert(FoundCondValue).second)
10114     return false;
10115
10116   auto ClearOnExit =
10117       make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
10118
10119   // Recursively handle And and Or conditions.
10120   if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
10121     if (BO->getOpcode() == Instruction::And) {
10122       if (!Inverse)
10123         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
10124                              Context) ||
10125                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
10126                              Context);
10127     } else if (BO->getOpcode() == Instruction::Or) {
10128       if (Inverse)
10129         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
10130                              Context) ||
10131                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
10132                              Context);
10133     }
10134   }
10135
10136   const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
10137   if (!ICI) return false;
10138
10139   // We have now found a conditional branch that dominates the loop or controls
10140   // the loop latch. Check to see if it is the comparison we are looking for.
10141   ICmpInst::Predicate FoundPred;
10142   if (Inverse)
10143     FoundPred = ICI->getInversePredicate();
10144   else
10145     FoundPred = ICI->getPredicate();
10146
10147   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
10148   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
10149
10150   return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
10151 }
10152
10153 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10154                                     const SCEV *RHS,
10155                                     ICmpInst::Predicate FoundPred,
10156                                     const SCEV *FoundLHS, const SCEV *FoundRHS,
10157                                     const Instruction *Context) {
10158   // Balance the types.
10159   if (getTypeSizeInBits(LHS->getType()) <
10160       getTypeSizeInBits(FoundLHS->getType())) {
10161     // For unsigned and equality predicates, try to prove that both found
10162     // operands fit into a narrow unsigned range. If so, try to prove facts in
10163     // narrow types.
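    // For example (illustrative): an i16 fact "A u< B" with both A and B
    // known to be u<= 0xFF can be truncated to an i8 fact
    // "trunc(A) u< trunc(B)", which may then match an i8 query directly.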
10164 if (!CmpInst::isSigned(FoundPred)) { 10165 auto *NarrowType = LHS->getType(); 10166 auto *WideType = FoundLHS->getType(); 10167 auto BitWidth = getTypeSizeInBits(NarrowType); 10168 const SCEV *MaxValue = getZeroExtendExpr( 10169 getConstant(APInt::getMaxValue(BitWidth)), WideType); 10170 if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) && 10171 isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) { 10172 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType); 10173 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType); 10174 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS, 10175 TruncFoundRHS, Context)) 10176 return true; 10177 } 10178 } 10179 10180 if (CmpInst::isSigned(Pred)) { 10181 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 10182 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 10183 } else { 10184 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 10185 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 10186 } 10187 } else if (getTypeSizeInBits(LHS->getType()) > 10188 getTypeSizeInBits(FoundLHS->getType())) { 10189 if (CmpInst::isSigned(FoundPred)) { 10190 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 10191 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 10192 } else { 10193 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 10194 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 10195 } 10196 } 10197 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, 10198 FoundRHS, Context); 10199 } 10200 10201 bool ScalarEvolution::isImpliedCondBalancedTypes( 10202 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10203 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, 10204 const Instruction *Context) { 10205 assert(getTypeSizeInBits(LHS->getType()) == 10206 getTypeSizeInBits(FoundLHS->getType()) && 10207 "Types should be balanced!"); 10208 // Canonicalize the query to match the way instcombine will have 10209 // canonicalized the comparison. 10210 if (SimplifyICmpOperands(Pred, LHS, RHS)) 10211 if (LHS == RHS) 10212 return CmpInst::isTrueWhenEqual(Pred); 10213 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 10214 if (FoundLHS == FoundRHS) 10215 return CmpInst::isFalseWhenEqual(FoundPred); 10216 10217 // Check to see if we can make the LHS or RHS match. 10218 if (LHS == FoundRHS || RHS == FoundLHS) { 10219 if (isa<SCEVConstant>(RHS)) { 10220 std::swap(FoundLHS, FoundRHS); 10221 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 10222 } else { 10223 std::swap(LHS, RHS); 10224 Pred = ICmpInst::getSwappedPredicate(Pred); 10225 } 10226 } 10227 10228 // Check whether the found predicate is the same as the desired predicate. 10229 if (FoundPred == Pred) 10230 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context); 10231 10232 // Check whether swapping the found predicate makes it the same as the 10233 // desired predicate. 10234 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 10235 if (isa<SCEVConstant>(RHS)) 10236 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context); 10237 else 10238 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), RHS, 10239 LHS, FoundLHS, FoundRHS, Context); 10240 } 10241 10242 // Unsigned comparison is the same as signed comparison when both the operands 10243 // are non-negative. 
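  // For example, if x s>= 0 and y s>= 0, then x u< y iff x s< y: the unsigned
  // and signed orders agree on the non-negative half of the domain.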
10244   if (CmpInst::isUnsigned(FoundPred) &&
10245       CmpInst::getSignedPredicate(FoundPred) == Pred &&
10246       isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
10247     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);
10248
10249   // Check if we can make progress by sharpening ranges.
10250   if (FoundPred == ICmpInst::ICMP_NE &&
10251       (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
10252
10253     const SCEVConstant *C = nullptr;
10254     const SCEV *V = nullptr;
10255
10256     if (isa<SCEVConstant>(FoundLHS)) {
10257       C = cast<SCEVConstant>(FoundLHS);
10258       V = FoundRHS;
10259     } else {
10260       C = cast<SCEVConstant>(FoundRHS);
10261       V = FoundLHS;
10262     }
10263
10264     // The guarding predicate tells us that C != V. If the known range
10265     // of V is [C, t), we can sharpen the range to [C + 1, t). The
10266     // range we consider has to correspond to the same signedness as the
10267     // predicate we're interested in folding.
10268
10269     APInt Min = ICmpInst::isSigned(Pred) ?
10270         getSignedRangeMin(V) : getUnsignedRangeMin(V);
10271
10272     if (Min == C->getAPInt()) {
10273       // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
10274       // This is true even if (Min + 1) wraps around -- in case of
10275       // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
10276
10277       APInt SharperMin = Min + 1;
10278
10279       switch (Pred) {
10280       case ICmpInst::ICMP_SGE:
10281       case ICmpInst::ICMP_UGE:
10282         // We know V `Pred` SharperMin. If this implies LHS `Pred`
10283         // RHS, we're done.
10284         if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
10285                                   Context))
10286           return true;
10287         LLVM_FALLTHROUGH;
10288
10289       case ICmpInst::ICMP_SGT:
10290       case ICmpInst::ICMP_UGT:
10291         // We know from the range information that (V `Pred` Min ||
10292         // V == Min). We know from the guarding condition that !(V
10293         // == Min). This gives us
10294         //
10295         //   V `Pred` Min || V == Min && !(V == Min)
10296         //   => V `Pred` Min
10297         //
10298         // If V `Pred` Min implies LHS `Pred` RHS, we're done.
10299
10300         if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min),
10301                                   Context))
10302           return true;
10303         break;
10304
10305       // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS`, respectively.
10306       case ICmpInst::ICMP_SLE:
10307       case ICmpInst::ICMP_ULE:
10308         if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
10309                                   LHS, V, getConstant(SharperMin), Context))
10310           return true;
10311         LLVM_FALLTHROUGH;
10312
10313       case ICmpInst::ICMP_SLT:
10314       case ICmpInst::ICMP_ULT:
10315         if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
10316                                   LHS, V, getConstant(Min), Context))
10317           return true;
10318         break;
10319
10320       default:
10321         // No change
10322         break;
10323       }
10324     }
10325   }
10326
10327   // Check whether the actual condition is stronger than necessary.
10328   if (FoundPred == ICmpInst::ICMP_EQ)
10329     if (ICmpInst::isTrueWhenEqual(Pred))
10330       if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context))
10331         return true;
10332   if (Pred == ICmpInst::ICMP_NE)
10333     if (!ICmpInst::isTrueWhenEqual(FoundPred))
10334       if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS,
10335                                 Context))
10336         return true;
10337
10338   // Otherwise assume the worst.
10339 return false; 10340 } 10341 10342 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 10343 const SCEV *&L, const SCEV *&R, 10344 SCEV::NoWrapFlags &Flags) { 10345 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 10346 if (!AE || AE->getNumOperands() != 2) 10347 return false; 10348 10349 L = AE->getOperand(0); 10350 R = AE->getOperand(1); 10351 Flags = AE->getNoWrapFlags(); 10352 return true; 10353 } 10354 10355 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 10356 const SCEV *Less) { 10357 // We avoid subtracting expressions here because this function is usually 10358 // fairly deep in the call stack (i.e. is called many times). 10359 10360 // X - X = 0. 10361 if (More == Less) 10362 return APInt(getTypeSizeInBits(More->getType()), 0); 10363 10364 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 10365 const auto *LAR = cast<SCEVAddRecExpr>(Less); 10366 const auto *MAR = cast<SCEVAddRecExpr>(More); 10367 10368 if (LAR->getLoop() != MAR->getLoop()) 10369 return None; 10370 10371 // We look at affine expressions only; not for correctness but to keep 10372 // getStepRecurrence cheap. 10373 if (!LAR->isAffine() || !MAR->isAffine()) 10374 return None; 10375 10376 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 10377 return None; 10378 10379 Less = LAR->getStart(); 10380 More = MAR->getStart(); 10381 10382 // fall through 10383 } 10384 10385 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 10386 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 10387 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 10388 return M - L; 10389 } 10390 10391 SCEV::NoWrapFlags Flags; 10392 const SCEV *LLess = nullptr, *RLess = nullptr; 10393 const SCEV *LMore = nullptr, *RMore = nullptr; 10394 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 10395 // Compare (X + C1) vs X. 10396 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 10397 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 10398 if (RLess == More) 10399 return -(C1->getAPInt()); 10400 10401 // Compare X vs (X + C2). 10402 if (splitBinaryAdd(More, LMore, RMore, Flags)) 10403 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 10404 if (RMore == Less) 10405 return C2->getAPInt(); 10406 10407 // Compare (X + C1) vs (X + C2). 10408 if (C1 && C2 && RLess == RMore) 10409 return C2->getAPInt() - C1->getAPInt(); 10410 10411 return None; 10412 } 10413 10414 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( 10415 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10416 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) { 10417 // Try to recognize the following pattern: 10418 // 10419 // FoundRHS = ... 10420 // ... 10421 // loop: 10422 // FoundLHS = {Start,+,W} 10423 // context_bb: // Basic block from the same loop 10424 // known(Pred, FoundLHS, FoundRHS) 10425 // 10426 // If some predicate is known in the context of a loop, it is also known on 10427 // each iteration of this loop, including the first iteration. Therefore, in 10428 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to 10429 // prove the original pred using this fact. 10430 if (!Context) 10431 return false; 10432 const BasicBlock *ContextBB = Context->getParent(); 10433 // Make sure AR varies in the context block. 10434 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { 10435 const Loop *L = AR->getLoop(); 10436 // Make sure that context belongs to the loop and executes on 1st iteration 10437 // (if it ever executes at all). 
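    // A block that dominates the latch is executed on every iteration that
    // reaches the latch, in particular on the first one, where the AddRec
    // still has its start value.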
10438 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 10439 return false; 10440 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) 10441 return false; 10442 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); 10443 } 10444 10445 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { 10446 const Loop *L = AR->getLoop(); 10447 // Make sure that context belongs to the loop and executes on 1st iteration 10448 // (if it ever executes at all). 10449 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 10450 return false; 10451 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) 10452 return false; 10453 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); 10454 } 10455 10456 return false; 10457 } 10458 10459 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 10460 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10461 const SCEV *FoundLHS, const SCEV *FoundRHS) { 10462 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 10463 return false; 10464 10465 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 10466 if (!AddRecLHS) 10467 return false; 10468 10469 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 10470 if (!AddRecFoundLHS) 10471 return false; 10472 10473 // We'd like to let SCEV reason about control dependencies, so we constrain 10474 // both the inequalities to be about add recurrences on the same loop. This 10475 // way we can use isLoopEntryGuardedByCond later. 10476 10477 const Loop *L = AddRecFoundLHS->getLoop(); 10478 if (L != AddRecLHS->getLoop()) 10479 return false; 10480 10481 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 10482 // 10483 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 10484 // ... (2) 10485 // 10486 // Informal proof for (2), assuming (1) [*]: 10487 // 10488 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 10489 // 10490 // Then 10491 // 10492 // FoundLHS s< FoundRHS s< INT_MIN - C 10493 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 10494 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 10495 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 10496 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 10497 // <=> FoundLHS + C s< FoundRHS + C 10498 // 10499 // [*]: (1) can be proved by ruling out overflow. 10500 // 10501 // [**]: This can be proved by analyzing all the four possibilities: 10502 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 10503 // (A s>= 0, B s>= 0). 10504 // 10505 // Note: 10506 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 10507 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 10508 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 10509 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 10510 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 10511 // C)". 
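  // A worked instance of (1) in i8, with numbers chosen for illustration:
  // FoundLHS = 10, FoundRHS = 20, C = 30. Then -C = 226, the precondition
  // 20 u< 226 holds, and indeed (10 + 30) u< (20 + 30). With FoundRHS = 250
  // instead, the precondition 250 u< 226 fails, and FoundRHS + C would wrap
  // to 24.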
10512
10513   Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
10514   Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
10515   if (!LDiff || !RDiff || *LDiff != *RDiff)
10516     return false;
10517
10518   if (LDiff->isMinValue())
10519     return true;
10520
10521   APInt FoundRHSLimit;
10522
10523   if (Pred == CmpInst::ICMP_ULT) {
10524     FoundRHSLimit = -(*RDiff);
10525   } else {
10526     assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
10527     FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
10528   }
10529
10530   // Try to prove (1) or (2), as needed.
10531   return isAvailableAtLoopEntry(FoundRHS, L) &&
10532          isLoopEntryGuardedByCond(L, Pred, FoundRHS,
10533                                   getConstant(FoundRHSLimit));
10534 }
10535
10536 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
10537                                         const SCEV *LHS, const SCEV *RHS,
10538                                         const SCEV *FoundLHS,
10539                                         const SCEV *FoundRHS, unsigned Depth) {
10540   const PHINode *LPhi = nullptr, *RPhi = nullptr;
10541
10542   auto ClearOnExit = make_scope_exit([&]() {
10543     if (LPhi) {
10544       bool Erased = PendingMerges.erase(LPhi);
10545       assert(Erased && "Failed to erase LPhi!");
10546       (void)Erased;
10547     }
10548     if (RPhi) {
10549       bool Erased = PendingMerges.erase(RPhi);
10550       assert(Erased && "Failed to erase RPhi!");
10551       (void)Erased;
10552     }
10553   });
10554
10555   // Find the respective Phis and check that they are not already pending.
10556   if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
10557     if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
10558       if (!PendingMerges.insert(Phi).second)
10559         return false;
10560       LPhi = Phi;
10561     }
10562   if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
10563     if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
10564       // If we detect a loop of Phi nodes being processed by this method, for
10565       // example:
10566       //
10567       //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
10568       //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
10569       //
10570       // we don't want to deal with a case that complex, so we return the
10571       // conservative answer false.
10572       if (!PendingMerges.insert(Phi).second)
10573         return false;
10574       RPhi = Phi;
10575     }
10576
10577   // If neither LHS nor RHS is a Phi, there is nothing to do here.
10578   if (!LPhi && !RPhi)
10579     return false;
10580
10581   // If there is a SCEVUnknown Phi we are interested in, make it the LHS.
10582   if (!LPhi) {
10583     std::swap(LHS, RHS);
10584     std::swap(FoundLHS, FoundRHS);
10585     std::swap(LPhi, RPhi);
10586     Pred = ICmpInst::getSwappedPredicate(Pred);
10587   }
10588
10589   assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
10590   const BasicBlock *LBB = LPhi->getParent();
10591   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
10592
10593   auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
10594     return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
10595            isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
10596            isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
10597   };
10598
10599   if (RPhi && RPhi->getParent() == LBB) {
10600     // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
10601     // If we compare two Phis from the same block, and for each incoming block
10602     // the predicate is true for the incoming values from that block, then the
10603     // predicate is also true for the Phis.
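    // A hypothetical IR sketch of this case:
    //
    //   %l = phi i32 [ 0, %a ], [ %x, %b ]
    //   %r = phi i32 [ 1, %a ], [ %y, %b ]
    //
    // If both 0 `Pred` 1 and %x `Pred` %y are provable, then %l `Pred` %r.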
10604 for (const BasicBlock *IncBB : predecessors(LBB)) { 10605 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 10606 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB)); 10607 if (!ProvedEasily(L, R)) 10608 return false; 10609 } 10610 } else if (RAR && RAR->getLoop()->getHeader() == LBB) { 10611 // Case two: RHS is also a Phi from the same basic block, and it is an 10612 // AddRec. It means that there is a loop which has both AddRec and Unknown 10613 // PHIs, for it we can compare incoming values of AddRec from above the loop 10614 // and latch with their respective incoming values of LPhi. 10615 // TODO: Generalize to handle loops with many inputs in a header. 10616 if (LPhi->getNumIncomingValues() != 2) return false; 10617 10618 auto *RLoop = RAR->getLoop(); 10619 auto *Predecessor = RLoop->getLoopPredecessor(); 10620 assert(Predecessor && "Loop with AddRec with no predecessor?"); 10621 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor)); 10622 if (!ProvedEasily(L1, RAR->getStart())) 10623 return false; 10624 auto *Latch = RLoop->getLoopLatch(); 10625 assert(Latch && "Loop with AddRec with no latch?"); 10626 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch)); 10627 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this))) 10628 return false; 10629 } else { 10630 // In all other cases go over inputs of LHS and compare each of them to RHS, 10631 // the predicate is true for (LHS, RHS) if it is true for all such pairs. 10632 // At this point RHS is either a non-Phi, or it is a Phi from some block 10633 // different from LBB. 10634 for (const BasicBlock *IncBB : predecessors(LBB)) { 10635 // Check that RHS is available in this block. 10636 if (!dominates(RHS, IncBB)) 10637 return false; 10638 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 10639 if (!ProvedEasily(L, RHS)) 10640 return false; 10641 } 10642 } 10643 return true; 10644 } 10645 10646 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 10647 const SCEV *LHS, const SCEV *RHS, 10648 const SCEV *FoundLHS, 10649 const SCEV *FoundRHS, 10650 const Instruction *Context) { 10651 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10652 return true; 10653 10654 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10655 return true; 10656 10657 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS, 10658 Context)) 10659 return true; 10660 10661 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 10662 FoundLHS, FoundRHS) || 10663 // ~x < ~y --> x > y 10664 isImpliedCondOperandsHelper(Pred, LHS, RHS, 10665 getNotSCEV(FoundRHS), 10666 getNotSCEV(FoundLHS)); 10667 } 10668 10669 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values? 10670 template <typename MinMaxExprType> 10671 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, 10672 const SCEV *Candidate) { 10673 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr); 10674 if (!MinMaxExpr) 10675 return false; 10676 10677 return find(MinMaxExpr->operands(), Candidate) != MinMaxExpr->op_end(); 10678 } 10679 10680 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 10681 ICmpInst::Predicate Pred, 10682 const SCEV *LHS, const SCEV *RHS) { 10683 // If both sides are affine addrecs for the same loop, with equal 10684 // steps, and we know the recurrences don't wrap, then we only 10685 // need to check the predicate on the starting values. 
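  // An illustrative instance: {0,+,4}<nsw><%L> s< {8,+,4}<nsw><%L> on every
  // iteration, because the equal non-wrapping steps preserve the relation
  // established by the starts 0 s< 8.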
10686 10687 if (!ICmpInst::isRelational(Pred)) 10688 return false; 10689 10690 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 10691 if (!LAR) 10692 return false; 10693 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 10694 if (!RAR) 10695 return false; 10696 if (LAR->getLoop() != RAR->getLoop()) 10697 return false; 10698 if (!LAR->isAffine() || !RAR->isAffine()) 10699 return false; 10700 10701 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 10702 return false; 10703 10704 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 10705 SCEV::FlagNSW : SCEV::FlagNUW; 10706 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) 10707 return false; 10708 10709 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); 10710 } 10711 10712 /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max 10713 /// expression? 10714 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, 10715 ICmpInst::Predicate Pred, 10716 const SCEV *LHS, const SCEV *RHS) { 10717 switch (Pred) { 10718 default: 10719 return false; 10720 10721 case ICmpInst::ICMP_SGE: 10722 std::swap(LHS, RHS); 10723 LLVM_FALLTHROUGH; 10724 case ICmpInst::ICMP_SLE: 10725 return 10726 // min(A, ...) <= A 10727 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) || 10728 // A <= max(A, ...) 10729 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); 10730 10731 case ICmpInst::ICMP_UGE: 10732 std::swap(LHS, RHS); 10733 LLVM_FALLTHROUGH; 10734 case ICmpInst::ICMP_ULE: 10735 return 10736 // min(A, ...) <= A 10737 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) || 10738 // A <= max(A, ...) 10739 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS); 10740 } 10741 10742 llvm_unreachable("covered switch fell through?!"); 10743 } 10744 10745 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred, 10746 const SCEV *LHS, const SCEV *RHS, 10747 const SCEV *FoundLHS, 10748 const SCEV *FoundRHS, 10749 unsigned Depth) { 10750 assert(getTypeSizeInBits(LHS->getType()) == 10751 getTypeSizeInBits(RHS->getType()) && 10752 "LHS and RHS have different sizes?"); 10753 assert(getTypeSizeInBits(FoundLHS->getType()) == 10754 getTypeSizeInBits(FoundRHS->getType()) && 10755 "FoundLHS and FoundRHS have different sizes?"); 10756 // We want to avoid hurting the compile time with analysis of too big trees. 10757 if (Depth > MaxSCEVOperationsImplicationDepth) 10758 return false; 10759 10760 // We only want to work with GT comparison so far. 10761 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) { 10762 Pred = CmpInst::getSwappedPredicate(Pred); 10763 std::swap(LHS, RHS); 10764 std::swap(FoundLHS, FoundRHS); 10765 } 10766 10767 // For unsigned, try to reduce it to corresponding signed comparison. 10768 if (Pred == ICmpInst::ICMP_UGT) 10769 // We can replace unsigned predicate with its signed counterpart if all 10770 // involved values are non-negative. 10771 // TODO: We could have better support for unsigned. 10772 if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) { 10773 // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing 10774 // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us 10775 // use this fact to prove that LHS and RHS are non-negative. 
10776     const SCEV *MinusOne = getMinusOne(LHS->getType());
10777     if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
10778                               FoundRHS) &&
10779         isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
10780                               FoundRHS))
10781       Pred = ICmpInst::ICMP_SGT;
10782   }
10783
10784   if (Pred != ICmpInst::ICMP_SGT)
10785     return false;
10786
10787   auto GetOpFromSExt = [&](const SCEV *S) {
10788     if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
10789       return Ext->getOperand();
10790     // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
10791     // the constant in some cases.
10792     return S;
10793   };
10794
10795   // Acquire values from extensions.
10796   auto *OrigLHS = LHS;
10797   auto *OrigFoundLHS = FoundLHS;
10798   LHS = GetOpFromSExt(LHS);
10799   FoundLHS = GetOpFromSExt(FoundLHS);
10800
10801   // Whether the SGT predicate can be proved trivially or using the found context.
10802   auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
10803     return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
10804            isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
10805                                   FoundRHS, Depth + 1);
10806   };
10807
10808   if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
10809     // We want to avoid creation of any new non-constant SCEV. Since we are
10810     // going to compare the operands to RHS, we should be certain that we don't
10811     // need any size extensions for this. So let's decline all cases when the
10812     // sizes of types of LHS and RHS do not match.
10813     // TODO: Maybe try to get RHS from sext to catch more cases?
10814     if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
10815       return false;
10816
10817     // Should not overflow.
10818     if (!LHSAddExpr->hasNoSignedWrap())
10819       return false;
10820
10821     auto *LL = LHSAddExpr->getOperand(0);
10822     auto *LR = LHSAddExpr->getOperand(1);
10823     auto *MinusOne = getMinusOne(RHS->getType());
10824
10825     // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
10826     auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
10827       return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
10828     };
10829     // Try to prove the following rule:
10830     // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
10831     // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
10832     if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
10833       return true;
10834   } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
10835     Value *LL, *LR;
10836     // FIXME: Once we have SDiv implemented, we can get rid of this matching.
10837
10838     using namespace llvm::PatternMatch;
10839
10840     if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
10841       // Rules for division.
10842       // We are going to perform some comparisons with Denominator and its
10843       // derivative expressions. In the general case, creating a SCEV for it
10844       // may lead to a complex analysis of the entire graph, and in particular
10845       // it can request trip count recalculation for the same loop. That result
10846       // would be cached as SCEVCouldNotCompute to avoid infinite recursion. To
10847       // avoid this, we only want to create SCEVs that are constants in this
10848       // section. So we bail if Denominator is not a constant.
10849       if (!isa<ConstantInt>(LR))
10850         return false;
10851
10852       auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
10853
10854       // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
10855       // then a SCEV for the numerator already exists and matches with FoundLHS.
10856       auto *Numerator = getExistingSCEV(LL);
10857       if (!Numerator || Numerator->getType() != FoundLHS->getType())
10858         return false;
10859
10860       // Make sure that the numerator matches with FoundLHS and the denominator
10861       // is positive.
10862       if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
10863         return false;
10864
10865       auto *DTy = Denominator->getType();
10866       auto *FRHSTy = FoundRHS->getType();
10867       if (DTy->isPointerTy() != FRHSTy->isPointerTy())
10868         // One of the types is a pointer and the other one is not. We cannot
10869         // extend them properly to a wider type, so let us just reject this case.
10870         // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
10871         // to avoid this check.
10872         return false;
10873
10874       // Given that:
10875       // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
10876       auto *WTy = getWiderType(DTy, FRHSTy);
10877       auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
10878       auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
10879
10880       // Try to prove the following rule:
10881       // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
10882       // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
10883       // divide it by Denominator < 4, we will have at least 1.
10884       auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
10885       if (isKnownNonPositive(RHS) &&
10886           IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
10887         return true;
10888
10889       // Try to prove the following rule:
10890       // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
10891       // For example, given that FoundLHS > -3, FoundLHS is at least -2. If we
10892       // divide it by Denominator > 2, then:
10893       // 1. If FoundLHS is negative, then the result is 0.
10894       // 2. If FoundLHS is non-negative, then the result is non-negative.
10895       // Either way, the result is non-negative.
10896       auto *MinusOne = getMinusOne(WTy);
10897       auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
10898       if (isKnownNegative(RHS) &&
10899           IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
10900         return true;
10901     }
10902   }
10903
10904   // If our expression contained SCEVUnknown Phis, and we split it down and now
10905   // need to prove something for them, try to prove the predicate for every
10906   // possible incoming value of those Phis.
10907   if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
10908     return true;
10909
10910   return false;
10911 }
10912
10913 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
10914                                         const SCEV *LHS, const SCEV *RHS) {
10915   // zext x u<= sext x, sext x s<= zext x
10916   switch (Pred) {
10917   case ICmpInst::ICMP_SGE:
10918     std::swap(LHS, RHS);
10919     LLVM_FALLTHROUGH;
10920   case ICmpInst::ICMP_SLE: {
10921     // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
10922     const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
10923     const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
10924     if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
10925       return true;
10926     break;
10927   }
10928   case ICmpInst::ICMP_UGE:
10929     std::swap(LHS, RHS);
10930     LLVM_FALLTHROUGH;
10931   case ICmpInst::ICMP_ULE: {
10932     // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
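    // For example, for i8 x = -1 we have (zext i8 -1 to i16) = 255 while
    // (sext i8 -1 to i16) = 65535 when read unsigned, so zext x u<= sext x;
    // for any x s>= 0 the two extensions agree.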
10933     const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
10934     const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
10935     if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
10936       return true;
10937     break;
10938   }
10939   default:
10940     break;
10941   }
10942   return false;
10943 }
10944
10945 bool
10946 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
10947                                                  const SCEV *LHS, const SCEV *RHS) {
10948   return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
10949          isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
10950          IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
10951          IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
10952          isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
10953 }
10954
10955 bool
10956 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
10957                                              const SCEV *LHS, const SCEV *RHS,
10958                                              const SCEV *FoundLHS,
10959                                              const SCEV *FoundRHS) {
10960   switch (Pred) {
10961   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
10962   case ICmpInst::ICMP_EQ:
10963   case ICmpInst::ICMP_NE:
10964     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
10965       return true;
10966     break;
10967   case ICmpInst::ICMP_SLT:
10968   case ICmpInst::ICMP_SLE:
10969     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
10970         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
10971       return true;
10972     break;
10973   case ICmpInst::ICMP_SGT:
10974   case ICmpInst::ICMP_SGE:
10975     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
10976         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
10977       return true;
10978     break;
10979   case ICmpInst::ICMP_ULT:
10980   case ICmpInst::ICMP_ULE:
10981     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
10982         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
10983       return true;
10984     break;
10985   case ICmpInst::ICMP_UGT:
10986   case ICmpInst::ICMP_UGE:
10987     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
10988         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
10989       return true;
10990     break;
10991   }
10992
10993   // Maybe it can be proved via operations?
10994   if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
10995     return true;
10996
10997   return false;
10998 }
10999
11000 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
11001                                                      const SCEV *LHS,
11002                                                      const SCEV *RHS,
11003                                                      const SCEV *FoundLHS,
11004                                                      const SCEV *FoundRHS) {
11005   if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
11006     // The restriction on `FoundRHS` can be lifted easily -- it exists only to
11007     // reduce the compile time impact of this optimization.
11008     return false;
11009
11010   Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
11011   if (!Addend)
11012     return false;
11013
11014   const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
11015
11016   // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
11017   // antecedent "`FoundLHS` `Pred` `FoundRHS`".
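  // A worked example with made-up constants: from FoundLHS u< 8 we get
  // FoundLHSRange = [0, 8). With Addend = 2 this gives LHSRange = [2, 10),
  // and a consequent such as LHS u< 10, whose satisfying region is [0, 10),
  // contains it, so the implication holds.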
11018 ConstantRange FoundLHSRange = 11019 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 11020 11021 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 11022 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 11023 11024 // We can also compute the range of values for `LHS` that satisfy the 11025 // consequent, "`LHS` `Pred` `RHS`": 11026 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 11027 ConstantRange SatisfyingLHSRange = 11028 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 11029 11030 // The antecedent implies the consequent if every value of `LHS` that 11031 // satisfies the antecedent also satisfies the consequent. 11032 return SatisfyingLHSRange.contains(LHSRange); 11033 } 11034 11035 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 11036 bool IsSigned, bool NoWrap) { 11037 assert(isKnownPositive(Stride) && "Positive stride expected!"); 11038 11039 if (NoWrap) return false; 11040 11041 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 11042 const SCEV *One = getOne(Stride->getType()); 11043 11044 if (IsSigned) { 11045 APInt MaxRHS = getSignedRangeMax(RHS); 11046 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 11047 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 11048 11049 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 11050 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 11051 } 11052 11053 APInt MaxRHS = getUnsignedRangeMax(RHS); 11054 APInt MaxValue = APInt::getMaxValue(BitWidth); 11055 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 11056 11057 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 11058 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 11059 } 11060 11061 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 11062 bool IsSigned, bool NoWrap) { 11063 if (NoWrap) return false; 11064 11065 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 11066 const SCEV *One = getOne(Stride->getType()); 11067 11068 if (IsSigned) { 11069 APInt MinRHS = getSignedRangeMin(RHS); 11070 APInt MinValue = APInt::getSignedMinValue(BitWidth); 11071 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 11072 11073 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 11074 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 11075 } 11076 11077 APInt MinRHS = getUnsignedRangeMin(RHS); 11078 APInt MinValue = APInt::getMinValue(BitWidth); 11079 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 11080 11081 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 11082 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 11083 } 11084 11085 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 11086 bool Equality) { 11087 const SCEV *One = getOne(Step->getType()); 11088 Delta = Equality ? getAddExpr(Delta, Step) 11089 : getAddExpr(Delta, getMinusSCEV(Step, One)); 11090 return getUDivExpr(Delta, Step); 11091 } 11092 11093 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 11094 const SCEV *Stride, 11095 const SCEV *End, 11096 unsigned BitWidth, 11097 bool IsSigned) { 11098 11099 assert(!isKnownNonPositive(Stride) && 11100 "Stride is expected strictly positive!"); 11101 // Calculate the maximum backedge count based on the range of values 11102 // permitted by Start, End, and Stride. 11103 const SCEV *MaxBECount; 11104 APInt MinStart = 11105 IsSigned ? 
getSignedRangeMin(Start) : getUnsignedRangeMin(Start);
11106
11107   APInt StrideForMaxBECount =
11108       IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
11109
11110   // We already know that the stride is positive, so we paper over conservatism
11111   // in our range computation by forcing StrideForMaxBECount to be at least one.
11112   // In theory this is unnecessary, but we expect MaxBECount to be a
11113   // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there
11114   // is nothing to constant fold it to).
11115   APInt One(BitWidth, 1, IsSigned);
11116   StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);
11117
11118   APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
11119                             : APInt::getMaxValue(BitWidth);
11120   APInt Limit = MaxValue - (StrideForMaxBECount - 1);
11121
11122   // Although End can be a MAX expression we estimate MaxEnd considering only
11123   // the case End = RHS of the loop termination condition. This is safe because
11124   // in the other case (End - Start) is zero, leading to a zero maximum backedge
11125   // taken count.
11126   APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
11127                           : APIntOps::umin(getUnsignedRangeMax(End), Limit);
11128
11129   MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
11130                               getConstant(StrideForMaxBECount) /* Step */,
11131                               false /* Equality */);
11132
11133   return MaxBECount;
11134 }
11135
11136 ScalarEvolution::ExitLimit
11137 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
11138                                   const Loop *L, bool IsSigned,
11139                                   bool ControlsExit, bool AllowPredicates) {
11140   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
11141
11142   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
11143   bool PredicatedIV = false;
11144
11145   if (!IV && AllowPredicates) {
11146     // Try to make this an AddRec using runtime tests, in the first X
11147     // iterations of this loop, where X is the SCEV expression found by the
11148     // algorithm below.
11149     IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
11150     PredicatedIV = true;
11151   }
11152
11153   // Avoid weird loops.
11154   if (!IV || IV->getLoop() != L || !IV->isAffine())
11155     return getCouldNotCompute();
11156
11157   bool NoWrap = ControlsExit &&
11158                 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
11159
11160   const SCEV *Stride = IV->getStepRecurrence(*this);
11161
11162   bool PositiveStride = isKnownPositive(Stride);
11163
11164   // Avoid negative or zero stride values.
11165   if (!PositiveStride) {
11166     // We can compute the correct backedge taken count for loops with unknown
11167     // strides if we can prove that the loop is not an infinite loop with side
11168     // effects. Here's the loop structure we are trying to handle -
11169     //
11170     //   i = start
11171     //   do {
11172     //     A[i] = i;
11173     //     i += s;
11174     //   } while (i < end);
11175     //
11176     // The backedge taken count for such loops is evaluated as -
11177     //   (max(end, start + stride) - start - 1) /u stride
11178     //
11179     // The additional preconditions that we need to check to prove the
11180     // correctness of the above formula are as follows -
11181     //
11182     // a) IV is either nuw or nsw depending upon signedness (indicated by the
11183     //    NoWrap flag).
11184     // b) the loop has a single exit and no side effects.
11185     //
11186     //
11187     // Precondition a) implies that if the stride is negative, this is a single
11188     // trip loop. The backedge taken count formula reduces to zero in this case.
11189 // 11190 // Precondition b) implies that the unknown stride cannot be zero otherwise 11191 // we have UB. 11192 // 11193 // The positive stride case is the same as isKnownPositive(Stride) returning 11194 // true (original behavior of the function). 11195 // 11196 // We want to make sure that the stride is truly unknown as there are edge 11197 // cases where ScalarEvolution propagates no wrap flags to the 11198 // post-increment/decrement IV even though the increment/decrement operation 11199 // itself is wrapping. The computed backedge taken count may be wrong in 11200 // such cases. This is prevented by checking that the stride is not known to 11201 // be either positive or non-positive. For example, no wrap flags are 11202 // propagated to the post-increment IV of this loop with a trip count of 2 - 11203 // 11204 // unsigned char i; 11205 // for(i=127; i<128; i+=129) 11206 // A[i] = i; 11207 // 11208 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) || 11209 !loopHasNoSideEffects(L)) 11210 return getCouldNotCompute(); 11211 } else if (!Stride->isOne() && 11212 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) 11213 // Avoid proven overflow cases: this will ensure that the backedge taken 11214 // count will not generate any unsigned overflow. Relaxed no-overflow 11215 // conditions exploit NoWrapFlags, allowing to optimize in presence of 11216 // undefined behaviors like the case of C language. 11217 return getCouldNotCompute(); 11218 11219 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 11220 : ICmpInst::ICMP_ULT; 11221 const SCEV *Start = IV->getStart(); 11222 const SCEV *End = RHS; 11223 // When the RHS is not invariant, we do not know the end bound of the loop and 11224 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 11225 // calculate the MaxBECount, given the start, stride and max value for the end 11226 // bound of the loop (RHS), and the fact that IV does not overflow (which is 11227 // checked above). 11228 if (!isLoopInvariant(RHS, L)) { 11229 const SCEV *MaxBECount = computeMaxBECountForLT( 11230 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 11231 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 11232 false /*MaxOrZero*/, Predicates); 11233 } 11234 // If the backedge is taken at least once, then it will be taken 11235 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 11236 // is the LHS value of the less-than comparison the first time it is evaluated 11237 // and End is the RHS. 11238 const SCEV *BECountIfBackedgeTaken = 11239 computeBECount(getMinusSCEV(End, Start), Stride, false); 11240 // If the loop entry is guarded by the result of the backedge test of the 11241 // first loop iteration, then we know the backedge will be taken at least 11242 // once and so the backedge taken count is as above. If not then we use the 11243 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 11244 // as if the backedge is taken at least once max(End,Start) is End and so the 11245 // result is as above, and if not max(End,Start) is Start so we get a backedge 11246 // count of zero. 11247 const SCEV *BECount; 11248 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 11249 BECount = BECountIfBackedgeTaken; 11250 else { 11251 // If we know that RHS >= Start in the context of loop, then we know that 11252 // max(RHS, Start) = RHS at this point. 11253 if (isLoopEntryGuardedByCond( 11254 L, IsSigned ? 
ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, RHS, Start)) 11255 End = RHS; 11256 else 11257 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 11258 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 11259 } 11260 11261 const SCEV *MaxBECount; 11262 bool MaxOrZero = false; 11263 if (isa<SCEVConstant>(BECount)) 11264 MaxBECount = BECount; 11265 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 11266 // If we know exactly how many times the backedge will be taken if it's 11267 // taken at least once, then the backedge count will either be that or 11268 // zero. 11269 MaxBECount = BECountIfBackedgeTaken; 11270 MaxOrZero = true; 11271 } else { 11272 MaxBECount = computeMaxBECountForLT( 11273 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 11274 } 11275 11276 if (isa<SCEVCouldNotCompute>(MaxBECount) && 11277 !isa<SCEVCouldNotCompute>(BECount)) 11278 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 11279 11280 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 11281 } 11282 11283 ScalarEvolution::ExitLimit 11284 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 11285 const Loop *L, bool IsSigned, 11286 bool ControlsExit, bool AllowPredicates) { 11287 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 11288 // We handle only IV > Invariant 11289 if (!isLoopInvariant(RHS, L)) 11290 return getCouldNotCompute(); 11291 11292 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 11293 if (!IV && AllowPredicates) 11294 // Try to make this an AddRec using runtime tests, in the first X 11295 // iterations of this loop, where X is the SCEV expression found by the 11296 // algorithm below. 11297 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 11298 11299 // Avoid weird loops 11300 if (!IV || IV->getLoop() != L || !IV->isAffine()) 11301 return getCouldNotCompute(); 11302 11303 bool NoWrap = ControlsExit && 11304 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 11305 11306 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 11307 11308 // Avoid negative or zero stride values 11309 if (!isKnownPositive(Stride)) 11310 return getCouldNotCompute(); 11311 11312 // Avoid proven overflow cases: this will ensure that the backedge taken count 11313 // will not generate any unsigned overflow. Relaxed no-overflow conditions 11314 // exploit NoWrapFlags, allowing to optimize in presence of undefined 11315 // behaviors like the case of C language. 11316 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 11317 return getCouldNotCompute(); 11318 11319 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 11320 : ICmpInst::ICMP_UGT; 11321 11322 const SCEV *Start = IV->getStart(); 11323 const SCEV *End = RHS; 11324 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { 11325 // If we know that Start >= RHS in the context of loop, then we know that 11326 // min(RHS, Start) = RHS at this point. 11327 if (isLoopEntryGuardedByCond( 11328 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) 11329 End = RHS; 11330 else 11331 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 11332 } 11333 11334 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 11335 11336 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 11337 : getUnsignedRangeMax(Start); 11338 11339 APInt MinStride = IsSigned ? 
getSignedRangeMin(Stride) 11340 : getUnsignedRangeMin(Stride); 11341 11342 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 11343 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 11344 : APInt::getMinValue(BitWidth) + (MinStride - 1); 11345 11346 // Although End can be a MIN expression we estimate MinEnd considering only 11347 // the case End = RHS. This is safe because in the other case (Start - End) 11348 // is zero, leading to a zero maximum backedge taken count. 11349 APInt MinEnd = 11350 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 11351 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 11352 11353 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) 11354 ? BECount 11355 : computeBECount(getConstant(MaxStart - MinEnd), 11356 getConstant(MinStride), false); 11357 11358 if (isa<SCEVCouldNotCompute>(MaxBECount)) 11359 MaxBECount = BECount; 11360 11361 return ExitLimit(BECount, MaxBECount, false, Predicates); 11362 } 11363 11364 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 11365 ScalarEvolution &SE) const { 11366 if (Range.isFullSet()) // Infinite loop. 11367 return SE.getCouldNotCompute(); 11368 11369 // If the start is a non-zero constant, shift the range to simplify things. 11370 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 11371 if (!SC->getValue()->isZero()) { 11372 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 11373 Operands[0] = SE.getZero(SC->getType()); 11374 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 11375 getNoWrapFlags(FlagNW)); 11376 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 11377 return ShiftedAddRec->getNumIterationsInRange( 11378 Range.subtract(SC->getAPInt()), SE); 11379 // This is strange and shouldn't happen. 11380 return SE.getCouldNotCompute(); 11381 } 11382 11383 // The only time we can solve this is when we have all constant indices. 11384 // Otherwise, we cannot determine the overflow conditions. 11385 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 11386 return SE.getCouldNotCompute(); 11387 11388 // Okay at this point we know that all elements of the chrec are constants and 11389 // that the start element is zero. 11390 11391 // First check to see if the range contains zero. If not, the first 11392 // iteration exits. 11393 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 11394 if (!Range.contains(APInt(BitWidth, 0))) 11395 return SE.getZero(getType()); 11396 11397 if (isAffine()) { 11398 // If this is an affine expression then we have this situation: 11399 // Solve {0,+,A} in Range === Ax in Range 11400 11401 // We know that zero is in the range. If A is positive then we know that 11402 // the upper value of the range must be the first possible exit value. 11403 // If A is negative then the lower of the range is the last possible loop 11404 // value. Also note that we already checked for a full range. 11405 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 11406 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 11407 11408 // The exit value should be (End+A)/A. 11409 APInt ExitVal = (End + A).udiv(A); 11410 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 11411 11412 // Evaluate at the exit value. If we really did fall out of the valid 11413 // range, then we computed our trip count, otherwise wrap around or other 11414 // things must have happened. 
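    // A worked example for the affine case above: for {0,+,3} and
    // Range = [0, 10), End = 9 and ExitVal = (9 + 3) /u 3 = 4; evaluating
    // the chrec at 4 gives 12, which lies outside the range, so 4 is the
    // computed iteration count.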
11415     ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
11416     if (Range.contains(Val->getValue()))
11417       return SE.getCouldNotCompute();  // Something strange happened
11418
11419     // Ensure that the previous value is in the range. This is a sanity check.
11420     assert(Range.contains(
11421            EvaluateConstantChrecAtConstant(this,
11422                ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
11423            "Linear scev computation is off in a bad way!");
11424     return SE.getConstant(ExitValue);
11425   }
11426
11427   if (isQuadratic()) {
11428     if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
11429       return SE.getConstant(S.getValue());
11430   }
11431
11432   return SE.getCouldNotCompute();
11433 }
11434
11435 const SCEVAddRecExpr *
11436 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
11437   assert(getNumOperands() > 1 && "AddRec with zero step?");
11438   // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
11439   // but in this case we cannot guarantee that the value returned will be an
11440   // AddRec because SCEV does not have a fixed point where it stops
11441   // simplification: it is legal to return ({rec1} + {rec2}). For example, it
11442   // may happen if we reach an arithmetic depth limit while simplifying. So we
11443   // construct the returned value explicitly.
11444   SmallVector<const SCEV *, 3> Ops;
11445   // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
11446   // (this + Step) is {A+B,+,B+C,+,...,+,N}.
11447   for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
11448     Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
11449   // We know that the last operand is not a constant zero (otherwise it would
11450   // have been popped out earlier). This guarantees that if the result has
11451   // the same last operand, then it will also not be popped out, meaning that
11452   // the returned value will be an AddRec.
11453   const SCEV *Last = getOperand(getNumOperands() - 1);
11454   assert(!Last->isZero() && "Recurrence with zero step?");
11455   Ops.push_back(Last);
11456   return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
11457                                                SCEV::FlagAnyWrap));
11458 }
11459
11460 // Return true when S contains at least one undef value.
11461 static inline bool containsUndefs(const SCEV *S) {
11462   return SCEVExprContains(S, [](const SCEV *S) {
11463     if (const auto *SU = dyn_cast<SCEVUnknown>(S))
11464       return isa<UndefValue>(SU->getValue());
11465     return false;
11466   });
11467 }
11468
11469 namespace {
11470
11471 // Collect all steps of SCEV expressions.
11472 struct SCEVCollectStrides {
11473   ScalarEvolution &SE;
11474   SmallVectorImpl<const SCEV *> &Strides;
11475
11476   SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
11477       : SE(SE), Strides(S) {}
11478
11479   bool follow(const SCEV *S) {
11480     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
11481       Strides.push_back(AR->getStepRecurrence(SE));
11482     return true;
11483   }
11484
11485   bool isDone() const { return false; }
11486 };
11487
11488 // Collect all SCEVUnknown, SCEVMulExpr, and SCEVSignExtendExpr expressions.
11489 struct SCEVCollectTerms {
11490   SmallVectorImpl<const SCEV *> &Terms;
11491
11492   SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}
11493
11494   bool follow(const SCEV *S) {
11495     if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
11496         isa<SCEVSignExtendExpr>(S)) {
11497       if (!containsUndefs(S))
11498         Terms.push_back(S);
11499
11500       // Stop recursion: once we collected a term, do not walk its operands.
11501       return false;
11502     }
11503
11504     // Keep looking.
11505     return true;
11506   }
11507
11508   bool isDone() const { return false; }
11509 };
11510
11511 // Check if a SCEV contains an AddRecExpr.
11512 struct SCEVHasAddRec {
11513   bool &ContainsAddRec;
11514
11515   SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
11516     ContainsAddRec = false;
11517   }
11518
11519   bool follow(const SCEV *S) {
11520     if (isa<SCEVAddRecExpr>(S)) {
11521       ContainsAddRec = true;
11522
11523       // Stop recursion: once we have found an AddRec, do not walk its operands.
11524       return false;
11525     }
11526
11527     // Keep looking.
11528     return true;
11529   }
11530
11531   bool isDone() const { return false; }
11532 };
11533
11534 // Find factors that are multiplied with an expression that (possibly as a
11535 // subexpression) contains an AddRecExpr. In the expression:
11536 //
11537 //   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
11538 //
11539 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
11540 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
11541 // parameters as they form a product with an induction variable.
11542 //
11543 // This collector expects all array size parameters to be in the same MulExpr.
11544 // It might be necessary to later add support for collecting parameters that
11545 // are spread over different nested MulExpr.
11546 struct SCEVCollectAddRecMultiplies {
11547   SmallVectorImpl<const SCEV *> &Terms;
11548   ScalarEvolution &SE;
11549
11550   SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE)
11551       : Terms(T), SE(SE) {}
11552
11553   bool follow(const SCEV *S) {
11554     if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
11555       bool HasAddRec = false;
11556       SmallVector<const SCEV *, 0> Operands;
11557       for (auto Op : Mul->operands()) {
11558         const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
11559         if (Unknown && !isa<CallInst>(Unknown->getValue())) {
11560           Operands.push_back(Op);
11561         } else if (Unknown) {
11562           HasAddRec = true;
11563         } else {
11564           bool ContainsAddRec = false;
11565           SCEVHasAddRec AddRecChecker(ContainsAddRec);
11566           visitAll(Op, AddRecChecker);
11567           HasAddRec |= ContainsAddRec;
11568         }
11569       }
11570       if (Operands.size() == 0)
11571         return true;
11572
11573       if (!HasAddRec)
11574         return false;
11575
11576       Terms.push_back(SE.getMulExpr(Operands));
11577       // Stop recursion: once we collected a term, do not walk its operands.
11578       return false;
11579     }
11580
11581     // Keep looking.
11582     return true;
11583   }
11584
11585   bool isDone() const { return false; }
11586 };
11587
11588 } // end anonymous namespace
11589
11590 /// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
11591 /// two places:
11592 ///   1) The strides of AddRec expressions.
11593 ///   2) Unknowns that are multiplied with AddRec expressions.
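/// For example (illustrative), given {0,+,%n}<%loop> the stride %n is
/// collected via (1), and given 8 * (100 + %p * %q * (%a + {0,+,1}_loop))
/// the product %p * %q is collected via (2), as described for
/// SCEVCollectAddRecMultiplies above.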
11594 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 11595 SmallVectorImpl<const SCEV *> &Terms) { 11596 SmallVector<const SCEV *, 4> Strides; 11597 SCEVCollectStrides StrideCollector(*this, Strides); 11598 visitAll(Expr, StrideCollector); 11599 11600 LLVM_DEBUG({ 11601 dbgs() << "Strides:\n"; 11602 for (const SCEV *S : Strides) 11603 dbgs() << *S << "\n"; 11604 }); 11605 11606 for (const SCEV *S : Strides) { 11607 SCEVCollectTerms TermCollector(Terms); 11608 visitAll(S, TermCollector); 11609 } 11610 11611 LLVM_DEBUG({ 11612 dbgs() << "Terms:\n"; 11613 for (const SCEV *T : Terms) 11614 dbgs() << *T << "\n"; 11615 }); 11616 11617 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 11618 visitAll(Expr, MulCollector); 11619 } 11620 11621 static bool findArrayDimensionsRec(ScalarEvolution &SE, 11622 SmallVectorImpl<const SCEV *> &Terms, 11623 SmallVectorImpl<const SCEV *> &Sizes) { 11624 int Last = Terms.size() - 1; 11625 const SCEV *Step = Terms[Last]; 11626 11627 // End of recursion. 11628 if (Last == 0) { 11629 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 11630 SmallVector<const SCEV *, 2> Qs; 11631 for (const SCEV *Op : M->operands()) 11632 if (!isa<SCEVConstant>(Op)) 11633 Qs.push_back(Op); 11634 11635 Step = SE.getMulExpr(Qs); 11636 } 11637 11638 Sizes.push_back(Step); 11639 return true; 11640 } 11641 11642 for (const SCEV *&Term : Terms) { 11643 // Normalize the terms before the next call to findArrayDimensionsRec. 11644 const SCEV *Q, *R; 11645 SCEVDivision::divide(SE, Term, Step, &Q, &R); 11646 11647 // Bail out when GCD does not evenly divide one of the terms. 11648 if (!R->isZero()) 11649 return false; 11650 11651 Term = Q; 11652 } 11653 11654 // Remove all SCEVConstants. 11655 Terms.erase( 11656 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 11657 Terms.end()); 11658 11659 if (Terms.size() > 0) 11660 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 11661 return false; 11662 11663 Sizes.push_back(Step); 11664 return true; 11665 } 11666 11667 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 11668 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11669 for (const SCEV *T : Terms) 11670 if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); })) 11671 return true; 11672 11673 return false; 11674 } 11675 11676 // Return the number of product terms in S. 11677 static inline int numberOfTerms(const SCEV *S) { 11678 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11679 return Expr->getNumOperands(); 11680 return 1; 11681 } 11682 11683 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11684 if (isa<SCEVConstant>(T)) 11685 return nullptr; 11686 11687 if (isa<SCEVUnknown>(T)) 11688 return T; 11689 11690 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11691 SmallVector<const SCEV *, 2> Factors; 11692 for (const SCEV *Op : M->operands()) 11693 if (!isa<SCEVConstant>(Op)) 11694 Factors.push_back(Op); 11695 11696 return SE.getMulExpr(Factors); 11697 } 11698 11699 return T; 11700 } 11701 11702 /// Return the size of an element read or written by Inst. 
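/// Returns null for instructions that are neither loads nor stores. An
/// illustrative sketch of the intended use, where `AccessFn` is a
/// placeholder for a SCEV obtained from the instruction's pointer operand:
///
///   if (const SCEV *ElemSize = SE.getElementSize(&I))
///     SE.delinearize(AccessFn, Subscripts, Sizes, ElemSize);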
11703 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 11704 Type *Ty; 11705 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 11706 Ty = Store->getValueOperand()->getType(); 11707 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 11708 Ty = Load->getType(); 11709 else 11710 return nullptr; 11711 11712 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 11713 return getSizeOfExpr(ETy, Ty); 11714 } 11715 11716 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 11717 SmallVectorImpl<const SCEV *> &Sizes, 11718 const SCEV *ElementSize) { 11719 if (Terms.size() < 1 || !ElementSize) 11720 return; 11721 11722 // Early return when Terms do not contain parameters: we do not delinearize 11723 // non parametric SCEVs. 11724 if (!containsParameters(Terms)) 11725 return; 11726 11727 LLVM_DEBUG({ 11728 dbgs() << "Terms:\n"; 11729 for (const SCEV *T : Terms) 11730 dbgs() << *T << "\n"; 11731 }); 11732 11733 // Remove duplicates. 11734 array_pod_sort(Terms.begin(), Terms.end()); 11735 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 11736 11737 // Put larger terms first. 11738 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { 11739 return numberOfTerms(LHS) > numberOfTerms(RHS); 11740 }); 11741 11742 // Try to divide all terms by the element size. If term is not divisible by 11743 // element size, proceed with the original term. 11744 for (const SCEV *&Term : Terms) { 11745 const SCEV *Q, *R; 11746 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 11747 if (!Q->isZero()) 11748 Term = Q; 11749 } 11750 11751 SmallVector<const SCEV *, 4> NewTerms; 11752 11753 // Remove constant factors. 11754 for (const SCEV *T : Terms) 11755 if (const SCEV *NewT = removeConstantFactors(*this, T)) 11756 NewTerms.push_back(NewT); 11757 11758 LLVM_DEBUG({ 11759 dbgs() << "Terms after sorting:\n"; 11760 for (const SCEV *T : NewTerms) 11761 dbgs() << *T << "\n"; 11762 }); 11763 11764 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 11765 Sizes.clear(); 11766 return; 11767 } 11768 11769 // The last element to be pushed into Sizes is the size of an element. 11770 Sizes.push_back(ElementSize); 11771 11772 LLVM_DEBUG({ 11773 dbgs() << "Sizes:\n"; 11774 for (const SCEV *S : Sizes) 11775 dbgs() << *S << "\n"; 11776 }); 11777 } 11778 11779 void ScalarEvolution::computeAccessFunctions( 11780 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 11781 SmallVectorImpl<const SCEV *> &Sizes) { 11782 // Early exit in case this SCEV is not an affine multivariate function. 11783 if (Sizes.empty()) 11784 return; 11785 11786 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 11787 if (!AR->isAffine()) 11788 return; 11789 11790 const SCEV *Res = Expr; 11791 int Last = Sizes.size() - 1; 11792 for (int i = Last; i >= 0; i--) { 11793 const SCEV *Q, *R; 11794 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 11795 11796 LLVM_DEBUG({ 11797 dbgs() << "Res: " << *Res << "\n"; 11798 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 11799 dbgs() << "Res divided by Sizes[i]:\n"; 11800 dbgs() << "Quotient: " << *Q << "\n"; 11801 dbgs() << "Remainder: " << *R << "\n"; 11802 }); 11803 11804 Res = Q; 11805 11806 // Do not record the last subscript corresponding to the size of elements in 11807 // the array. 11808 if (i == Last) { 11809 11810 // Bail out if the remainder is too complex. 
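      // (A remainder that is itself an AddRec would mean the size-based
      // division left a loop-dependent component unaccounted for, so the
      // computed subscripts could not be trusted.)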
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization,
/// which is the offset of the start of the array. The SCEV->delinearize
/// algorithm computes the multiples of SCEV coefficients: that is, a pattern
/// matching of subexpressions in the stride and base of a SCEV corresponding
/// to the computation of a GCD (greatest common divisor) of base and stride.
/// When SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides are happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of the last dimension: when
/// the array has been statically allocated, one could compute the size of that
/// dimension by dividing the overall size of the array by the size of the
/// known dimensions: %m * %o * 8.
///
/// Finally, delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] in the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases check the output of a function pass, DelinearizationPass,
/// which walks through all loads and stores of a function, asks for the SCEV
/// of each memory access with respect to all enclosing loops, calls
/// SCEV->delinearize on that, and prints the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
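  // For the A[i][j][k] example in the comment above, the strides are
  // (8 * %m * %o), (8 * %o) and 8, so this step collects multiplicative
  // terms such as (8 * %m * %o) and (8 * %o); constant factors are stripped
  // later, in findArrayDimensions.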
11890 SmallVector<const SCEV *, 4> Terms; 11891 collectParametricTerms(Expr, Terms); 11892 11893 if (Terms.empty()) 11894 return; 11895 11896 // Second step: find subscript sizes. 11897 findArrayDimensions(Terms, Sizes, ElementSize); 11898 11899 if (Sizes.empty()) 11900 return; 11901 11902 // Third step: compute the access functions for each subscript. 11903 computeAccessFunctions(Expr, Subscripts, Sizes); 11904 11905 if (Subscripts.empty()) 11906 return; 11907 11908 LLVM_DEBUG({ 11909 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11910 dbgs() << "ArrayDecl[UnknownSize]"; 11911 for (const SCEV *S : Sizes) 11912 dbgs() << "[" << *S << "]"; 11913 11914 dbgs() << "\nArrayRef"; 11915 for (const SCEV *S : Subscripts) 11916 dbgs() << "[" << *S << "]"; 11917 dbgs() << "\n"; 11918 }); 11919 } 11920 11921 bool ScalarEvolution::getIndexExpressionsFromGEP( 11922 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts, 11923 SmallVectorImpl<int> &Sizes) { 11924 assert(Subscripts.empty() && Sizes.empty() && 11925 "Expected output lists to be empty on entry to this function."); 11926 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP"); 11927 Type *Ty = GEP->getPointerOperandType(); 11928 bool DroppedFirstDim = false; 11929 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 11930 const SCEV *Expr = getSCEV(GEP->getOperand(i)); 11931 if (i == 1) { 11932 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) { 11933 Ty = PtrTy->getElementType(); 11934 } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) { 11935 Ty = ArrayTy->getElementType(); 11936 } else { 11937 Subscripts.clear(); 11938 Sizes.clear(); 11939 return false; 11940 } 11941 if (auto *Const = dyn_cast<SCEVConstant>(Expr)) 11942 if (Const->getValue()->isZero()) { 11943 DroppedFirstDim = true; 11944 continue; 11945 } 11946 Subscripts.push_back(Expr); 11947 continue; 11948 } 11949 11950 auto *ArrayTy = dyn_cast<ArrayType>(Ty); 11951 if (!ArrayTy) { 11952 Subscripts.clear(); 11953 Sizes.clear(); 11954 return false; 11955 } 11956 11957 Subscripts.push_back(Expr); 11958 if (!(DroppedFirstDim && i == 2)) 11959 Sizes.push_back(ArrayTy->getNumElements()); 11960 11961 Ty = ArrayTy->getElementType(); 11962 } 11963 return !Subscripts.empty(); 11964 } 11965 11966 //===----------------------------------------------------------------------===// 11967 // SCEVCallbackVH Class Implementation 11968 //===----------------------------------------------------------------------===// 11969 11970 void ScalarEvolution::SCEVCallbackVH::deleted() { 11971 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11972 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11973 SE->ConstantEvolutionLoopExitValue.erase(PN); 11974 SE->eraseValueFromMap(getValPtr()); 11975 // this now dangles! 11976 } 11977 11978 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11979 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11980 11981 // Forget all the expressions associated with users of the old value, 11982 // so that future queries will recompute the expressions using the new 11983 // value. 11984 Value *Old = getValPtr(); 11985 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 11986 SmallPtrSet<User *, 8> Visited; 11987 while (!Worklist.empty()) { 11988 User *U = Worklist.pop_back_val(); 11989 // Deleting the Old value will cause this to dangle. Postpone 11990 // that until everything else is done. 
11991 if (U == Old) 11992 continue; 11993 if (!Visited.insert(U).second) 11994 continue; 11995 if (PHINode *PN = dyn_cast<PHINode>(U)) 11996 SE->ConstantEvolutionLoopExitValue.erase(PN); 11997 SE->eraseValueFromMap(U); 11998 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 11999 } 12000 // Delete the Old value. 12001 if (PHINode *PN = dyn_cast<PHINode>(Old)) 12002 SE->ConstantEvolutionLoopExitValue.erase(PN); 12003 SE->eraseValueFromMap(Old); 12004 // this now dangles! 12005 } 12006 12007 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 12008 : CallbackVH(V), SE(se) {} 12009 12010 //===----------------------------------------------------------------------===// 12011 // ScalarEvolution Class Implementation 12012 //===----------------------------------------------------------------------===// 12013 12014 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 12015 AssumptionCache &AC, DominatorTree &DT, 12016 LoopInfo &LI) 12017 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 12018 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 12019 LoopDispositions(64), BlockDispositions(64) { 12020 // To use guards for proving predicates, we need to scan every instruction in 12021 // relevant basic blocks, and not just terminators. Doing this is a waste of 12022 // time if the IR does not actually contain any calls to 12023 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 12024 // 12025 // This pessimizes the case where a pass that preserves ScalarEvolution wants 12026 // to _add_ guards to the module when there weren't any before, and wants 12027 // ScalarEvolution to optimize based on those guards. For now we prefer to be 12028 // efficient in lieu of being smart in that rather obscure case. 
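  //
  // Concretely, the lookup below searches the module for the declaration of
  // the guard intrinsic, i.e. (illustrative IR):
  //
  //   declare void @llvm.experimental.guard(i1, ...)
  //
  // and records whether it has any uses.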
12029 12030 auto *GuardDecl = F.getParent()->getFunction( 12031 Intrinsic::getName(Intrinsic::experimental_guard)); 12032 HasGuards = GuardDecl && !GuardDecl->use_empty(); 12033 } 12034 12035 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 12036 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 12037 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 12038 ValueExprMap(std::move(Arg.ValueExprMap)), 12039 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 12040 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 12041 PendingMerges(std::move(Arg.PendingMerges)), 12042 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 12043 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 12044 PredicatedBackedgeTakenCounts( 12045 std::move(Arg.PredicatedBackedgeTakenCounts)), 12046 ConstantEvolutionLoopExitValue( 12047 std::move(Arg.ConstantEvolutionLoopExitValue)), 12048 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 12049 LoopDispositions(std::move(Arg.LoopDispositions)), 12050 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 12051 BlockDispositions(std::move(Arg.BlockDispositions)), 12052 UnsignedRanges(std::move(Arg.UnsignedRanges)), 12053 SignedRanges(std::move(Arg.SignedRanges)), 12054 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 12055 UniquePreds(std::move(Arg.UniquePreds)), 12056 SCEVAllocator(std::move(Arg.SCEVAllocator)), 12057 LoopUsers(std::move(Arg.LoopUsers)), 12058 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 12059 FirstUnknown(Arg.FirstUnknown) { 12060 Arg.FirstUnknown = nullptr; 12061 } 12062 12063 ScalarEvolution::~ScalarEvolution() { 12064 // Iterate through all the SCEVUnknown instances and call their 12065 // destructors, so that they release their references to their values. 12066 for (SCEVUnknown *U = FirstUnknown; U;) { 12067 SCEVUnknown *Tmp = U; 12068 U = U->Next; 12069 Tmp->~SCEVUnknown(); 12070 } 12071 FirstUnknown = nullptr; 12072 12073 ExprValueMap.clear(); 12074 ValueExprMap.clear(); 12075 HasRecMap.clear(); 12076 12077 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 12078 // that a loop had multiple computable exits. 
12079 for (auto &BTCI : BackedgeTakenCounts) 12080 BTCI.second.clear(); 12081 for (auto &BTCI : PredicatedBackedgeTakenCounts) 12082 BTCI.second.clear(); 12083 12084 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 12085 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 12086 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 12087 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 12088 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 12089 } 12090 12091 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 12092 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 12093 } 12094 12095 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 12096 const Loop *L) { 12097 // Print all inner loops first 12098 for (Loop *I : *L) 12099 PrintLoopInfo(OS, SE, I); 12100 12101 OS << "Loop "; 12102 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12103 OS << ": "; 12104 12105 SmallVector<BasicBlock *, 8> ExitingBlocks; 12106 L->getExitingBlocks(ExitingBlocks); 12107 if (ExitingBlocks.size() != 1) 12108 OS << "<multiple exits> "; 12109 12110 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 12111 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 12112 else 12113 OS << "Unpredictable backedge-taken count.\n"; 12114 12115 if (ExitingBlocks.size() > 1) 12116 for (BasicBlock *ExitingBlock : ExitingBlocks) { 12117 OS << " exit count for " << ExitingBlock->getName() << ": " 12118 << *SE->getExitCount(L, ExitingBlock) << "\n"; 12119 } 12120 12121 OS << "Loop "; 12122 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12123 OS << ": "; 12124 12125 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 12126 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 12127 if (SE->isBackedgeTakenCountMaxOrZero(L)) 12128 OS << ", actual taken count either this or zero."; 12129 } else { 12130 OS << "Unpredictable max backedge-taken count. "; 12131 } 12132 12133 OS << "\n" 12134 "Loop "; 12135 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12136 OS << ": "; 12137 12138 SCEVUnionPredicate Pred; 12139 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 12140 if (!isa<SCEVCouldNotCompute>(PBT)) { 12141 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 12142 OS << " Predicates:\n"; 12143 Pred.print(OS, 4); 12144 } else { 12145 OS << "Unpredictable predicated backedge-taken count. "; 12146 } 12147 OS << "\n"; 12148 12149 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 12150 OS << "Loop "; 12151 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12152 OS << ": "; 12153 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 12154 } 12155 } 12156 12157 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 12158 switch (LD) { 12159 case ScalarEvolution::LoopVariant: 12160 return "Variant"; 12161 case ScalarEvolution::LoopInvariant: 12162 return "Invariant"; 12163 case ScalarEvolution::LoopComputable: 12164 return "Computable"; 12165 } 12166 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 12167 } 12168 12169 void ScalarEvolution::print(raw_ostream &OS) const { 12170 // ScalarEvolution's implementation of the print method is to print 12171 // out SCEV values of all instructions that are interesting. Doing 12172 // this potentially causes it to create new SCEV objects though, 12173 // which technically conflicts with the const qualifier. 
This isn't 12174 // observable from outside the class though, so casting away the 12175 // const isn't dangerous. 12176 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12177 12178 if (ClassifyExpressions) { 12179 OS << "Classifying expressions for: "; 12180 F.printAsOperand(OS, /*PrintType=*/false); 12181 OS << "\n"; 12182 for (Instruction &I : instructions(F)) 12183 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 12184 OS << I << '\n'; 12185 OS << " --> "; 12186 const SCEV *SV = SE.getSCEV(&I); 12187 SV->print(OS); 12188 if (!isa<SCEVCouldNotCompute>(SV)) { 12189 OS << " U: "; 12190 SE.getUnsignedRange(SV).print(OS); 12191 OS << " S: "; 12192 SE.getSignedRange(SV).print(OS); 12193 } 12194 12195 const Loop *L = LI.getLoopFor(I.getParent()); 12196 12197 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 12198 if (AtUse != SV) { 12199 OS << " --> "; 12200 AtUse->print(OS); 12201 if (!isa<SCEVCouldNotCompute>(AtUse)) { 12202 OS << " U: "; 12203 SE.getUnsignedRange(AtUse).print(OS); 12204 OS << " S: "; 12205 SE.getSignedRange(AtUse).print(OS); 12206 } 12207 } 12208 12209 if (L) { 12210 OS << "\t\t" "Exits: "; 12211 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 12212 if (!SE.isLoopInvariant(ExitValue, L)) { 12213 OS << "<<Unknown>>"; 12214 } else { 12215 OS << *ExitValue; 12216 } 12217 12218 bool First = true; 12219 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 12220 if (First) { 12221 OS << "\t\t" "LoopDispositions: { "; 12222 First = false; 12223 } else { 12224 OS << ", "; 12225 } 12226 12227 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12228 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 12229 } 12230 12231 for (auto *InnerL : depth_first(L)) { 12232 if (InnerL == L) 12233 continue; 12234 if (First) { 12235 OS << "\t\t" "LoopDispositions: { "; 12236 First = false; 12237 } else { 12238 OS << ", "; 12239 } 12240 12241 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12242 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 12243 } 12244 12245 OS << " }"; 12246 } 12247 12248 OS << "\n"; 12249 } 12250 } 12251 12252 OS << "Determining loop execution counts for: "; 12253 F.printAsOperand(OS, /*PrintType=*/false); 12254 OS << "\n"; 12255 for (Loop *I : LI) 12256 PrintLoopInfo(OS, &SE, I); 12257 } 12258 12259 ScalarEvolution::LoopDisposition 12260 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 12261 auto &Values = LoopDispositions[S]; 12262 for (auto &V : Values) { 12263 if (V.getPointer() == L) 12264 return V.getInt(); 12265 } 12266 Values.emplace_back(L, LoopVariant); 12267 LoopDisposition D = computeLoopDisposition(S, L); 12268 auto &Values2 = LoopDispositions[S]; 12269 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12270 if (V.getPointer() == L) { 12271 V.setInt(D); 12272 break; 12273 } 12274 } 12275 return D; 12276 } 12277 12278 ScalarEvolution::LoopDisposition 12279 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 12280 switch (S->getSCEVType()) { 12281 case scConstant: 12282 return LoopInvariant; 12283 case scPtrToInt: 12284 case scTruncate: 12285 case scZeroExtend: 12286 case scSignExtend: 12287 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 12288 case scAddRecExpr: { 12289 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12290 12291 // If L is the addrec's loop, it's computable. 
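    // (For example, {0,+,1}<%L> is LoopComputable with respect to %L itself
    // and, per the checks below, LoopInvariant with respect to any loop
    // nested inside %L.)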
12292 if (AR->getLoop() == L) 12293 return LoopComputable; 12294 12295 // Add recurrences are never invariant in the function-body (null loop). 12296 if (!L) 12297 return LoopVariant; 12298 12299 // Everything that is not defined at loop entry is variant. 12300 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 12301 return LoopVariant; 12302 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 12303 " dominate the contained loop's header?"); 12304 12305 // This recurrence is invariant w.r.t. L if AR's loop contains L. 12306 if (AR->getLoop()->contains(L)) 12307 return LoopInvariant; 12308 12309 // This recurrence is variant w.r.t. L if any of its operands 12310 // are variant. 12311 for (auto *Op : AR->operands()) 12312 if (!isLoopInvariant(Op, L)) 12313 return LoopVariant; 12314 12315 // Otherwise it's loop-invariant. 12316 return LoopInvariant; 12317 } 12318 case scAddExpr: 12319 case scMulExpr: 12320 case scUMaxExpr: 12321 case scSMaxExpr: 12322 case scUMinExpr: 12323 case scSMinExpr: { 12324 bool HasVarying = false; 12325 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 12326 LoopDisposition D = getLoopDisposition(Op, L); 12327 if (D == LoopVariant) 12328 return LoopVariant; 12329 if (D == LoopComputable) 12330 HasVarying = true; 12331 } 12332 return HasVarying ? LoopComputable : LoopInvariant; 12333 } 12334 case scUDivExpr: { 12335 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12336 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 12337 if (LD == LoopVariant) 12338 return LoopVariant; 12339 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 12340 if (RD == LoopVariant) 12341 return LoopVariant; 12342 return (LD == LoopInvariant && RD == LoopInvariant) ? 12343 LoopInvariant : LoopComputable; 12344 } 12345 case scUnknown: 12346 // All non-instruction values are loop invariant. All instructions are loop 12347 // invariant if they are not contained in the specified loop. 12348 // Instructions are never considered invariant in the function body 12349 // (null loop) because they are defined within the "loop". 12350 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 12351 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 12352 return LoopInvariant; 12353 case scCouldNotCompute: 12354 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12355 } 12356 llvm_unreachable("Unknown SCEV kind!"); 12357 } 12358 12359 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 12360 return getLoopDisposition(S, L) == LoopInvariant; 12361 } 12362 12363 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 12364 return getLoopDisposition(S, L) == LoopComputable; 12365 } 12366 12367 ScalarEvolution::BlockDisposition 12368 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12369 auto &Values = BlockDispositions[S]; 12370 for (auto &V : Values) { 12371 if (V.getPointer() == BB) 12372 return V.getInt(); 12373 } 12374 Values.emplace_back(BB, DoesNotDominateBlock); 12375 BlockDisposition D = computeBlockDisposition(S, BB); 12376 auto &Values2 = BlockDispositions[S]; 12377 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12378 if (V.getPointer() == BB) { 12379 V.setInt(D); 12380 break; 12381 } 12382 } 12383 return D; 12384 } 12385 12386 ScalarEvolution::BlockDisposition 12387 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12388 switch (S->getSCEVType()) { 12389 case scConstant: 12390 return ProperlyDominatesBlock; 12391 case scPtrToInt: 12392 case scTruncate: 12393 case scZeroExtend: 12394 case scSignExtend: 12395 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 12396 case scAddRecExpr: { 12397 // This uses a "dominates" query instead of "properly dominates" query 12398 // to test for proper dominance too, because the instruction which 12399 // produces the addrec's value is a PHI, and a PHI effectively properly 12400 // dominates its entire containing block. 12401 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12402 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 12403 return DoesNotDominateBlock; 12404 12405 // Fall through into SCEVNAryExpr handling. 12406 LLVM_FALLTHROUGH; 12407 } 12408 case scAddExpr: 12409 case scMulExpr: 12410 case scUMaxExpr: 12411 case scSMaxExpr: 12412 case scUMinExpr: 12413 case scSMinExpr: { 12414 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 12415 bool Proper = true; 12416 for (const SCEV *NAryOp : NAry->operands()) { 12417 BlockDisposition D = getBlockDisposition(NAryOp, BB); 12418 if (D == DoesNotDominateBlock) 12419 return DoesNotDominateBlock; 12420 if (D == DominatesBlock) 12421 Proper = false; 12422 } 12423 return Proper ? ProperlyDominatesBlock : DominatesBlock; 12424 } 12425 case scUDivExpr: { 12426 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12427 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 12428 BlockDisposition LD = getBlockDisposition(LHS, BB); 12429 if (LD == DoesNotDominateBlock) 12430 return DoesNotDominateBlock; 12431 BlockDisposition RD = getBlockDisposition(RHS, BB); 12432 if (RD == DoesNotDominateBlock) 12433 return DoesNotDominateBlock; 12434 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
12435 ProperlyDominatesBlock : DominatesBlock; 12436 } 12437 case scUnknown: 12438 if (Instruction *I = 12439 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 12440 if (I->getParent() == BB) 12441 return DominatesBlock; 12442 if (DT.properlyDominates(I->getParent(), BB)) 12443 return ProperlyDominatesBlock; 12444 return DoesNotDominateBlock; 12445 } 12446 return ProperlyDominatesBlock; 12447 case scCouldNotCompute: 12448 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12449 } 12450 llvm_unreachable("Unknown SCEV kind!"); 12451 } 12452 12453 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 12454 return getBlockDisposition(S, BB) >= DominatesBlock; 12455 } 12456 12457 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 12458 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 12459 } 12460 12461 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 12462 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 12463 } 12464 12465 bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const { 12466 auto IsS = [&](const SCEV *X) { return S == X; }; 12467 auto ContainsS = [&](const SCEV *X) { 12468 return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS); 12469 }; 12470 return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken); 12471 } 12472 12473 void 12474 ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 12475 ValuesAtScopes.erase(S); 12476 LoopDispositions.erase(S); 12477 BlockDispositions.erase(S); 12478 UnsignedRanges.erase(S); 12479 SignedRanges.erase(S); 12480 ExprValueMap.erase(S); 12481 HasRecMap.erase(S); 12482 MinTrailingZerosCache.erase(S); 12483 12484 for (auto I = PredicatedSCEVRewrites.begin(); 12485 I != PredicatedSCEVRewrites.end();) { 12486 std::pair<const SCEV *, const Loop *> Entry = I->first; 12487 if (Entry.first == S) 12488 PredicatedSCEVRewrites.erase(I++); 12489 else 12490 ++I; 12491 } 12492 12493 auto RemoveSCEVFromBackedgeMap = 12494 [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 12495 for (auto I = Map.begin(), E = Map.end(); I != E;) { 12496 BackedgeTakenInfo &BEInfo = I->second; 12497 if (BEInfo.hasOperand(S, this)) { 12498 BEInfo.clear(); 12499 Map.erase(I++); 12500 } else 12501 ++I; 12502 } 12503 }; 12504 12505 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 12506 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 12507 } 12508 12509 void 12510 ScalarEvolution::getUsedLoops(const SCEV *S, 12511 SmallPtrSetImpl<const Loop *> &LoopsUsed) { 12512 struct FindUsedLoops { 12513 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed) 12514 : LoopsUsed(LoopsUsed) {} 12515 SmallPtrSetImpl<const Loop *> &LoopsUsed; 12516 bool follow(const SCEV *S) { 12517 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) 12518 LoopsUsed.insert(AR->getLoop()); 12519 return true; 12520 } 12521 12522 bool isDone() const { return false; } 12523 }; 12524 12525 FindUsedLoops F(LoopsUsed); 12526 SCEVTraversal<FindUsedLoops>(F).visitAll(S); 12527 } 12528 12529 void ScalarEvolution::addToLoopUseLists(const SCEV *S) { 12530 SmallPtrSet<const Loop *, 8> LoopsUsed; 12531 getUsedLoops(S, LoopsUsed); 12532 for (auto *L : LoopsUsed) 12533 LoopUsers[L].push_back(S); 12534 } 12535 12536 void ScalarEvolution::verify() const { 12537 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12538 ScalarEvolution SE2(F, TLI, AC, DT, LI); 12539 12540 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end()); 12541 12542 // Map's SCEV expressions from one 
  // ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);

    // Unless VerifySCEVStrict is set, we only compare constant deltas.
    if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
      dbgs() << "Trip Count for " << *L << " Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *Delta << "\n";
      std::abort();
    }
  }

  // Collect all valid loops currently in LoopInfo.
  SmallPtrSet<Loop *, 32> ValidLoops;
  SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (ValidLoops.contains(L))
      continue;
    ValidLoops.insert(L);
    Worklist.append(L->begin(), L->end());
  }
  // Check for SCEV expressions referencing invalid/deleted loops.
12619 for (auto &KV : ValueExprMap) { 12620 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second); 12621 if (!AR) 12622 continue; 12623 assert(ValidLoops.contains(AR->getLoop()) && 12624 "AddRec references invalid loop"); 12625 } 12626 } 12627 12628 bool ScalarEvolution::invalidate( 12629 Function &F, const PreservedAnalyses &PA, 12630 FunctionAnalysisManager::Invalidator &Inv) { 12631 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 12632 // of its dependencies is invalidated. 12633 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 12634 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 12635 Inv.invalidate<AssumptionAnalysis>(F, PA) || 12636 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 12637 Inv.invalidate<LoopAnalysis>(F, PA); 12638 } 12639 12640 AnalysisKey ScalarEvolutionAnalysis::Key; 12641 12642 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 12643 FunctionAnalysisManager &AM) { 12644 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 12645 AM.getResult<AssumptionAnalysis>(F), 12646 AM.getResult<DominatorTreeAnalysis>(F), 12647 AM.getResult<LoopAnalysis>(F)); 12648 } 12649 12650 PreservedAnalyses 12651 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 12652 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 12653 return PreservedAnalyses::all(); 12654 } 12655 12656 PreservedAnalyses 12657 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 12658 // For compatibility with opt's -analyze feature under legacy pass manager 12659 // which was not ported to NPM. This keeps tests using 12660 // update_analyze_test_checks.py working. 12661 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 12662 << F.getName() << "':\n"; 12663 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 12664 return PreservedAnalyses::all(); 12665 } 12666 12667 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 12668 "Scalar Evolution Analysis", false, true) 12669 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 12670 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 12671 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 12672 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 12673 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 12674 "Scalar Evolution Analysis", false, true) 12675 12676 char ScalarEvolutionWrapperPass::ID = 0; 12677 12678 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 12679 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 12680 } 12681 12682 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 12683 SE.reset(new ScalarEvolution( 12684 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 12685 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 12686 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 12687 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 12688 return false; 12689 } 12690 12691 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 12692 12693 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 12694 SE->print(OS); 12695 } 12696 12697 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 12698 if (!VerifySCEV) 12699 return; 12700 12701 SE->verify(); 12702 } 12703 12704 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 12705 AU.setPreservesAll(); 12706 AU.addRequiredTransitive<AssumptionCacheTracker>(); 12707 
AU.addRequiredTransitive<LoopInfoWrapperPass>(); 12708 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 12709 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 12710 } 12711 12712 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 12713 const SCEV *RHS) { 12714 FoldingSetNodeID ID; 12715 assert(LHS->getType() == RHS->getType() && 12716 "Type mismatch between LHS and RHS"); 12717 // Unique this node based on the arguments 12718 ID.AddInteger(SCEVPredicate::P_Equal); 12719 ID.AddPointer(LHS); 12720 ID.AddPointer(RHS); 12721 void *IP = nullptr; 12722 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12723 return S; 12724 SCEVEqualPredicate *Eq = new (SCEVAllocator) 12725 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 12726 UniquePreds.InsertNode(Eq, IP); 12727 return Eq; 12728 } 12729 12730 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 12731 const SCEVAddRecExpr *AR, 12732 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12733 FoldingSetNodeID ID; 12734 // Unique this node based on the arguments 12735 ID.AddInteger(SCEVPredicate::P_Wrap); 12736 ID.AddPointer(AR); 12737 ID.AddInteger(AddedFlags); 12738 void *IP = nullptr; 12739 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12740 return S; 12741 auto *OF = new (SCEVAllocator) 12742 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 12743 UniquePreds.InsertNode(OF, IP); 12744 return OF; 12745 } 12746 12747 namespace { 12748 12749 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 12750 public: 12751 12752 /// Rewrites \p S in the context of a loop L and the SCEV predication 12753 /// infrastructure. 12754 /// 12755 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 12756 /// equivalences present in \p Pred. 12757 /// 12758 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 12759 /// \p NewPreds such that the result will be an AddRecExpr. 12760 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 12761 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12762 SCEVUnionPredicate *Pred) { 12763 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 12764 return Rewriter.visit(S); 12765 } 12766 12767 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12768 if (Pred) { 12769 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 12770 for (auto *Pred : ExprPreds) 12771 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 12772 if (IPred->getLHS() == Expr) 12773 return IPred->getRHS(); 12774 } 12775 return convertToAddRecWithPreds(Expr); 12776 } 12777 12778 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 12779 const SCEV *Operand = visit(Expr->getOperand()); 12780 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12781 if (AR && AR->getLoop() == L && AR->isAffine()) { 12782 // This couldn't be folded because the operand didn't have the nuw 12783 // flag. Add the nusw flag as an assumption that we could make. 
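      // (Informally, IncrementNUSW states that the recurrence does not wrap
      // in the unsigned sense even though its increment is interpreted as a
      // signed value, which is why the step is sign-extended below while the
      // start is zero-extended.)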
12784 const SCEV *Step = AR->getStepRecurrence(SE); 12785 Type *Ty = Expr->getType(); 12786 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 12787 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 12788 SE.getSignExtendExpr(Step, Ty), L, 12789 AR->getNoWrapFlags()); 12790 } 12791 return SE.getZeroExtendExpr(Operand, Expr->getType()); 12792 } 12793 12794 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 12795 const SCEV *Operand = visit(Expr->getOperand()); 12796 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12797 if (AR && AR->getLoop() == L && AR->isAffine()) { 12798 // This couldn't be folded because the operand didn't have the nsw 12799 // flag. Add the nssw flag as an assumption that we could make. 12800 const SCEV *Step = AR->getStepRecurrence(SE); 12801 Type *Ty = Expr->getType(); 12802 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 12803 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 12804 SE.getSignExtendExpr(Step, Ty), L, 12805 AR->getNoWrapFlags()); 12806 } 12807 return SE.getSignExtendExpr(Operand, Expr->getType()); 12808 } 12809 12810 private: 12811 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 12812 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12813 SCEVUnionPredicate *Pred) 12814 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 12815 12816 bool addOverflowAssumption(const SCEVPredicate *P) { 12817 if (!NewPreds) { 12818 // Check if we've already made this assumption. 12819 return Pred && Pred->implies(P); 12820 } 12821 NewPreds->insert(P); 12822 return true; 12823 } 12824 12825 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 12826 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12827 auto *A = SE.getWrapPredicate(AR, AddedFlags); 12828 return addOverflowAssumption(A); 12829 } 12830 12831 // If \p Expr represents a PHINode, we try to see if it can be represented 12832 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 12833 // to add this predicate as a runtime overflow check, we return the AddRec. 12834 // If \p Expr does not meet these conditions (is not a PHI node, or we 12835 // couldn't create an AddRec for it, or couldn't add the predicate), we just 12836 // return \p Expr. 12837 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 12838 if (!isa<PHINode>(Expr->getValue())) 12839 return Expr; 12840 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 12841 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 12842 if (!PredicatedRewrite) 12843 return Expr; 12844 for (auto *P : PredicatedRewrite->second){ 12845 // Wrap predicates from outer loops are not supported. 
12846 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 12847 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 12848 if (L != AR->getLoop()) 12849 return Expr; 12850 } 12851 if (!addOverflowAssumption(P)) 12852 return Expr; 12853 } 12854 return PredicatedRewrite->first; 12855 } 12856 12857 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 12858 SCEVUnionPredicate *Pred; 12859 const Loop *L; 12860 }; 12861 12862 } // end anonymous namespace 12863 12864 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 12865 SCEVUnionPredicate &Preds) { 12866 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 12867 } 12868 12869 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 12870 const SCEV *S, const Loop *L, 12871 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 12872 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 12873 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 12874 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 12875 12876 if (!AddRec) 12877 return nullptr; 12878 12879 // Since the transformation was successful, we can now transfer the SCEV 12880 // predicates. 12881 for (auto *P : TransformPreds) 12882 Preds.insert(P); 12883 12884 return AddRec; 12885 } 12886 12887 /// SCEV predicates 12888 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 12889 SCEVPredicateKind Kind) 12890 : FastID(ID), Kind(Kind) {} 12891 12892 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 12893 const SCEV *LHS, const SCEV *RHS) 12894 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 12895 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 12896 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 12897 } 12898 12899 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 12900 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 12901 12902 if (!Op) 12903 return false; 12904 12905 return Op->LHS == LHS && Op->RHS == RHS; 12906 } 12907 12908 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 12909 12910 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 12911 12912 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 12913 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 12914 } 12915 12916 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 12917 const SCEVAddRecExpr *AR, 12918 IncrementWrapFlags Flags) 12919 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 12920 12921 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 12922 12923 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 12924 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 12925 12926 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 12927 } 12928 12929 bool SCEVWrapPredicate::isAlwaysTrue() const { 12930 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 12931 IncrementWrapFlags IFlags = Flags; 12932 12933 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 12934 IFlags = clearFlags(IFlags, IncrementNSSW); 12935 12936 return IFlags == IncrementAnyWrap; 12937 } 12938 12939 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 12940 OS.indent(Depth) << *getExpr() << " Added Flags: "; 12941 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 12942 OS << "<nusw>"; 12943 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 12944 OS << "<nssw>"; 12945 OS << "\n"; 12946 } 12947 12948 SCEVWrapPredicate::IncrementWrapFlags 12949 
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 12950 ScalarEvolution &SE) { 12951 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 12952 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 12953 12954 // We can safely transfer the NSW flag as NSSW. 12955 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 12956 ImpliedFlags = IncrementNSSW; 12957 12958 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 12959 // If the increment is positive, the SCEV NUW flag will also imply the 12960 // WrapPredicate NUSW flag. 12961 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 12962 if (Step->getValue()->getValue().isNonNegative()) 12963 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 12964 } 12965 12966 return ImpliedFlags; 12967 } 12968 12969 /// Union predicates don't get cached so create a dummy set ID for it. 12970 SCEVUnionPredicate::SCEVUnionPredicate() 12971 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 12972 12973 bool SCEVUnionPredicate::isAlwaysTrue() const { 12974 return all_of(Preds, 12975 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 12976 } 12977 12978 ArrayRef<const SCEVPredicate *> 12979 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 12980 auto I = SCEVToPreds.find(Expr); 12981 if (I == SCEVToPreds.end()) 12982 return ArrayRef<const SCEVPredicate *>(); 12983 return I->second; 12984 } 12985 12986 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 12987 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 12988 return all_of(Set->Preds, 12989 [this](const SCEVPredicate *I) { return this->implies(I); }); 12990 12991 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 12992 if (ScevPredsIt == SCEVToPreds.end()) 12993 return false; 12994 auto &SCEVPreds = ScevPredsIt->second; 12995 12996 return any_of(SCEVPreds, 12997 [N](const SCEVPredicate *I) { return I->implies(N); }); 12998 } 12999 13000 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 13001 13002 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 13003 for (auto Pred : Preds) 13004 Pred->print(OS, Depth); 13005 } 13006 13007 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 13008 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 13009 for (auto Pred : Set->Preds) 13010 add(Pred); 13011 return; 13012 } 13013 13014 if (implies(N)) 13015 return; 13016 13017 const SCEV *Key = N->getExpr(); 13018 assert(Key && "Only SCEVUnionPredicate doesn't have an " 13019 " associated expression!"); 13020 13021 SCEVToPreds[Key].push_back(N); 13022 Preds.push_back(N); 13023 } 13024 13025 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 13026 Loop &L) 13027 : SE(SE), L(L) {} 13028 13029 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 13030 const SCEV *Expr = SE.getSCEV(V); 13031 RewriteEntry &Entry = RewriteMap[Expr]; 13032 13033 // If we already have an entry and the version matches, return it. 13034 if (Entry.second && Generation == Entry.first) 13035 return Entry.second; 13036 13037 // We found an entry but it's stale. Rewrite the stale entry 13038 // according to the current predicate. 
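  // (Starting from the stale rewrite rather than the original expression is
  // fine here: predicates are only ever added to Preds, so re-running the
  // rewriter with the current, larger predicate set refines the previous
  // result.)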
13039 if (Entry.second) 13040 Expr = Entry.second; 13041 13042 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 13043 Entry = {Generation, NewSCEV}; 13044 13045 return NewSCEV; 13046 } 13047 13048 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 13049 if (!BackedgeCount) { 13050 SCEVUnionPredicate BackedgePred; 13051 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 13052 addPredicate(BackedgePred); 13053 } 13054 return BackedgeCount; 13055 } 13056 13057 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 13058 if (Preds.implies(&Pred)) 13059 return; 13060 Preds.add(&Pred); 13061 updateGeneration(); 13062 } 13063 13064 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 13065 return Preds; 13066 } 13067 13068 void PredicatedScalarEvolution::updateGeneration() { 13069 // If the generation number wrapped recompute everything. 13070 if (++Generation == 0) { 13071 for (auto &II : RewriteMap) { 13072 const SCEV *Rewritten = II.second.second; 13073 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 13074 } 13075 } 13076 } 13077 13078 void PredicatedScalarEvolution::setNoOverflow( 13079 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 13080 const SCEV *Expr = getSCEV(V); 13081 const auto *AR = cast<SCEVAddRecExpr>(Expr); 13082 13083 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 13084 13085 // Clear the statically implied flags. 13086 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 13087 addPredicate(*SE.getWrapPredicate(AR, Flags)); 13088 13089 auto II = FlagsMap.insert({V, Flags}); 13090 if (!II.second) 13091 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 13092 } 13093 13094 bool PredicatedScalarEvolution::hasNoOverflow( 13095 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 13096 const SCEV *Expr = getSCEV(V); 13097 const auto *AR = cast<SCEVAddRecExpr>(Expr); 13098 13099 Flags = SCEVWrapPredicate::clearFlags( 13100 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 13101 13102 auto II = FlagsMap.find(V); 13103 13104 if (II != FlagsMap.end()) 13105 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 13106 13107 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 13108 } 13109 13110 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 13111 const SCEV *Expr = this->getSCEV(V); 13112 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 13113 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 13114 13115 if (!New) 13116 return nullptr; 13117 13118 for (auto *P : NewPreds) 13119 Preds.add(P); 13120 13121 updateGeneration(); 13122 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 13123 return New; 13124 } 13125 13126 PredicatedScalarEvolution::PredicatedScalarEvolution( 13127 const PredicatedScalarEvolution &Init) 13128 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 13129 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 13130 for (auto I : Init.FlagsMap) 13131 FlagsMap.insert(I); 13132 } 13133 13134 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 13135 // For each block. 13136 for (auto *BB : L.getBlocks()) 13137 for (auto &I : *BB) { 13138 if (!SE.isSCEVable(I.getType())) 13139 continue; 13140 13141 auto *Expr = SE.getSCEV(&I); 13142 auto II = RewriteMap.find(Expr); 13143 13144 if (II == RewriteMap.end()) 13145 continue; 13146 13147 // Don't print things that are not interesting. 
13148 if (II->second.second == Expr) 13149 continue; 13150 13151 OS.indent(Depth) << "[PSE]" << I << ":\n"; 13152 OS.indent(Depth + 2) << *Expr << "\n"; 13153 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 13154 } 13155 } 13156 13157 // Match the mathematical pattern A - (A / B) * B, where A and B can be 13158 // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used 13159 // for URem with constant power-of-2 second operands. 13160 // It's not always easy, as A and B can be folded (imagine A is X / 2, and B is 13161 // 4, A / B becomes X / 8). 13162 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS, 13163 const SCEV *&RHS) { 13164 // Try to match 'zext (trunc A to iB) to iY', which is used 13165 // for URem with constant power-of-2 second operands. Make sure the size of 13166 // the operand A matches the size of the whole expressions. 13167 if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr)) 13168 if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) { 13169 LHS = Trunc->getOperand(); 13170 if (LHS->getType() != Expr->getType()) 13171 LHS = getZeroExtendExpr(LHS, Expr->getType()); 13172 RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1) 13173 << getTypeSizeInBits(Trunc->getType())); 13174 return true; 13175 } 13176 const auto *Add = dyn_cast<SCEVAddExpr>(Expr); 13177 if (Add == nullptr || Add->getNumOperands() != 2) 13178 return false; 13179 13180 const SCEV *A = Add->getOperand(1); 13181 const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0)); 13182 13183 if (Mul == nullptr) 13184 return false; 13185 13186 const auto MatchURemWithDivisor = [&](const SCEV *B) { 13187 // (SomeExpr + (-(SomeExpr / B) * B)). 13188 if (Expr == getURemExpr(A, B)) { 13189 LHS = A; 13190 RHS = B; 13191 return true; 13192 } 13193 return false; 13194 }; 13195 13196 // (SomeExpr + (-1 * (SomeExpr / B) * B)). 13197 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0))) 13198 return MatchURemWithDivisor(Mul->getOperand(1)) || 13199 MatchURemWithDivisor(Mul->getOperand(2)); 13200 13201 // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)). 13202 if (Mul->getNumOperands() == 2) 13203 return MatchURemWithDivisor(Mul->getOperand(1)) || 13204 MatchURemWithDivisor(Mul->getOperand(0)) || 13205 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) || 13206 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0))); 13207 return false; 13208 } 13209 13210 const SCEV * 13211 ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) { 13212 SmallVector<BasicBlock*, 16> ExitingBlocks; 13213 L->getExitingBlocks(ExitingBlocks); 13214 13215 // Form an expression for the maximum exit count possible for this loop. We 13216 // merge the max and exact information to approximate a version of 13217 // getConstantMaxBackedgeTakenCount which isn't restricted to just constants. 
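  // The result below is effectively umin(EC_1, ..., EC_n) over the exiting
  // blocks with a known exact or constant-max exit count.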

const SCEV *
ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock*, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Form an expression for the maximum exit count possible for this loop. We
  // merge the max and exact information to approximate a version of
  // getConstantMaxBackedgeTakenCount which isn't restricted to just
  // constants.
  SmallVector<const SCEV*, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      ExitCount = getExitCount(L, ExitingBB,
                               ScalarEvolution::ConstantMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts);
}

/// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown
/// components following the Map (Value -> SCEV)), but skips AddRecExpr
/// because we cannot guarantee that the replacement is loop invariant in the
/// loop of the AddRec.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  ValueToSCEVMapTy &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr->getValue());
    if (I == Map.end())
      return Expr;
    return I->second;
  }
};

const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
    // Canonicalize the condition so that the SCEVUnknown, if any, is on the
    // left-hand side.
    if (!isa<SCEVUnknown>(LHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // For now, limit to conditions that provide information about unknown
    // expressions.
    auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
    if (!LHSUnknown)
      return;

    // TODO: use information from more predicates.
    switch (Predicate) {
    case CmpInst::ICMP_ULT: {
      if (!containsAddRecurrence(RHS)) {
        const SCEV *Base = LHS;
        auto I = RewriteMap.find(LHSUnknown->getValue());
        if (I != RewriteMap.end())
          Base = I->second;

        RewriteMap[LHSUnknown->getValue()] =
            getUMinExpr(Base, getMinusSCEV(RHS, getOne(RHS->getType())));
      }
      break;
    }
    case CmpInst::ICMP_ULE: {
      if (!containsAddRecurrence(RHS)) {
        const SCEV *Base = LHS;
        auto I = RewriteMap.find(LHSUnknown->getValue());
        if (I != RewriteMap.end())
          Base = I->second;
        RewriteMap[LHSUnknown->getValue()] = getUMinExpr(Base, RHS);
      }
      break;
    }
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewriteMap[LHSUnknown->getValue()] = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewriteMap[LHSUnknown->getValue()] =
            getUMaxExpr(LHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }
  };
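
  // For illustration (hypothetical IR values %x and %n, not exercised here),
  // CollectCondition records the following rewrites:
  //   guard "%x u< %n"  --> RewriteMap[%x] = umin(%x, %n - 1)
  //   guard "%x u<= %n" --> RewriteMap[%x] = umin(%x, %n)
  //   guard "%x == 7"   --> RewriteMap[%x] = 7
  //   guard "%x != 0"   --> RewriteMap[%x] = umax(%x, 1)
  // If the same value is guarded more than once, the ULT/ULE cases fold the
  // new bound into the existing entry through the Base lookup above.
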
  // Starting at the loop predecessor, climb up the predecessor chain while we
  // can find predecessors that have a unique successor leading to the
  // original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  ValueToSCEVMapTy RewriteMap;
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), L->getHeader());
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    // TODO: use information from more complex conditions, e.g. AND
    // expressions.
    auto *Cmp = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition());
    if (!Cmp)
      continue;

    auto Predicate = Cmp->getPredicate();
    // If the header is reached through the false successor, the condition
    // must be false on entry, so the inverse predicate holds.
    if (LoopEntryPredicate->getSuccessor(1) == Pair.second)
      Predicate = CmpInst::getInversePredicate(Predicate);
    CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  // Also collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
    if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
      continue;
    CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  if (RewriteMap.empty())
    return Expr;
  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}
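
// A minimal usage sketch for applyLoopGuards (hypothetical client code, not
// part of this file; SE is assumed to be a ScalarEvolution instance and L an
// analyzed loop):
//
//   const SCEV *BTC = SE.getBackedgeTakenCount(L);
//   if (!isa<SCEVCouldNotCompute>(BTC)) {
//     // After a dominating guard "%n u< 256", any %n inside BTC is replaced
//     // by umin(%n, 255), often tightening the deducible value range.
//     const SCEV *Guarded = SE.applyLoopGuards(BTC, L);
//   }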