//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant-derived "
                                     "loop"),
                            cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool> ClassifyExpressions(
    "scalar-evolution-classify-expressions", cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
May " 234 "be costly in terms of compile time")); 235 236 //===----------------------------------------------------------------------===// 237 // SCEV class definitions 238 //===----------------------------------------------------------------------===// 239 240 //===----------------------------------------------------------------------===// 241 // Implementation of the SCEV class. 242 // 243 244 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 245 LLVM_DUMP_METHOD void SCEV::dump() const { 246 print(dbgs()); 247 dbgs() << '\n'; 248 } 249 #endif 250 251 void SCEV::print(raw_ostream &OS) const { 252 switch (getSCEVType()) { 253 case scConstant: 254 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false); 255 return; 256 case scPtrToInt: { 257 const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this); 258 const SCEV *Op = PtrToInt->getOperand(); 259 OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to " 260 << *PtrToInt->getType() << ")"; 261 return; 262 } 263 case scTruncate: { 264 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this); 265 const SCEV *Op = Trunc->getOperand(); 266 OS << "(trunc " << *Op->getType() << " " << *Op << " to " 267 << *Trunc->getType() << ")"; 268 return; 269 } 270 case scZeroExtend: { 271 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this); 272 const SCEV *Op = ZExt->getOperand(); 273 OS << "(zext " << *Op->getType() << " " << *Op << " to " 274 << *ZExt->getType() << ")"; 275 return; 276 } 277 case scSignExtend: { 278 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this); 279 const SCEV *Op = SExt->getOperand(); 280 OS << "(sext " << *Op->getType() << " " << *Op << " to " 281 << *SExt->getType() << ")"; 282 return; 283 } 284 case scAddRecExpr: { 285 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this); 286 OS << "{" << *AR->getOperand(0); 287 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) 288 OS << ",+," << *AR->getOperand(i); 289 OS << "}<"; 290 if (AR->hasNoUnsignedWrap()) 291 OS << "nuw><"; 292 if (AR->hasNoSignedWrap()) 293 OS << "nsw><"; 294 if (AR->hasNoSelfWrap() && 295 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW))) 296 OS << "nw><"; 297 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false); 298 OS << ">"; 299 return; 300 } 301 case scAddExpr: 302 case scMulExpr: 303 case scUMaxExpr: 304 case scSMaxExpr: 305 case scUMinExpr: 306 case scSMinExpr: { 307 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this); 308 const char *OpStr = nullptr; 309 switch (NAry->getSCEVType()) { 310 case scAddExpr: OpStr = " + "; break; 311 case scMulExpr: OpStr = " * "; break; 312 case scUMaxExpr: OpStr = " umax "; break; 313 case scSMaxExpr: OpStr = " smax "; break; 314 case scUMinExpr: 315 OpStr = " umin "; 316 break; 317 case scSMinExpr: 318 OpStr = " smin "; 319 break; 320 default: 321 llvm_unreachable("There are no other nary expression types."); 322 } 323 OS << "("; 324 ListSeparator LS(OpStr); 325 for (const SCEV *Op : NAry->operands()) 326 OS << LS << *Op; 327 OS << ")"; 328 switch (NAry->getSCEVType()) { 329 case scAddExpr: 330 case scMulExpr: 331 if (NAry->hasNoUnsignedWrap()) 332 OS << "<nuw>"; 333 if (NAry->hasNoSignedWrap()) 334 OS << "<nsw>"; 335 break; 336 default: 337 // Nothing to print for other nary expressions. 
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which point to
  // this SCEVUnknown.
  setValPtr(New);
}
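
// The next three predicates recognize the constant expressions that encode
// sizeof/alignof/offsetof in IR (see ConstantExpr::getSizeOf and friends).
// As a sketch of the idiom, with i64 as the assumed pointer-sized type,
// sizeof(Ty) appears as
//   ptrtoint (Ty* getelementptr (Ty, Ty* null, i32 1) to i64)
// i.e. the byte offset of element 1 of a Ty array based at null.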
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
// If the maximum analysis depth was reached, return None, since we cannot
// tell for sure whether the two expressions are equivalent.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recurrences that
    // are used by one SCEV, so we can safely sort them by loop header
    // dominance. We require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
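///
/// For example, the operand list [%a, 2, %a] is regrouped as [2, %a, %a]:
/// the constant sorts first (scConstant is the smallest SCEVType) and the
/// duplicate %a's end up adjacent.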
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying, to avoid overflow when calculating K! / 2^T.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
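  // For example, with K = 4: K! = 24 = 2^3 * 3, so the loop below ends with
  // T == 3 and OddFactorial == 3. (T starts at 1 to account for the single
  // factor of 2 contributed by i == 2, whose odd part is 1.)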
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
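///
/// For example, {0,+,1,+,1} evaluates at iteration It to
/// 0*BC(It, 0) + 1*BC(It, 1) + 1*BC(It, 2) == It + It*(It-1)/2
/// == It*(It+1)/2, the running sum 0 + 1 + ... + It.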
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");
  assert(Depth <= 1 && "getPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return getTruncateOrZeroExtend(Op, Ty);

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return getTruncateOrZeroExtend(S, Ty);

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());
    assert(getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(
               Op->getType())) == getDataLayout().getTypeSizeInBits(IntPtrTy) &&
           "We can only model ptrtoint if SCEV's effective (integer) type is "
           "sufficiently wide to represent all possible pointer values.");

    // Perform some basic constant folding. If the operand of the ptrtoint cast
    // is a null pointer, don't create a ptrtoint SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(Ty);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return getTruncateOrZeroExtend(S, Ty);
  }

  assert(Depth == 0 &&
         "getPtrToIntExpr() should not self-recurse for non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression; we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
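  ///
  /// For example (a sketch of the intended effect, assuming 64-bit pointers),
  /// the pointer-typed expression (4 + %ptr) is rewritten to
  /// (4 + (ptrtoint i8* %ptr to i64)), so the ptrtoint lands on the
  /// SCEVUnknown %ptr and everything above it is integer arithmetic.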
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      Type *ExprPtrTy = Expr->getType();
      assert(ExprPtrTy->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      Type *ExprIntPtrTy = SE.getDataLayout().getIntPtrType(ExprPtrTy);
      return SE.getPtrToIntExpr(Expr, ExprIntPtrTy, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
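  // For example, trunc(((sext i32 %x to i64) + %y) to i32) distributes to
  // (%x + (trunc i64 %y to i32)): the truncate of the sext folds back to %x
  // (a truncate that merely replaces another cast is not counted), leaving a
  // single new truncate, on %y.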
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that during the recursion and its modifications the ID was
    // inserted into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
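// For example, for an i8 recurrence whose step has an unsigned-range maximum
// of 3, the limit is 0 - 3 == 253 (mod 256) with ICMP_ULT: any value ult 253
// can be incremented by the step without unsigned wrap.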
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
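  // For example, if Start == (%x + Step + %y), the loop below collects
  // DiffOps == {%x, %y} and PreStart becomes (%x + %y).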
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
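// For example, with BitWidth == 8, C == 0b00101101, and x + y + ... known to
// have at least three trailing zero bits: D == 0b00000101, and adding D back
// to (C - D + x + y + ...) merely fills in its three known-zero low bits, so
// the top-level addition cannot carry, let alone wrap.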
        C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty, Depth);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
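          // A sketch of the idea: for (unsigned i = 10; i != 0; --i) yields
          // the addrec {10,+,-1}; its values 10, 9, ..., 1 never wrap, yet
          // the step only matches the widened computation when sign-extended.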
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {

        auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
        if (AR->hasNoUnsignedWrap()) {
          // Same as the nuw case above - duplicated here to avoid a compile
          // time issue. It's not clear that the order of checks matters, but
          // it's one of two possible causes for a change which was reverted.
          // Be conservative for the moment.
          return getAddRecExpr(
              getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                       Depth + 1),
              getZeroExtendExpr(Step, Ty, Depth + 1), L,
              AR->getNoWrapFlags());
        }

        // For a negative step, we can extend the operands iff doing so only
        // traverses values in the range zext([0,UINT_MAX]).
        if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec. Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition has no unsigned overflow then we can, by definition,
      // commute the zero extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Address arithmetic often contains expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
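    // A worked sketch: in zext(5 + 4*X), C = 5 and the remaining terms have
    // at least two trailing zero bits, so D = 1 (the low bits of C) and the
    // expression can split into zext(1) + zext(4 + 4*X).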
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SZExtD, SZExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
    if (SM->hasNoUnsignedWrap()) {
      // If the multiply has no unsigned overflow then we can, by definition,
      // commute the zero extension with the multiply operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SM->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(2^K * (trunc X to iN)) to iM ->
    // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
    //
    // Proof:
    //
    //     zext(2^K * (trunc X to iN)) to iM
    //   = zext((trunc X to iN) << K) to iM
    //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
    //     (because shl removes the top K bits)
    //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
    //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
    //
    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
                               MulLHS->getAPInt().logBase2();
            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
            return getMulExpr(
                getZeroExtendExpr(MulLHS, Ty),
                getZeroExtendExpr(
                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
                SCEV::FlagNUW, Depth + 1);
          }
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty, Depth);
  }

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition has no signed overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }

    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not signed wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // For instance, this will bring two seemingly different expressions:
    //     1 + sext(5 + 20 * %x + 24 * %y)  and
    //         sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //     2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
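      // e.g., once {0,+,1}<nsw> is known for an i8 induction variable,
      // sext({0,+,1}) to i32 becomes the i32 addrec {0,+,1} directly.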
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      auto NewFlags = proveNoSignedWrapViaInduction(AR);
      setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      if (AR->hasNoSignedWrap()) {
        // Same as the nsw case above - duplicated here to avoid a compile
        // time issue. It's not clear that the order of checks matters, but
        // it's one of two possible causes for a change which was reverted.
        // Be conservative for the moment.
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }

      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably positive and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
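  // ("Folded" here means the result is not just a SCEVZeroExtendExpr node
  // wrapping Op; the same reading applies to the sext attempt below.)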
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, and update the given map. This is a helper function for
/// getAddExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(), Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `Flags' as can't-wrap behavior. Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const ArrayRef<const SCEV *> Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
      isa<SCEVConstant>(Ops[0])) {

    auto Opcode = [&] {
      switch (Type) {
      case scAddExpr:
        return Instruction::Add;
      case scMulExpr:
        return Instruction::Mul;
      default:
        llvm_unreachable("Unexpected SCEV op.");
      }
    }();

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();

    // (A <opcode> C) --> (A <opcode> C)<nsw> if the op has no signed overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }

    // (A <opcode> C) --> (A <opcode> C)<nuw> if the op has no unsigned
    // overflow.
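    // For instance (a sketch): with Opcode == Add and C == 1, the guaranteed
    // no-unsigned-wrap region is [0, UINT_MAX - 1], so FlagNUW is inferred
    // whenever the unsigned range of Ops[1] is known to exclude UINT_MAX.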
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}

bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}

/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
  };

  // Limit recursion depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateAddExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
    // Don't strengthen flags if we have no new information.
    SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
    if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
      Add->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {  // X + Y + Y --> X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
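      // e.g., for X + Y + Y + Y this finds Count == 3 for Y and produces
      // X + Y*3.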
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, OrigFlags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(
              getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands, they would be next.
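  // e.g., (A + (B + C)) is flattened to (A + B + C) here before resorting.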
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // Skip over the add expressions until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply. If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
          // (In the two-operand case, MulOp == 0 picks operand 1 and vice
          // versa.)
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_begin()+MulOp);
              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
              InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_begin()+OMulOp);
              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
              InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
            const SCEV *InnerMulSum =
                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                              SCEV::FlagAnyWrap, Depth + 1);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
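    // e.g., X + {S,+,T}<L>, with X loop-invariant in L, folds to
    // {X+S,+,T}<L> below.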
    if (!LIOps.empty()) {
      // Compute nowrap flags for the addition of the loop-invariant ops and
      // the addrec. Temporarily push it as an operand for that purpose.
      LIOps.push_back(AddRec);
      SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
      LIOps.pop_back();

      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
      // This follows from the fact that the no-wrap flags on the outer add
      // expression are applicable on the 0th iteration, when the add recurrence
      // will be equal to its start value.
      AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together. If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExpr's to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
                 AddRec->getLoop()->getHeader()) &&
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  setNoWrapFlags(S, Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient. If an
/// intermediate computation overflows, Overflow will be set and the return
/// will be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At iteration i we multiply by the numerator's i-th factor, n-(i-1), and
  // then divide by i. Each such division always produces an integral result,
  // which helps reduce the chance of overflow in the intermediate
  // computations. However, we can still overflow even when the final result
  // would fit.
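  // For example, Choose(4, 2) runs: i = 1 gives r = 4/1 = 4, and i = 2 gives
  // r = (4*3)/2 = 6.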

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}

/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we have a multiply of zero, it will always be zero.
    if (LHSC->getValue()->isZero())
      return LHSC;

    // If we are left with a constant one being multiplied, strip it off.
    if (LHSC->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
  };

  // Limit recursion depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateMulExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) {
    // Don't strengthen flags if we have no new information.
    SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
    if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
      Mul->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    if (Ops.size() == 2) {
      // C1*(C2+V) -> C1*C2 + C1*V
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant, apply this
        // transformation as well.
        //
        // TODO: There are some cases where this transformation is not
        // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
        // this transformation should be narrowed down.
        if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            getMulExpr(LHSC, Add->getOperand(1),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            SCEV::FlagAnyWrap, Depth + 1);

      if (Ops[0]->isAllOnesValue()) {
        // If we have a mul by -1 of an add, try distributing the -1 among the
        // add operands.
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }
  }

  // Skip over the add expressions until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
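    // e.g., X * {S,+,T}<L>, with X loop-invariant in L, becomes
    // {X*S,+,X*T}<L> below.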
2956 SmallVector<const SCEV *, 8> LIOps; 2957 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2958 const Loop *AddRecLoop = AddRec->getLoop(); 2959 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2960 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2961 LIOps.push_back(Ops[i]); 2962 Ops.erase(Ops.begin()+i); 2963 --i; --e; 2964 } 2965 2966 // If we found some loop invariants, fold them into the recurrence. 2967 if (!LIOps.empty()) { 2968 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 2969 SmallVector<const SCEV *, 4> NewOps; 2970 NewOps.reserve(AddRec->getNumOperands()); 2971 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 2972 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 2973 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 2974 SCEV::FlagAnyWrap, Depth + 1)); 2975 2976 // Build the new addrec. Propagate the NUW and NSW flags if both the 2977 // outer mul and the inner addrec are guaranteed to have no overflow. 2978 // 2979 // No self-wrap cannot be guaranteed after changing the step size, but 2980 // will be inferred if either NUW or NSW is true. 2981 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); 2982 const SCEV *NewRec = getAddRecExpr( 2983 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); 2984 2985 // If all of the other operands were loop invariant, we are done. 2986 if (Ops.size() == 1) return NewRec; 2987 2988 // Otherwise, multiply the folded AddRec by the non-invariant parts. 2989 for (unsigned i = 0;; ++i) 2990 if (Ops[i] == AddRec) { 2991 Ops[i] = NewRec; 2992 break; 2993 } 2994 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2995 } 2996 2997 // Okay, if there weren't any loop invariants to be folded, check to see 2998 // if there are multiple AddRec's with the same loop induction variable 2999 // being multiplied together. If so, we can fold them. 3000 3001 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 3002 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 3003 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 3004 // ]]],+,...up to x=2n}. 3005 // Note that the arguments to choose() are always integers with values 3006 // known at compile time, never SCEV objects. 3007 // 3008 // The implementation avoids pointless extra computations when the two 3009 // addrec's are of different length (mathematically, it's equivalent to 3010 // an infinite stream of zeros on the right). 3011 bool OpsModified = false; 3012 for (unsigned OtherIdx = Idx+1; 3013 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 3014 ++OtherIdx) { 3015 const SCEVAddRecExpr *OtherAddRec = 3016 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 3017 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 3018 continue; 3019 3020 // Limit max number of arguments to avoid creation of unreasonably big 3021 // SCEVAddRecs with very complex operands. 
3022 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3023 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3024 continue;
3025
3026 bool Overflow = false;
3027 Type *Ty = AddRec->getType();
3028 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3029 SmallVector<const SCEV*, 7> AddRecOps;
3030 for (int x = 0, xe = AddRec->getNumOperands() +
3031 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3032 SmallVector<const SCEV *, 7> SumOps;
3033 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3034 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3035 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3036 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3037 z < ze && !Overflow; ++z) {
3038 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3039 uint64_t Coeff;
3040 if (LargerThan64Bits)
3041 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3042 else
3043 Coeff = Coeff1*Coeff2;
3044 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3045 const SCEV *Term1 = AddRec->getOperand(y-z);
3046 const SCEV *Term2 = OtherAddRec->getOperand(z);
3047 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3048 SCEV::FlagAnyWrap, Depth + 1));
3049 }
3050 }
3051 if (SumOps.empty())
3052 SumOps.push_back(getZero(Ty));
3053 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3054 }
3055 if (!Overflow) {
3056 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3057 SCEV::FlagAnyWrap);
3058 if (Ops.size() == 2) return NewAddRec;
3059 Ops[Idx] = NewAddRec;
3060 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3061 OpsModified = true;
3062 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3063 if (!AddRec)
3064 break;
3065 }
3066 }
3067 if (OpsModified)
3068 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3069
3070 // Otherwise couldn't fold anything into this recurrence. Move onto the
3071 // next one.
3072 }
3073
3074 // Okay, it looks like we really DO need a mul expr. Check to see if we
3075 // already have one, otherwise create a new one.
3076 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3077 }
3078
3079 /// Compute an unsigned remainder expression based on unsigned division.
3080 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3081 const SCEV *RHS) {
3082 assert(getEffectiveSCEVType(LHS->getType()) ==
3083 getEffectiveSCEVType(RHS->getType()) &&
3084 "SCEVURemExpr operand types don't match!");
3085
3086 // Short-circuit easy cases.
3087 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3088 // If the constant is one, the result is trivial.
3089 if (RHSC->getValue()->isOne())
3090 return getZero(LHS->getType()); // X urem 1 --> 0
3091
3092 // If the constant is a power of two, fold into a zext(trunc(LHS)).
3093 if (RHSC->getAPInt().isPowerOf2()) {
3094 Type *FullTy = LHS->getType();
3095 Type *TruncTy =
3096 IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3097 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3098 }
3099 }
3100
3101 // Fall back to computing %x urem %y as %x -<nuw> ((%x udiv %y) *<nuw> %y).
3102 const SCEV *UDiv = getUDivExpr(LHS, RHS);
3103 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3104 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3105 }
3106
3107 /// Get a canonical unsigned division expression, or something simpler if
3108 /// possible.
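/// For example (illustrative), X udiv 1 folds to X, and {0,+,4}<L> /u 2 can
/// fold to {0,+,2}<L> once the zero-extension check below proves that the
/// recurrence cannot wrap.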
3109 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3110 const SCEV *RHS) {
3111 assert(getEffectiveSCEVType(LHS->getType()) ==
3112 getEffectiveSCEVType(RHS->getType()) &&
3113 "SCEVUDivExpr operand types don't match!");
3114
3115 FoldingSetNodeID ID;
3116 ID.AddInteger(scUDivExpr);
3117 ID.AddPointer(LHS);
3118 ID.AddPointer(RHS);
3119 void *IP = nullptr;
3120 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3121 return S;
3122
3123 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3124 if (RHSC->getValue()->isOne())
3125 return LHS; // X udiv 1 --> X
3126 // If the denominator is zero, the result of the udiv is undefined. Don't
3127 // try to analyze it, because the resolution chosen here may differ from
3128 // the resolution chosen in other parts of the compiler.
3129 if (!RHSC->getValue()->isZero()) {
3130 // Determine if the division can be folded into the operands of
3131 // LHS.
3132 // TODO: Generalize this to non-constants by using known-bits information.
3133 Type *Ty = LHS->getType();
3134 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3135 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3136 // For non-power-of-two values, effectively round the value up to the
3137 // nearest power of two.
3138 if (!RHSC->getAPInt().isPowerOf2())
3139 ++MaxShiftAmt;
3140 IntegerType *ExtTy =
3141 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3142 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3143 if (const SCEVConstant *Step =
3144 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3145 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3146 const APInt &StepInt = Step->getAPInt();
3147 const APInt &DivInt = RHSC->getAPInt();
3148 if (!StepInt.urem(DivInt) &&
3149 getZeroExtendExpr(AR, ExtTy) ==
3150 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3151 getZeroExtendExpr(Step, ExtTy),
3152 AR->getLoop(), SCEV::FlagAnyWrap)) {
3153 SmallVector<const SCEV *, 4> Operands;
3154 for (const SCEV *Op : AR->operands())
3155 Operands.push_back(getUDivExpr(Op, RHS));
3156 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3157 }
3158 // Get a canonical UDivExpr for a recurrence.
3159 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3160 // We can currently only fold X%N if X is constant.
3161 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3162 if (StartC && !DivInt.urem(StepInt) &&
3163 getZeroExtendExpr(AR, ExtTy) ==
3164 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3165 getZeroExtendExpr(Step, ExtTy),
3166 AR->getLoop(), SCEV::FlagAnyWrap)) {
3167 const APInt &StartInt = StartC->getAPInt();
3168 const APInt &StartRem = StartInt.urem(StepInt);
3169 if (StartRem != 0) {
3170 const SCEV *NewLHS =
3171 getAddRecExpr(getConstant(StartInt - StartRem), Step,
3172 AR->getLoop(), SCEV::FlagNW);
3173 if (LHS != NewLHS) {
3174 LHS = NewLHS;
3175
3176 // Reset the ID to include the new LHS, and check if it is
3177 // already cached.
3178 ID.clear();
3179 ID.AddInteger(scUDivExpr);
3180 ID.AddPointer(LHS);
3181 ID.AddPointer(RHS);
3182 IP = nullptr;
3183 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3184 return S;
3185 }
3186 }
3187 }
3188 }
3189 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
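// For instance (illustrative), (%x * 8) /u 4 can become %x * 2: 8 /u 4
// folds cleanly, and the zero-extend comparison below verifies that the
// product loses no bits in the original type.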
3190 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3191 SmallVector<const SCEV *, 4> Operands; 3192 for (const SCEV *Op : M->operands()) 3193 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3194 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3195 // Find an operand that's safely divisible. 3196 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3197 const SCEV *Op = M->getOperand(i); 3198 const SCEV *Div = getUDivExpr(Op, RHSC); 3199 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3200 Operands = SmallVector<const SCEV *, 4>(M->operands()); 3201 Operands[i] = Div; 3202 return getMulExpr(Operands); 3203 } 3204 } 3205 } 3206 3207 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3208 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3209 if (auto *DivisorConstant = 3210 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3211 bool Overflow = false; 3212 APInt NewRHS = 3213 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3214 if (Overflow) { 3215 return getConstant(RHSC->getType(), 0, false); 3216 } 3217 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3218 } 3219 } 3220 3221 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3222 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3223 SmallVector<const SCEV *, 4> Operands; 3224 for (const SCEV *Op : A->operands()) 3225 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3226 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3227 Operands.clear(); 3228 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3229 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3230 if (isa<SCEVUDivExpr>(Op) || 3231 getMulExpr(Op, RHS) != A->getOperand(i)) 3232 break; 3233 Operands.push_back(Op); 3234 } 3235 if (Operands.size() == A->getNumOperands()) 3236 return getAddExpr(Operands); 3237 } 3238 } 3239 3240 // Fold if both operands are constant. 3241 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3242 Constant *LHSCV = LHSC->getValue(); 3243 Constant *RHSCV = RHSC->getValue(); 3244 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3245 RHSCV))); 3246 } 3247 } 3248 } 3249 3250 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3251 // changes). Make sure we get a new one. 3252 IP = nullptr; 3253 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3254 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3255 LHS, RHS); 3256 UniqueSCEVs.InsertNode(S, IP); 3257 addToLoopUseLists(S); 3258 return S; 3259 } 3260 3261 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3262 APInt A = C1->getAPInt().abs(); 3263 APInt B = C2->getAPInt().abs(); 3264 uint32_t ABW = A.getBitWidth(); 3265 uint32_t BBW = B.getBitWidth(); 3266 3267 if (ABW > BBW) 3268 B = B.zext(ABW); 3269 else if (ABW < BBW) 3270 A = A.zext(BBW); 3271 3272 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3273 } 3274 3275 /// Get a canonical unsigned division expression, or something simpler if 3276 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3277 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3278 /// it's not exact because the udiv may be clearing bits. 3279 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3280 const SCEV *RHS) { 3281 // TODO: we could try to find factors in all sorts of things, but for now we 3282 // just deal with u/exact (multiply, constant). 
See SCEVDivision towards the
3283 // end of this file for inspiration.
3284
3285 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3286 if (!Mul || !Mul->hasNoUnsignedWrap())
3287 return getUDivExpr(LHS, RHS);
3288
3289 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3290 // If the mulexpr multiplies by a constant, then that constant must be the
3291 // first element of the mulexpr.
3292 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3293 if (LHSCst == RHSCst) {
3294 SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
3295 return getMulExpr(Operands);
3296 }
3297
3298 // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3299 // that there's a factor provided by one of the other terms. We need to
3300 // check.
3301 APInt Factor = gcd(LHSCst, RHSCst);
3302 if (!Factor.isIntN(1)) {
3303 LHSCst =
3304 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3305 RHSCst =
3306 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3307 SmallVector<const SCEV *, 2> Operands;
3308 Operands.push_back(LHSCst);
3309 Operands.append(Mul->op_begin() + 1, Mul->op_end());
3310 LHS = getMulExpr(Operands);
3311 RHS = RHSCst;
3312 Mul = dyn_cast<SCEVMulExpr>(LHS);
3313 if (!Mul)
3314 return getUDivExactExpr(LHS, RHS);
3315 }
3316 }
3317 }
3318
3319 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3320 if (Mul->getOperand(i) == RHS) {
3321 SmallVector<const SCEV *, 2> Operands;
3322 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3323 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3324 return getMulExpr(Operands);
3325 }
3326 }
3327
3328 return getUDivExpr(LHS, RHS);
3329 }
3330
3331 /// Get an add recurrence expression for the specified loop. Simplify the
3332 /// expression as much as possible.
3333 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3334 const Loop *L,
3335 SCEV::NoWrapFlags Flags) {
3336 SmallVector<const SCEV *, 4> Operands;
3337 Operands.push_back(Start);
3338 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3339 if (StepChrec->getLoop() == L) {
3340 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3341 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3342 }
3343
3344 Operands.push_back(Step);
3345 return getAddRecExpr(Operands, L, Flags);
3346 }
3347
3348 /// Get an add recurrence expression for the specified loop. Simplify the
3349 /// expression as much as possible.
3350 const SCEV *
3351 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3352 const Loop *L, SCEV::NoWrapFlags Flags) {
3353 if (Operands.size() == 1) return Operands[0];
3354 #ifndef NDEBUG
3355 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3356 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
3357 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3358 "SCEVAddRecExpr operand types don't match!");
3359 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3360 assert(isLoopInvariant(Operands[i], L) &&
3361 "SCEVAddRecExpr operand is not loop-invariant!");
3362 #endif
3363
3364 if (Operands.back()->isZero()) {
3365 Operands.pop_back();
3366 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3367 }
3368
3369 // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
3370 // use that information to infer NUW and NSW flags.
However, computing a
3371 // BE count requires calling getAddRecExpr, so we may not yet have a
3372 // meaningful BE count at this point (and if we don't, we'd be stuck
3373 // with a SCEVCouldNotCompute as the cached BE count).
3374
3375 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3376
3377 // Canonicalize nested AddRecs by nesting them in order of loop depth.
3378 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3379 const Loop *NestedLoop = NestedAR->getLoop();
3380 if (L->contains(NestedLoop)
3381 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3382 : (!NestedLoop->contains(L) &&
3383 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3384 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3385 Operands[0] = NestedAR->getStart();
3386 // AddRecs require their operands to be loop-invariant with respect to
3387 // their loops. Don't perform this transformation if it would break this
3388 // requirement.
3389 bool AllInvariant = all_of(
3390 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3391
3392 if (AllInvariant) {
3393 // Create a recurrence for the outer loop with the same step size.
3394 //
3395 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3396 // inner recurrence has the same property.
3397 SCEV::NoWrapFlags OuterFlags =
3398 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3399
3400 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3401 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3402 return isLoopInvariant(Op, NestedLoop);
3403 });
3404
3405 if (AllInvariant) {
3406 // Ok, both add recurrences are valid after the transformation.
3407 //
3408 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3409 // the outer recurrence has the same property.
3410 SCEV::NoWrapFlags InnerFlags =
3411 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3412 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3413 }
3414 }
3415 // Reset Operands to its original state.
3416 Operands[0] = NestedAR;
3417 }
3418 }
3419
3420 // Okay, it looks like we really DO need an addrec expr. Check to see if we
3421 // already have one, otherwise create a new one.
3422 return getOrCreateAddRecExpr(Operands, L, Flags);
3423 }
3424
3425 const SCEV *
3426 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3427 const SmallVectorImpl<const SCEV *> &IndexExprs) {
3428 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3429 // getSCEV(Base)->getType() has the same address space as Base->getType()
3430 // because SCEV::getType() preserves the address space.
3431 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3432 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
3433 // instruction to its SCEV, because the Instruction may be guarded by control
3434 // flow and the no-overflow bits may not be valid for the expression in any
3435 // context. This can be fixed similarly to how these flags are handled for
3436 // adds.
3437 SCEV::NoWrapFlags OffsetWrap =
3438 GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3439
3440 Type *CurTy = GEP->getType();
3441 bool FirstIter = true;
3442 SmallVector<const SCEV *, 4> Offsets;
3443 for (const SCEV *IndexExpr : IndexExprs) {
3444 // Compute the (potentially symbolic) offset in bytes for this index.
3445 if (StructType *STy = dyn_cast<StructType>(CurTy)) {
3446 // For a struct, add the member offset.
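// For example (illustrative): for %pair = type { i32, i64 } and field
// index 1, getOffsetOfExpr yields 8 under a typical 64-bit DataLayout
// (4 bytes of i32 plus 4 bytes of padding to align the i64).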
3447 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3448 unsigned FieldNo = Index->getZExtValue(); 3449 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3450 Offsets.push_back(FieldOffset); 3451 3452 // Update CurTy to the type of the field at Index. 3453 CurTy = STy->getTypeAtIndex(Index); 3454 } else { 3455 // Update CurTy to its element type. 3456 if (FirstIter) { 3457 assert(isa<PointerType>(CurTy) && 3458 "The first index of a GEP indexes a pointer"); 3459 CurTy = GEP->getSourceElementType(); 3460 FirstIter = false; 3461 } else { 3462 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3463 } 3464 // For an array, add the element offset, explicitly scaled. 3465 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3466 // Getelementptr indices are signed. 3467 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3468 3469 // Multiply the index by the element size to compute the element offset. 3470 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); 3471 Offsets.push_back(LocalOffset); 3472 } 3473 } 3474 3475 // Handle degenerate case of GEP without offsets. 3476 if (Offsets.empty()) 3477 return BaseExpr; 3478 3479 // Add the offsets together, assuming nsw if inbounds. 3480 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); 3481 // Add the base address and the offset. We cannot use the nsw flag, as the 3482 // base address is unsigned. However, if we know that the offset is 3483 // non-negative, we can use nuw. 3484 SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset) 3485 ? SCEV::FlagNUW : SCEV::FlagAnyWrap; 3486 return getAddExpr(BaseExpr, Offset, BaseWrap); 3487 } 3488 3489 std::tuple<SCEV *, FoldingSetNodeID, void *> 3490 ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, 3491 ArrayRef<const SCEV *> Ops) { 3492 FoldingSetNodeID ID; 3493 void *IP = nullptr; 3494 ID.AddInteger(SCEVType); 3495 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3496 ID.AddPointer(Ops[i]); 3497 return std::tuple<SCEV *, FoldingSetNodeID, void *>( 3498 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); 3499 } 3500 3501 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { 3502 SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 3503 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); 3504 } 3505 3506 const SCEV *ScalarEvolution::getSignumExpr(const SCEV *Op) { 3507 Type *Ty = Op->getType(); 3508 return getSMinExpr(getSMaxExpr(Op, getMinusOne(Ty)), getOne(Ty)); 3509 } 3510 3511 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, 3512 SmallVectorImpl<const SCEV *> &Ops) { 3513 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3514 if (Ops.size() == 1) return Ops[0]; 3515 #ifndef NDEBUG 3516 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3517 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3518 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3519 "Operand types don't match!"); 3520 #endif 3521 3522 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3523 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3524 3525 // Sort by complexity, this groups all similar expression types together. 3526 GroupByComplexity(Ops, &LI, DT); 3527 3528 // Check if we have created the same expression before. 3529 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) { 3530 return S; 3531 } 3532 3533 // If there are any constants, fold them together. 
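// For example, smax(2, %x, 5) has its constants combined below via
// APIntOps::smax, leaving smax(5, %x).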
3534 unsigned Idx = 0; 3535 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3536 ++Idx; 3537 assert(Idx < Ops.size()); 3538 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3539 if (Kind == scSMaxExpr) 3540 return APIntOps::smax(LHS, RHS); 3541 else if (Kind == scSMinExpr) 3542 return APIntOps::smin(LHS, RHS); 3543 else if (Kind == scUMaxExpr) 3544 return APIntOps::umax(LHS, RHS); 3545 else if (Kind == scUMinExpr) 3546 return APIntOps::umin(LHS, RHS); 3547 llvm_unreachable("Unknown SCEV min/max opcode"); 3548 }; 3549 3550 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3551 // We found two constants, fold them together! 3552 ConstantInt *Fold = ConstantInt::get( 3553 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3554 Ops[0] = getConstant(Fold); 3555 Ops.erase(Ops.begin()+1); // Erase the folded element 3556 if (Ops.size() == 1) return Ops[0]; 3557 LHSC = cast<SCEVConstant>(Ops[0]); 3558 } 3559 3560 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3561 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3562 3563 if (IsMax ? IsMinV : IsMaxV) { 3564 // If we are left with a constant minimum(/maximum)-int, strip it off. 3565 Ops.erase(Ops.begin()); 3566 --Idx; 3567 } else if (IsMax ? IsMaxV : IsMinV) { 3568 // If we have a max(/min) with a constant maximum(/minimum)-int, 3569 // it will always be the extremum. 3570 return LHSC; 3571 } 3572 3573 if (Ops.size() == 1) return Ops[0]; 3574 } 3575 3576 // Find the first operation of the same kind 3577 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3578 ++Idx; 3579 3580 // Check to see if one of the operands is of the same kind. If so, expand its 3581 // operands onto our operand list, and recurse to simplify. 3582 if (Idx < Ops.size()) { 3583 bool DeletedAny = false; 3584 while (Ops[Idx]->getSCEVType() == Kind) { 3585 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3586 Ops.erase(Ops.begin()+Idx); 3587 Ops.append(SMME->op_begin(), SMME->op_end()); 3588 DeletedAny = true; 3589 } 3590 3591 if (DeletedAny) 3592 return getMinMaxExpr(Kind, Ops); 3593 } 3594 3595 // Okay, check to see if the same value occurs in the operand list twice. If 3596 // so, delete one. Since we sorted the list, these values are required to 3597 // be adjacent. 3598 llvm::CmpInst::Predicate GEPred = 3599 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3600 llvm::CmpInst::Predicate LEPred = 3601 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3602 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3603 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3604 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3605 if (Ops[i] == Ops[i + 1] || 3606 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3607 // X op Y op Y --> X op Y 3608 // X op Y --> X, if we know X, Y are ordered appropriately 3609 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3610 --i; 3611 --e; 3612 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3613 Ops[i + 1])) { 3614 // X op Y --> Y, if we know X, Y are ordered appropriately 3615 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3616 --i; 3617 --e; 3618 } 3619 } 3620 3621 if (Ops.size() == 1) return Ops[0]; 3622 3623 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3624 3625 // Okay, it looks like we really DO need an expr. Check to see if we 3626 // already have one, otherwise create a new one. 
3627 const SCEV *ExistingSCEV;
3628 FoldingSetNodeID ID;
3629 void *IP;
3630 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
3631 if (ExistingSCEV)
3632 return ExistingSCEV;
3633 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3634 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3635 SCEV *S = new (SCEVAllocator)
3636 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
3637
3638 UniqueSCEVs.InsertNode(S, IP);
3639 addToLoopUseLists(S);
3640 return S;
3641 }
3642
3643 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3644 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3645 return getSMaxExpr(Ops);
3646 }
3647
3648 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3649 return getMinMaxExpr(scSMaxExpr, Ops);
3650 }
3651
3652 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3653 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3654 return getUMaxExpr(Ops);
3655 }
3656
3657 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3658 return getMinMaxExpr(scUMaxExpr, Ops);
3659 }
3660
3661 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3662 const SCEV *RHS) {
3663 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3664 return getSMinExpr(Ops);
3665 }
3666
3667 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3668 return getMinMaxExpr(scSMinExpr, Ops);
3669 }
3670
3671 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3672 const SCEV *RHS) {
3673 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3674 return getUMinExpr(Ops);
3675 }
3676
3677 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3678 return getMinMaxExpr(scUMinExpr, Ops);
3679 }
3680
3681 const SCEV *
3682 ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
3683 ScalableVectorType *ScalableTy) {
3684 Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
3685 Constant *One = ConstantInt::get(IntTy, 1);
3686 Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
3687 // Note that the expression we created is the final expression; we don't
3688 // want to simplify it any further. Also, if we call a normal getSCEV(),
3689 // we'll end up in an endless recursion. So just create an SCEVUnknown.
3690 return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
3691 }
3692
3693 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3694 if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
3695 return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
3696 // We can bypass creating a target-independent constant expression and then
3697 // folding it back into a ConstantInt. This is just a compile-time
3698 // optimization.
3699 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3700 }
3701
3702 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
3703 if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
3704 return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
3705 // We can bypass creating a target-independent constant expression and then
3706 // folding it back into a ConstantInt. This is just a compile-time
3707 // optimization.
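// For example (illustrative), an i36 store has a store size of 5 bytes,
// while its alloc size, as used by getSizeOfExpr above, rounds up to the
// ABI alignment (8 bytes on common targets).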
3708 return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
3709 }
3710
3711 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3712 StructType *STy,
3713 unsigned FieldNo) {
3714 // We can bypass creating a target-independent constant expression and then
3715 // folding it back into a ConstantInt. This is just a compile-time
3716 // optimization.
3717 return getConstant(
3718 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3719 }
3720
3721 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3722 // Don't attempt to do anything other than create a SCEVUnknown object
3723 // here. createSCEV only calls getUnknown after checking for all other
3724 // interesting possibilities, and any other code that calls getUnknown
3725 // is doing so in order to hide a value from SCEV canonicalization.
3726
3727 FoldingSetNodeID ID;
3728 ID.AddInteger(scUnknown);
3729 ID.AddPointer(V);
3730 void *IP = nullptr;
3731 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3732 assert(cast<SCEVUnknown>(S)->getValue() == V &&
3733 "Stale SCEVUnknown in uniquing map!");
3734 return S;
3735 }
3736 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3737 FirstUnknown);
3738 FirstUnknown = cast<SCEVUnknown>(S);
3739 UniqueSCEVs.InsertNode(S, IP);
3740 return S;
3741 }
3742
3743 //===----------------------------------------------------------------------===//
3744 // Basic SCEV Analysis and PHI Idiom Recognition Code
3745 //
3746
3747 /// Test if values of the given type are analyzable within the SCEV
3748 /// framework. This primarily includes integer types, and it can optionally
3749 /// include pointer types if the ScalarEvolution class has access to
3750 /// target-specific information.
3751 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3752 // Integers and pointers are always SCEVable.
3753 return Ty->isIntOrPtrTy();
3754 }
3755
3756 /// Return the size in bits of the specified type, for which isSCEVable must
3757 /// return true.
3758 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3759 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3760 if (Ty->isPointerTy())
3761 return getDataLayout().getIndexTypeSizeInBits(Ty);
3762 return getDataLayout().getTypeSizeInBits(Ty);
3763 }
3764
3765 /// Return a type with the same bitwidth as the given type and which represents
3766 /// how SCEV will treat the given type, for which isSCEVable must return
3767 /// true. For pointer types, this is the pointer index sized integer type.
3768 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3769 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3770
3771 if (Ty->isIntegerTy())
3772 return Ty;
3773
3774 // The only other supported type is pointer.
3775 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3776 return getDataLayout().getIndexType(Ty);
3777 }
3778
3779 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3780 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ?
T1 : T2; 3781 } 3782 3783 const SCEV *ScalarEvolution::getCouldNotCompute() { 3784 return CouldNotCompute.get(); 3785 } 3786 3787 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3788 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3789 auto *SU = dyn_cast<SCEVUnknown>(S); 3790 return SU && SU->getValue() == nullptr; 3791 }); 3792 3793 return !ContainsNulls; 3794 } 3795 3796 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3797 HasRecMapType::iterator I = HasRecMap.find(S); 3798 if (I != HasRecMap.end()) 3799 return I->second; 3800 3801 bool FoundAddRec = 3802 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); 3803 HasRecMap.insert({S, FoundAddRec}); 3804 return FoundAddRec; 3805 } 3806 3807 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3808 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3809 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3810 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3811 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3812 if (!Add) 3813 return {S, nullptr}; 3814 3815 if (Add->getNumOperands() != 2) 3816 return {S, nullptr}; 3817 3818 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3819 if (!ConstOp) 3820 return {S, nullptr}; 3821 3822 return {Add->getOperand(1), ConstOp->getValue()}; 3823 } 3824 3825 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3826 /// by the value and offset from any ValueOffsetPair in the set. 3827 SetVector<ScalarEvolution::ValueOffsetPair> * 3828 ScalarEvolution::getSCEVValues(const SCEV *S) { 3829 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3830 if (SI == ExprValueMap.end()) 3831 return nullptr; 3832 #ifndef NDEBUG 3833 if (VerifySCEVMap) { 3834 // Check there is no dangling Value in the set returned. 3835 for (const auto &VE : SI->second) 3836 assert(ValueExprMap.count(VE.first)); 3837 } 3838 #endif 3839 return &SI->second; 3840 } 3841 3842 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3843 /// cannot be used separately. eraseValueFromMap should be used to remove 3844 /// V from ValueExprMap and ExprValueMap at the same time. 3845 void ScalarEvolution::eraseValueFromMap(Value *V) { 3846 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3847 if (I != ValueExprMap.end()) { 3848 const SCEV *S = I->second; 3849 // Remove {V, 0} from the set of ExprValueMap[S] 3850 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S)) 3851 SV->remove({V, nullptr}); 3852 3853 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3854 const SCEV *Stripped; 3855 ConstantInt *Offset; 3856 std::tie(Stripped, Offset) = splitAddExpr(S); 3857 if (Offset != nullptr) { 3858 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped)) 3859 SV->remove({V, Offset}); 3860 } 3861 ValueExprMap.erase(V); 3862 } 3863 } 3864 3865 /// Check whether value has nuw/nsw/exact set but SCEV does not. 3866 /// TODO: In reality it is better to check the poison recursively 3867 /// but this is better than nothing. 
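/// For example (illustrative): if %a = add nsw i32 %x, 1 is mapped to a SCEV
/// add that lacks the nsw flag, the flags were lost, and the mapping should
/// not be reused for expression-to-value lookups.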
3868 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
3869 if (auto *I = dyn_cast<Instruction>(V)) {
3870 if (isa<OverflowingBinaryOperator>(I)) {
3871 if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
3872 if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
3873 return true;
3874 if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
3875 return true;
3876 }
3877 } else if (isa<PossiblyExactOperator>(I) && I->isExact())
3878 return true;
3879 }
3880 return false;
3881 }
3882
3883 /// Return an existing SCEV if it exists, otherwise analyze the expression and
3884 /// create a new one.
3885 const SCEV *ScalarEvolution::getSCEV(Value *V) {
3886 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3887
3888 const SCEV *S = getExistingSCEV(V);
3889 if (S == nullptr) {
3890 S = createSCEV(V);
3891 // During PHI resolution, it is possible to create two SCEVs for the same
3892 // V, so we need to double-check whether V->S was inserted into
3893 // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
3894 std::pair<ValueExprMapType::iterator, bool> Pair =
3895 ValueExprMap.insert({SCEVCallbackVH(V, this), S});
3896 if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
3897 ExprValueMap[S].insert({V, nullptr});
3898
3899 // If S == Stripped + Offset, add Stripped -> {V, Offset} into
3900 // ExprValueMap.
3901 const SCEV *Stripped = S;
3902 ConstantInt *Offset = nullptr;
3903 std::tie(Stripped, Offset) = splitAddExpr(S);
3904 // If Stripped is a SCEVUnknown, don't bother to save
3905 // Stripped -> {V, offset}. It doesn't simplify and sometimes even
3906 // increases the complexity of the expansion code.
3907 // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
3908 // because it may generate add/sub instead of GEP in SCEV expansion.
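// For example (illustrative): when S is (8 + %base), we record
// %base -> {V, 8}, letting the expander rebuild %base as (V - 8) rather
// than emitting fresh instructions.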
3909 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3910 !isa<GetElementPtrInst>(V)) 3911 ExprValueMap[Stripped].insert({V, Offset}); 3912 } 3913 } 3914 return S; 3915 } 3916 3917 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3918 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3919 3920 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3921 if (I != ValueExprMap.end()) { 3922 const SCEV *S = I->second; 3923 if (checkValidity(S)) 3924 return S; 3925 eraseValueFromMap(V); 3926 forgetMemoizedResults(S); 3927 } 3928 return nullptr; 3929 } 3930 3931 /// Return a SCEV corresponding to -V = -1*V 3932 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3933 SCEV::NoWrapFlags Flags) { 3934 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3935 return getConstant( 3936 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3937 3938 Type *Ty = V->getType(); 3939 Ty = getEffectiveSCEVType(Ty); 3940 return getMulExpr(V, getMinusOne(Ty), Flags); 3941 } 3942 3943 /// If Expr computes ~A, return A else return nullptr 3944 static const SCEV *MatchNotExpr(const SCEV *Expr) { 3945 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 3946 if (!Add || Add->getNumOperands() != 2 || 3947 !Add->getOperand(0)->isAllOnesValue()) 3948 return nullptr; 3949 3950 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 3951 if (!AddRHS || AddRHS->getNumOperands() != 2 || 3952 !AddRHS->getOperand(0)->isAllOnesValue()) 3953 return nullptr; 3954 3955 return AddRHS->getOperand(1); 3956 } 3957 3958 /// Return a SCEV corresponding to ~V = -1-V 3959 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3960 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3961 return getConstant( 3962 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3963 3964 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 3965 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 3966 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 3967 SmallVector<const SCEV *, 2> MatchedOperands; 3968 for (const SCEV *Operand : MME->operands()) { 3969 const SCEV *Matched = MatchNotExpr(Operand); 3970 if (!Matched) 3971 return (const SCEV *)nullptr; 3972 MatchedOperands.push_back(Matched); 3973 } 3974 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), 3975 MatchedOperands); 3976 }; 3977 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 3978 return Replaced; 3979 } 3980 3981 Type *Ty = V->getType(); 3982 Ty = getEffectiveSCEVType(Ty); 3983 return getMinusSCEV(getMinusOne(Ty), V); 3984 } 3985 3986 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3987 SCEV::NoWrapFlags Flags, 3988 unsigned Depth) { 3989 // Fast path: X - X --> 0. 3990 if (LHS == RHS) 3991 return getZero(LHS->getType()); 3992 3993 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3994 // makes it so that we cannot make much use of NUW. 3995 auto AddFlags = SCEV::FlagAnyWrap; 3996 const bool RHSIsNotMinSigned = 3997 !getSignedRangeMin(RHS).isMinSignedValue(); 3998 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3999 // Let M be the minimum representable signed value. Then (-1)*RHS 4000 // signed-wraps if and only if RHS is M. That can happen even for 4001 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 4002 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 4003 // (-1)*RHS, we need to prove that RHS != M. 
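// Concretely, in i8: M = -128, and (-1)*(-128) signed-wraps back to -128,
// while -1 - (-128) = 127 does not wrap.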
4004 //
4005 // If LHS is non-negative and we know that LHS - RHS does not
4006 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
4007 // either by proving that RHS > M or that LHS >= 0.
4008 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
4009 AddFlags = SCEV::FlagNSW;
4010 }
4011 }
4012
4013 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
4014 // RHS is NSW and LHS >= 0.
4015 //
4016 // The difficulty here is that the NSW flag may have been proven
4017 // relative to a loop that is to be found in a recurrence in LHS and
4018 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
4019 // larger scope than intended.
4020 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
4021
4022 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4023 }
4024
4025 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4026 unsigned Depth) {
4027 Type *SrcTy = V->getType();
4028 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4029 "Cannot truncate or zero extend with non-integer arguments!");
4030 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4031 return V; // No conversion
4032 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4033 return getTruncateExpr(V, Ty, Depth);
4034 return getZeroExtendExpr(V, Ty, Depth);
4035 }
4036
4037 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4038 unsigned Depth) {
4039 Type *SrcTy = V->getType();
4040 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4041 "Cannot truncate or sign extend with non-integer arguments!");
4042 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4043 return V; // No conversion
4044 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4045 return getTruncateExpr(V, Ty, Depth);
4046 return getSignExtendExpr(V, Ty, Depth);
4047 }
4048
4049 const SCEV *
4050 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4051 Type *SrcTy = V->getType();
4052 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4053 "Cannot noop or zero extend with non-integer arguments!");
4054 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4055 "getNoopOrZeroExtend cannot truncate!");
4056 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4057 return V; // No conversion
4058 return getZeroExtendExpr(V, Ty);
4059 }
4060
4061 const SCEV *
4062 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4063 Type *SrcTy = V->getType();
4064 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4065 "Cannot noop or sign extend with non-integer arguments!");
4066 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4067 "getNoopOrSignExtend cannot truncate!");
4068 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4069 return V; // No conversion
4070 return getSignExtendExpr(V, Ty);
4071 }
4072
4073 const SCEV *
4074 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4075 Type *SrcTy = V->getType();
4076 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4077 "Cannot noop or any extend with non-integer arguments!");
4078 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4079 "getNoopOrAnyExtend cannot truncate!");
4080 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4081 return V; // No conversion
4082 return getAnyExtendExpr(V, Ty);
4083 }
4084
4085 const SCEV *
4086 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4087 Type *SrcTy = V->getType();
4088 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4089 "Cannot
truncate or noop with non-integer arguments!"); 4090 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 4091 "getTruncateOrNoop cannot extend!"); 4092 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4093 return V; // No conversion 4094 return getTruncateExpr(V, Ty); 4095 } 4096 4097 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 4098 const SCEV *RHS) { 4099 const SCEV *PromotedLHS = LHS; 4100 const SCEV *PromotedRHS = RHS; 4101 4102 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 4103 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 4104 else 4105 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 4106 4107 return getUMaxExpr(PromotedLHS, PromotedRHS); 4108 } 4109 4110 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4111 const SCEV *RHS) { 4112 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4113 return getUMinFromMismatchedTypes(Ops); 4114 } 4115 4116 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 4117 SmallVectorImpl<const SCEV *> &Ops) { 4118 assert(!Ops.empty() && "At least one operand must be!"); 4119 // Trivial case. 4120 if (Ops.size() == 1) 4121 return Ops[0]; 4122 4123 // Find the max type first. 4124 Type *MaxType = nullptr; 4125 for (auto *S : Ops) 4126 if (MaxType) 4127 MaxType = getWiderType(MaxType, S->getType()); 4128 else 4129 MaxType = S->getType(); 4130 assert(MaxType && "Failed to find maximum type!"); 4131 4132 // Extend all ops to max type. 4133 SmallVector<const SCEV *, 2> PromotedOps; 4134 for (auto *S : Ops) 4135 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4136 4137 // Generate umin. 4138 return getUMinExpr(PromotedOps); 4139 } 4140 4141 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4142 // A pointer operand may evaluate to a nonpointer expression, such as null. 4143 if (!V->getType()->isPointerTy()) 4144 return V; 4145 4146 while (true) { 4147 if (const SCEVIntegralCastExpr *Cast = dyn_cast<SCEVIntegralCastExpr>(V)) { 4148 V = Cast->getOperand(); 4149 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 4150 const SCEV *PtrOp = nullptr; 4151 for (const SCEV *NAryOp : NAry->operands()) { 4152 if (NAryOp->getType()->isPointerTy()) { 4153 // Cannot find the base of an expression with multiple pointer ops. 4154 if (PtrOp) 4155 return V; 4156 PtrOp = NAryOp; 4157 } 4158 } 4159 if (!PtrOp) // All operands were non-pointer. 4160 return V; 4161 V = PtrOp; 4162 } else // Not something we can look further into. 4163 return V; 4164 } 4165 } 4166 4167 /// Push users of the given Instruction onto the given Worklist. 4168 static void 4169 PushDefUseChildren(Instruction *I, 4170 SmallVectorImpl<Instruction *> &Worklist) { 4171 // Push the def-use children onto the Worklist stack. 4172 for (User *U : I->users()) 4173 Worklist.push_back(cast<Instruction>(U)); 4174 } 4175 4176 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 4177 SmallVector<Instruction *, 16> Worklist; 4178 PushDefUseChildren(PN, Worklist); 4179 4180 SmallPtrSet<Instruction *, 8> Visited; 4181 Visited.insert(PN); 4182 while (!Worklist.empty()) { 4183 Instruction *I = Worklist.pop_back_val(); 4184 if (!Visited.insert(I).second) 4185 continue; 4186 4187 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 4188 if (It != ValueExprMap.end()) { 4189 const SCEV *Old = It->second; 4190 4191 // Short-circuit the def-use traversal if the symbolic name 4192 // ceases to appear in expressions. 
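// (SymName is the temporary SCEVUnknown standing in for PN while its
// recurrence is being computed; users whose cached SCEV never mentions it
// do not need to be forgotten.)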
4193 if (Old != SymName && !hasOperand(Old, SymName))
4194 continue;
4195
4196 // SCEVUnknown for a PHI either means that it has an unrecognized
4197 // structure, it's a PHI that's in the process of being computed
4198 // by createNodeForPHI, or it's a single-value PHI. In the first case,
4199 // additional loop trip count information isn't going to change anything.
4200 // In the second case, createNodeForPHI will perform the necessary
4201 // updates on its own when it gets to that point. In the third, we do
4202 // want to forget the SCEVUnknown.
4203 if (!isa<PHINode>(I) ||
4204 !isa<SCEVUnknown>(Old) ||
4205 (I != PN && Old == SymName)) {
4206 eraseValueFromMap(It->first);
4207 forgetMemoizedResults(Old);
4208 }
4209 }
4210
4211 PushDefUseChildren(I, Worklist);
4212 }
4213 }
4214
4215 namespace {
4216
4217 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
4218 /// expression if its loop is L. If the loop is not L and IgnoreOtherLoops
4219 /// is true, use the AddRec itself; otherwise the rewrite cannot be done.
4220 /// The rewrite also cannot be done if the SCEV contains a loop-variant
4221 /// SCEVUnknown.
4222 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4223 public:
4224 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
4225 bool IgnoreOtherLoops = true) {
4226 SCEVInitRewriter Rewriter(L, SE);
4227 const SCEV *Result = Rewriter.visit(S);
4228 if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4229 return SE.getCouldNotCompute();
4230 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4231 ? SE.getCouldNotCompute()
4232 : Result;
4233 }
4234
4235 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4236 if (!SE.isLoopInvariant(Expr, L))
4237 SeenLoopVariantSCEVUnknown = true;
4238 return Expr;
4239 }
4240
4241 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4242 // Only re-write AddRecExprs for this loop.
4243 if (Expr->getLoop() == L)
4244 return Expr->getStart();
4245 SeenOtherLoops = true;
4246 return Expr;
4247 }
4248
4249 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4250
4251 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4252
4253 private:
4254 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4255 : SCEVRewriteVisitor(SE), L(L) {}
4256
4257 const Loop *L;
4258 bool SeenLoopVariantSCEVUnknown = false;
4259 bool SeenOtherLoops = false;
4260 };
4261
4262 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
4263 /// post-increment expression if its loop is L; otherwise use the AddRec
4264 /// itself. The rewrite cannot be done if the SCEV contains a loop-variant
4265 /// SCEVUnknown.
4266 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
4267 public:
4268 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
4269 SCEVPostIncRewriter Rewriter(L, SE);
4270 const SCEV *Result = Rewriter.visit(S);
4271 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4272 ? SE.getCouldNotCompute()
4273 : Result;
4274 }
4275
4276 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4277 if (!SE.isLoopInvariant(Expr, L))
4278 SeenLoopVariantSCEVUnknown = true;
4279 return Expr;
4280 }
4281
4282 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4283 // Only re-write AddRecExprs for this loop.
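// For example, {0,+,1}<L> is rewritten to its post-increment form
// {1,+,1}<L>.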
4284 if (Expr->getLoop() == L)
4285 return Expr->getPostIncExpr(SE);
4286 SeenOtherLoops = true;
4287 return Expr;
4288 }
4289
4290 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4291
4292 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4293
4294 private:
4295 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
4296 : SCEVRewriteVisitor(SE), L(L) {}
4297
4298 const Loop *L;
4299 bool SeenLoopVariantSCEVUnknown = false;
4300 bool SeenOtherLoops = false;
4301 };
4302
4303 /// This class evaluates the compare condition by matching it against the
4304 /// condition of the loop latch. If there is a match, we assume a true value
4305 /// for the condition while building SCEV nodes.
4306 class SCEVBackedgeConditionFolder
4307 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
4308 public:
4309 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4310 ScalarEvolution &SE) {
4311 bool IsPosBECond = false;
4312 Value *BECond = nullptr;
4313 if (BasicBlock *Latch = L->getLoopLatch()) {
4314 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
4315 if (BI && BI->isConditional()) {
4316 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
4317 "Both outgoing branches should not target same header!");
4318 BECond = BI->getCondition();
4319 IsPosBECond = BI->getSuccessor(0) == L->getHeader();
4320 } else {
4321 return S;
4322 }
4323 }
4324 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
4325 return Rewriter.visit(S);
4326 }
4327
4328 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4329 const SCEV *Result = Expr;
4330 bool InvariantF = SE.isLoopInvariant(Expr, L);
4331
4332 if (!InvariantF) {
4333 Instruction *I = cast<Instruction>(Expr->getValue());
4334 switch (I->getOpcode()) {
4335 case Instruction::Select: {
4336 SelectInst *SI = cast<SelectInst>(I);
4337 Optional<const SCEV *> Res =
4338 compareWithBackedgeCondition(SI->getCondition());
4339 if (Res.hasValue()) {
4340 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
4341 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
4342 }
4343 break;
4344 }
4345 default: {
4346 Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
4347 if (Res.hasValue())
4348 Result = Res.getValue();
4349 break;
4350 }
4351 }
4352 }
4353 return Result;
4354 }
4355
4356 private:
4357 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
4358 bool IsPosBECond, ScalarEvolution &SE)
4359 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
4360 IsPositiveBECond(IsPosBECond) {}
4361
4362 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);
4363
4364 const Loop *L;
4365 /// Loop back condition.
4366 Value *BackedgeCond = nullptr;
4367 /// Set to true if loop back is on positive branch condition.
4368 bool IsPositiveBECond;
4369 };
4370
4371 Optional<const SCEV *>
4372 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
4373
4374 // If the value matches the backedge condition for the loop latch, then
4375 // return a constant evolution node based on the branch taken at the
4376 // backedge.
4377 if (BackedgeCond == IC)
4378 return IsPositiveBECond ?
SE.getOne(Type::getInt1Ty(SE.getContext()))
4379 : SE.getZero(Type::getInt1Ty(SE.getContext()));
4380 return None;
4381 }
4382
4383 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
4384 public:
4385 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4386 ScalarEvolution &SE) {
4387 SCEVShiftRewriter Rewriter(L, SE);
4388 const SCEV *Result = Rewriter.visit(S);
4389 return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4390 }
4391
4392 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4393 // Only allow AddRecExprs for this loop.
4394 if (!SE.isLoopInvariant(Expr, L))
4395 Valid = false;
4396 return Expr;
4397 }
4398
4399 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4400 if (Expr->getLoop() == L && Expr->isAffine())
4401 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
4402 Valid = false;
4403 return Expr;
4404 }
4405
4406 bool isValid() { return Valid; }
4407
4408 private:
4409 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
4410 : SCEVRewriteVisitor(SE), L(L) {}
4411
4412 const Loop *L;
4413 bool Valid = true;
4414 };
4415
4416 } // end anonymous namespace
4417
4418 SCEV::NoWrapFlags
4419 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
4420 if (!AR->isAffine())
4421 return SCEV::FlagAnyWrap;
4422
4423 using OBO = OverflowingBinaryOperator;
4424
4425 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
4426
4427 if (!AR->hasNoSignedWrap()) {
4428 ConstantRange AddRecRange = getSignedRange(AR);
4429 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
4430
4431 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4432 Instruction::Add, IncRange, OBO::NoSignedWrap);
4433 if (NSWRegion.contains(AddRecRange))
4434 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
4435 }
4436
4437 if (!AR->hasNoUnsignedWrap()) {
4438 ConstantRange AddRecRange = getUnsignedRange(AR);
4439 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
4440
4441 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4442 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
4443 if (NUWRegion.contains(AddRecRange))
4444 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
4445 }
4446
4447 return Result;
4448 }
4449
4450 SCEV::NoWrapFlags
4451 ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4452 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4453
4454 if (AR->hasNoSignedWrap())
4455 return Result;
4456
4457 if (!AR->isAffine())
4458 return Result;
4459
4460 const SCEV *Step = AR->getStepRecurrence(*this);
4461 const Loop *L = AR->getLoop();
4462
4463 // Check whether the backedge-taken count is SCEVCouldNotCompute.
4464 // Note that this serves two purposes: It filters out loops that are
4465 // simply not analyzable, and it covers the case where this code is
4466 // being called from within backedge-taken count analysis, such that
4467 // attempting to ask for the backedge-taken count would likely result
4468 // in infinite recursion. In the latter case, the analysis code will
4469 // cope with a conservative value, and it will take care to purge
4470 // that value once it has finished.
4471 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4472
4473 // Normally, in the cases we can prove no-overflow via a
4474 // backedge guarding condition, we can also compute a backedge
4475 // taken count for the loop.
The exceptions are assumptions and 4476 // guards present in the loop -- SCEV is not great at exploiting 4477 // these to compute max backedge taken counts, but can still use 4478 // these to prove lack of overflow. Use this fact to avoid 4479 // doing extra work that may not pay off. 4480 4481 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && 4482 AC.assumptions().empty()) 4483 return Result; 4484 4485 // If the backedge is guarded by a comparison with the pre-inc value the 4486 // addrec is safe. Also, if the entry is guarded by a comparison with the 4487 // start value and the backedge is guarded by a comparison with the post-inc 4488 // value, the addrec is safe. 4489 ICmpInst::Predicate Pred; 4490 const SCEV *OverflowLimit = 4491 getSignedOverflowLimitForStep(Step, &Pred, this); 4492 if (OverflowLimit && 4493 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 4494 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 4495 Result = setFlags(Result, SCEV::FlagNSW); 4496 } 4497 return Result; 4498 } 4499 SCEV::NoWrapFlags 4500 ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) { 4501 SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); 4502 4503 if (AR->hasNoUnsignedWrap()) 4504 return Result; 4505 4506 if (!AR->isAffine()) 4507 return Result; 4508 4509 const SCEV *Step = AR->getStepRecurrence(*this); 4510 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 4511 const Loop *L = AR->getLoop(); 4512 4513 // Check whether the backedge-taken count is SCEVCouldNotCompute. 4514 // Note that this serves two purposes: It filters out loops that are 4515 // simply not analyzable, and it covers the case where this code is 4516 // being called from within backedge-taken count analysis, such that 4517 // attempting to ask for the backedge-taken count would likely result 4518 // in infinite recursion. In the later case, the analysis code will 4519 // cope with a conservative value, and it will take care to purge 4520 // that value once it has finished. 4521 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 4522 4523 // Normally, in the cases we can prove no-overflow via a 4524 // backedge guarding condition, we can also compute a backedge 4525 // taken count for the loop. The exceptions are assumptions and 4526 // guards present in the loop -- SCEV is not great at exploiting 4527 // these to compute max backedge taken counts, but can still use 4528 // these to prove lack of overflow. Use this fact to avoid 4529 // doing extra work that may not pay off. 4530 4531 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && 4532 AC.assumptions().empty()) 4533 return Result; 4534 4535 // If the backedge is guarded by a comparison with the pre-inc value the 4536 // addrec is safe. Also, if the entry is guarded by a comparison with the 4537 // start value and the backedge is guarded by a comparison with the post-inc 4538 // value, the addrec is safe. 4539 if (isKnownPositive(Step)) { 4540 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 4541 getUnsignedRangeMax(Step)); 4542 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 4543 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 4544 Result = setFlags(Result, SCEV::FlagNUW); 4545 } 4546 } 4547 4548 return Result; 4549 } 4550 4551 namespace { 4552 4553 /// Represents an abstract binary operation. This may exist as a 4554 /// normal instruction or constant expression, or may have been 4555 /// derived from an expression tree. 
4556 struct BinaryOp { 4557 unsigned Opcode; 4558 Value *LHS; 4559 Value *RHS; 4560 bool IsNSW = false; 4561 bool IsNUW = false; 4562 bool IsExact = false; 4563 4564 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4565 /// constant expression. 4566 Operator *Op = nullptr; 4567 4568 explicit BinaryOp(Operator *Op) 4569 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), 4570 Op(Op) { 4571 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { 4572 IsNSW = OBO->hasNoSignedWrap(); 4573 IsNUW = OBO->hasNoUnsignedWrap(); 4574 } 4575 if (auto *PEO = dyn_cast<PossiblyExactOperator>(Op)) 4576 IsExact = PEO->isExact(); 4577 } 4578 4579 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, 4580 bool IsNUW = false, bool IsExact = false) 4581 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW), 4582 IsExact(IsExact) {} 4583 }; 4584 4585 } // end anonymous namespace 4586 4587 /// Try to map \p V into a BinaryOp, and return \c None on failure. 4588 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { 4589 auto *Op = dyn_cast<Operator>(V); 4590 if (!Op) 4591 return None; 4592 4593 // Implementation detail: all the cleverness here should happen without 4594 // creating new SCEV expressions -- our caller knowns tricks to avoid creating 4595 // SCEV expressions when possible, and we should not break that. 4596 4597 switch (Op->getOpcode()) { 4598 case Instruction::Add: 4599 case Instruction::Sub: 4600 case Instruction::Mul: 4601 case Instruction::UDiv: 4602 case Instruction::URem: 4603 case Instruction::And: 4604 case Instruction::Or: 4605 case Instruction::AShr: 4606 case Instruction::Shl: 4607 return BinaryOp(Op); 4608 4609 case Instruction::Xor: 4610 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) 4611 // If the RHS of the xor is a signmask, then this is just an add. 4612 // Instcombine turns add of signmask into xor as a strength reduction step. 4613 if (RHSC->getValue().isSignMask()) 4614 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); 4615 return BinaryOp(Op); 4616 4617 case Instruction::LShr: 4618 // Turn logical shift right of a constant into a unsigned divide. 4619 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { 4620 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); 4621 4622 // If the shift count is not less than the bitwidth, the result of 4623 // the shift is undefined. Don't try to analyze it, because the 4624 // resolution chosen here may differ from the resolution chosen in 4625 // other parts of the compiler. 4626 if (SA->getValue().ult(BitWidth)) { 4627 Constant *X = 4628 ConstantInt::get(SA->getContext(), 4629 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 4630 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); 4631 } 4632 } 4633 return BinaryOp(Op); 4634 4635 case Instruction::ExtractValue: { 4636 auto *EVI = cast<ExtractValueInst>(Op); 4637 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) 4638 break; 4639 4640 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()); 4641 if (!WO) 4642 break; 4643 4644 Instruction::BinaryOps BinOp = WO->getBinaryOp(); 4645 bool Signed = WO->isSigned(); 4646 // TODO: Should add nuw/nsw flags for mul as well. 
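// For illustration, a hypothetical IR fragment this case handles:
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %v = extractvalue { i32, i1 } %s, 0
// The extractvalue maps to BinaryOp(Add, %a, %b); the nsw flag is added
// only when isOverflowIntrinsicNoWrap proves that every use of %v is
// guarded by the overflow check (see below).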
4647 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
4648 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
4649
4650 // Now that we know that all uses of the arithmetic-result component of
4651 // WO are guarded by the overflow check, we can go ahead and pretend
4652 // that the arithmetic is non-overflowing.
4653 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
4654 /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
4655 }
4656
4657 default:
4658 break;
4659 }
4660
4661 // Recognise the intrinsic loop.decrement.reg; as it has exactly the same
4662 // semantics as a Sub, return a binary sub expression.
4663 if (auto *II = dyn_cast<IntrinsicInst>(V))
4664 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
4665 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));
4666
4667 return None;
4668 }
4669
4670 /// Helper function to createAddRecFromPHIWithCasts. We have a phi
4671 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
4672 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
4673 /// way. This function checks if \p Op, an operand of this SCEVAddExpr,
4674 /// follows one of the following patterns:
4675 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4676 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4677 /// If the SCEV expression of \p Op conforms with one of the expected patterns
4678 /// we return the type of the truncation operation, and indicate whether the
4679 /// truncated type should be treated as signed/unsigned by setting
4680 /// \p Signed to true/false, respectively.
4681 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
4682 bool &Signed, ScalarEvolution &SE) {
4683 // The case where Op == SymbolicPHI (that is, with no type conversions on
4684 // the way) is handled by the regular add recurrence creating logic and
4685 // would have already been triggered in createAddRecForPHI. Reaching it here
4686 // means that createAddRecFromPHI had failed for this PHI before (e.g.,
4687 // because one of the other operands of the SCEVAddExpr updating this PHI is
4688 // not invariant).
4689 //
4690 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
4691 // this case predicates that allow us to prove that Op == SymbolicPHI will
4692 // be added.
4693 if (Op == SymbolicPHI)
4694 return nullptr;
4695
4696 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
4697 unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
4698 if (SourceBits != NewBits)
4699 return nullptr;
4700
4701 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
4702 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
4703 if (!SExt && !ZExt)
4704 return nullptr;
4705 const SCEVTruncateExpr *Trunc =
4706 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
4707 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
4708 if (!Trunc)
4709 return nullptr;
4710 const SCEV *X = Trunc->getOperand();
4711 if (X != SymbolicPHI)
4712 return nullptr;
4713 Signed = SExt != nullptr;
4714 return Trunc->getType();
4715 }
4716
4717 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
4718 if (!PN->getType()->isIntegerTy())
4719 return nullptr;
4720 const Loop *L = LI.getLoopFor(PN->getParent());
4721 if (!L || L->getHeader() != PN->getParent())
4722 return nullptr;
4723 return L;
4724 }
4725
4726 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
4727 // computation that updates the phi follows the following pattern:
4728 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
4729 // which corresponds to a phi->trunc->sext/zext->add->phi update chain.
4730 // If so, try to see if it can be rewritten as an AddRecExpr under some
4731 // Predicates. If successful, return them as a pair. Also cache the results
4732 // of the analysis.
4733 //
4734 // Example usage scenario:
4735 // Say the Rewriter is called for the following SCEV:
4736 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4737 // where:
4738 // %X = phi i64 (%Start, %BEValue)
4739 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
4740 // and call this function with %SymbolicPHI = %X.
4741 //
4742 // The analysis will find that the value coming around the backedge has
4743 // the following SCEV:
4744 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4745 // Upon concluding that this matches the desired pattern, the function
4746 // will return the pair {NewAddRec, SmallPredsVec} where:
4747 // NewAddRec = {%Start,+,%Step}
4748 // SmallPredsVec = {P1, P2, P3} as follows:
4749 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4750 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4751 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4752 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4753 // under the predicates {P1,P2,P3}.
4754 // This predicated rewrite will be cached in PredicatedSCEVRewrites:
4755 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
4756 //
4757 // TODO's:
4758 //
4759 // 1) Extend the Induction descriptor to also support inductions that involve
4760 // casts: When needed (namely, when we are called in the context of the
4761 // vectorizer induction analysis), a Set of cast instructions will be
4762 // populated by this method, and provided back to isInductionPHI. This is
4763 // needed to allow the vectorizer to properly record them to be ignored by
4764 // the cost model and to avoid vectorizing them (otherwise these casts,
4765 // which are redundant under the runtime overflow checks, will be
4766 // vectorized, which can be costly).
4767 //
4768 // 2) Support additional induction/PHISCEV patterns: We also want to support
4769 // inductions where the sext-trunc / zext-trunc operations (partly) occur
4770 // after the induction update operation (the induction increment):
4771 //
4772 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4773 // which corresponds to a phi->add->trunc->sext/zext->phi update chain.
4774 //
4775 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4776 // which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4777 // 4778 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4779 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4780 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4781 SmallVector<const SCEVPredicate *, 3> Predicates; 4782 4783 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4784 // return an AddRec expression under some predicate. 4785 4786 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4787 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4788 assert(L && "Expecting an integer loop header phi"); 4789 4790 // The loop may have multiple entrances or multiple exits; we can analyze 4791 // this phi as an addrec if it has a unique entry value and a unique 4792 // backedge value. 4793 Value *BEValueV = nullptr, *StartValueV = nullptr; 4794 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4795 Value *V = PN->getIncomingValue(i); 4796 if (L->contains(PN->getIncomingBlock(i))) { 4797 if (!BEValueV) { 4798 BEValueV = V; 4799 } else if (BEValueV != V) { 4800 BEValueV = nullptr; 4801 break; 4802 } 4803 } else if (!StartValueV) { 4804 StartValueV = V; 4805 } else if (StartValueV != V) { 4806 StartValueV = nullptr; 4807 break; 4808 } 4809 } 4810 if (!BEValueV || !StartValueV) 4811 return None; 4812 4813 const SCEV *BEValue = getSCEV(BEValueV); 4814 4815 // If the value coming around the backedge is an add with the symbolic 4816 // value we just inserted, possibly with casts that we can ignore under 4817 // an appropriate runtime guard, then we found a simple induction variable! 4818 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4819 if (!Add) 4820 return None; 4821 4822 // If there is a single occurrence of the symbolic value, possibly 4823 // casted, replace it with a recurrence. 4824 unsigned FoundIndex = Add->getNumOperands(); 4825 Type *TruncTy = nullptr; 4826 bool Signed; 4827 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4828 if ((TruncTy = 4829 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4830 if (FoundIndex == e) { 4831 FoundIndex = i; 4832 break; 4833 } 4834 4835 if (FoundIndex == Add->getNumOperands()) 4836 return None; 4837 4838 // Create an add with everything but the specified operand. 4839 SmallVector<const SCEV *, 8> Ops; 4840 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4841 if (i != FoundIndex) 4842 Ops.push_back(Add->getOperand(i)); 4843 const SCEV *Accum = getAddExpr(Ops); 4844 4845 // The runtime checks will not be valid if the step amount is 4846 // varying inside the loop. 4847 if (!isLoopInvariant(Accum, L)) 4848 return None; 4849 4850 // *** Part2: Create the predicates 4851 4852 // Analysis was successful: we have a phi-with-cast pattern for which we 4853 // can return an AddRec expression under the following predicates: 4854 // 4855 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4856 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4857 // P2: An Equal predicate that guarantees that 4858 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4859 // P3: An Equal predicate that guarantees that 4860 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4861 // 4862 // As we next prove, the above predicates guarantee that: 4863 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4864 // 4865 // 4866 // More formally, we want to prove that: 4867 // Expr(i+1) = Start + (i+1) * Accum 4868 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4869 // 4870 // Given that: 4871 // 1) Expr(0) = Start 4872 // 2) Expr(1) = Start + Accum 4873 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4874 // 3) Induction hypothesis (step i): 4875 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4876 // 4877 // Proof: 4878 // Expr(i+1) = 4879 // = Start + (i+1)*Accum 4880 // = (Start + i*Accum) + Accum 4881 // = Expr(i) + Accum 4882 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4883 // :: from step i 4884 // 4885 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4886 // 4887 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4888 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4889 // + Accum :: from P3 4890 // 4891 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4892 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4893 // 4894 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4895 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4896 // 4897 // By induction, the same applies to all iterations 1<=i<n: 4898 // 4899 4900 // Create a truncated addrec for which we will add a no overflow check (P1). 4901 const SCEV *StartVal = getSCEV(StartValueV); 4902 const SCEV *PHISCEV = 4903 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4904 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4905 4906 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4907 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4908 // will be constant. 4909 // 4910 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4911 // add P1. 4912 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4913 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4914 Signed ? SCEVWrapPredicate::IncrementNSSW 4915 : SCEVWrapPredicate::IncrementNUSW; 4916 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4917 Predicates.push_back(AddRecPred); 4918 } 4919 4920 // Create the Equal Predicates P2,P3: 4921 4922 // It is possible that the predicates P2 and/or P3 are computable at 4923 // compile time due to StartVal and/or Accum being constants. 4924 // If either one is, then we can check that now and escape if either P2 4925 // or P3 is false. 4926 4927 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4928 // for each of StartVal and Accum 4929 auto getExtendedExpr = [&](const SCEV *Expr, 4930 bool CreateSignExtend) -> const SCEV * { 4931 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4932 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4933 const SCEV *ExtendedExpr = 4934 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
4935 : getZeroExtendExpr(TruncatedExpr, Expr->getType());
4936 return ExtendedExpr;
4937 };
4938
4939 // Given:
4940 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
4941 // = getExtendedExpr(Expr)
4942 // Determine whether the predicate P: Expr == ExtendedExpr
4943 // is known to be false at compile time
4944 auto PredIsKnownFalse = [&](const SCEV *Expr,
4945 const SCEV *ExtendedExpr) -> bool {
4946 return Expr != ExtendedExpr &&
4947 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
4948 };
4949
4950 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
4951 if (PredIsKnownFalse(StartVal, StartExtended)) {
4952 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
4953 return None;
4954 }
4955
4956 // The Step is always Signed (because the overflow checks are either
4957 // NSSW or NUSW)
4958 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
4959 if (PredIsKnownFalse(Accum, AccumExtended)) {
4960 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
4961 return None;
4962 }
4963
4964 auto AppendPredicate = [&](const SCEV *Expr,
4965 const SCEV *ExtendedExpr) -> void {
4966 if (Expr != ExtendedExpr &&
4967 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
4968 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4969 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
4970 Predicates.push_back(Pred);
4971 }
4972 };
4973
4974 AppendPredicate(StartVal, StartExtended);
4975 AppendPredicate(Accum, AccumExtended);
4976
4977 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
4978 // which the casts had been folded away. The caller can rewrite SymbolicPHI
4979 // into NewAR if it will also add the runtime overflow checks specified in
4980 // Predicates.
4981 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
4982
4983 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4984 std::make_pair(NewAR, Predicates);
4985 // Remember the result of the analysis for this SCEV at this location.
4986 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
4987 return PredRewrite;
4988 }
4989
4990 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4991 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
4992 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4993 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4994 if (!L)
4995 return None;
4996
4997 // Check to see if we already analyzed this PHI.
4998 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
4999 if (I != PredicatedSCEVRewrites.end()) {
5000 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5001 I->second;
5002 // Analysis was done before and failed to create an AddRec:
5003 if (Rewrite.first == SymbolicPHI)
5004 return None;
5005 // Analysis was done before and succeeded in creating an AddRec under
5006 // a predicate:
5007 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
5008 assert(!(Rewrite.second).empty() && "Expected to find Predicates");
5009 return Rewrite;
5010 }
5011
5012 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5013 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5014
5015 // Record in the cache that the analysis failed
5016 if (!Rewrite) {
5017 SmallVector<const SCEVPredicate *, 3> Predicates;
5018 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5019 return None;
5020 }
5021
5022 return Rewrite;
5023 }
5024
5025 // FIXME: This utility is currently required because the Rewriter currently
5026 // does not rewrite this expression:
5027 // {0, +, (sext ix (trunc iy to ix) to iy)}
5028 // into {0, +, %step},
5029 // even when the following Equal predicate exists:
5030 // "%step == (sext ix (trunc iy to ix) to iy)".
5031 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
5032 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
5033 if (AR1 == AR2)
5034 return true;
5035
5036 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
5037 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
5038 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
5039 return false;
5040 return true;
5041 };
5042
5043 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
5044 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
5045 return false;
5046 return true;
5047 }
5048
5049 /// A helper function for createAddRecFromPHI to handle simple cases.
5050 ///
5051 /// This function tries to find an AddRec expression for the simplest (yet most
5052 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
5053 /// If it fails, createAddRecFromPHI will use a more general, but slow,
5054 /// technique for finding the AddRec expression.
5055 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
5056 Value *BEValueV,
5057 Value *StartValueV) {
5058 const Loop *L = LI.getLoopFor(PN->getParent());
5059 assert(L && L->getHeader() == PN->getParent());
5060 assert(BEValueV && StartValueV);
5061
5062 auto BO = MatchBinaryOp(BEValueV, DT);
5063 if (!BO)
5064 return nullptr;
5065
5066 if (BO->Opcode != Instruction::Add)
5067 return nullptr;
5068
5069 const SCEV *Accum = nullptr;
5070 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5071 Accum = getSCEV(BO->RHS);
5072 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5073 Accum = getSCEV(BO->LHS);
5074
5075 if (!Accum)
5076 return nullptr;
5077
5078 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5079 if (BO->IsNUW)
5080 Flags = setFlags(Flags, SCEV::FlagNUW);
5081 if (BO->IsNSW)
5082 Flags = setFlags(Flags, SCEV::FlagNSW);
5083
5084 const SCEV *StartVal = getSCEV(StartValueV);
5085 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5086
5087 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5088
5089 // We can add Flags to the post-inc expression only if we
5090 // know that it is *undefined behavior* for BEValueV to
5091 // overflow.
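// A sketch of the idea (hypothetical IR): given
//   %i = phi i32 [ 0, %entry ], [ %i.next, %latch ]
//   %i.next = add nsw i32 %i, 1
// the pre-inc addrec is {0,+,1}<nsw> and the post-inc addrec is {1,+,1}.
// If isAddRecNeverPoison shows that a poison-producing overflow of %i.next
// would be guaranteed to trigger UB, the post-inc expression may carry
// <nsw> as well.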
5092 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5093 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5094 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5095
5096 return PHISCEV;
5097 }
5098
5099 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5100 const Loop *L = LI.getLoopFor(PN->getParent());
5101 if (!L || L->getHeader() != PN->getParent())
5102 return nullptr;
5103
5104 // The loop may have multiple entrances or multiple exits; we can analyze
5105 // this phi as an addrec if it has a unique entry value and a unique
5106 // backedge value.
5107 Value *BEValueV = nullptr, *StartValueV = nullptr;
5108 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5109 Value *V = PN->getIncomingValue(i);
5110 if (L->contains(PN->getIncomingBlock(i))) {
5111 if (!BEValueV) {
5112 BEValueV = V;
5113 } else if (BEValueV != V) {
5114 BEValueV = nullptr;
5115 break;
5116 }
5117 } else if (!StartValueV) {
5118 StartValueV = V;
5119 } else if (StartValueV != V) {
5120 StartValueV = nullptr;
5121 break;
5122 }
5123 }
5124 if (!BEValueV || !StartValueV)
5125 return nullptr;
5126
5127 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5128 "PHI node already processed?");
5129
5130 // First, try to find an AddRec expression without creating a fictitious
5131 // symbolic value for PN.
5132 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5133 return S;
5134
5135 // Handle PHI node value symbolically.
5136 const SCEV *SymbolicName = getUnknown(PN);
5137 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
5138
5139 // Using this symbolic name for the PHI, analyze the value coming around
5140 // the back-edge.
5141 const SCEV *BEValue = getSCEV(BEValueV);
5142
5143 // NOTE: If BEValue is loop invariant, we know that the PHI node just
5144 // has a special value for the first iteration of the loop.
5145
5146 // If the value coming around the backedge is an add with the symbolic
5147 // value we just inserted, then we found a simple induction variable!
5148 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5149 // If there is a single occurrence of the symbolic value, replace it
5150 // with a recurrence.
5151 unsigned FoundIndex = Add->getNumOperands();
5152 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5153 if (Add->getOperand(i) == SymbolicName)
5154 if (FoundIndex == e) {
5155 FoundIndex = i;
5156 break;
5157 }
5158
5159 if (FoundIndex != Add->getNumOperands()) {
5160 // Create an add with everything but the specified operand.
5161 SmallVector<const SCEV *, 8> Ops;
5162 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5163 if (i != FoundIndex)
5164 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5165 L, *this));
5166 const SCEV *Accum = getAddExpr(Ops);
5167
5168 // This is not a valid addrec if the step amount varies each loop
5169 // iteration but is not itself an addrec in this loop.
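// For example (illustrative): an Accum of {0,+,1}<L> -- a step that itself
// grows by one each iteration -- is accepted and gives PN a quadratic
// evolution, whereas a step reloaded from memory on every iteration is
// neither invariant nor an addrec in L and is rejected.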
5170 if (isLoopInvariant(Accum, L) ||
5171 (isa<SCEVAddRecExpr>(Accum) &&
5172 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5173 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5174
5175 if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5176 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5177 if (BO->IsNUW)
5178 Flags = setFlags(Flags, SCEV::FlagNUW);
5179 if (BO->IsNSW)
5180 Flags = setFlags(Flags, SCEV::FlagNSW);
5181 }
5182 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5183 // If the increment is an inbounds GEP, then we know the address
5184 // space cannot be wrapped around. We cannot make any guarantee
5185 // about signed or unsigned overflow because pointers are
5186 // unsigned but we may have a negative index from the base
5187 // pointer. We can guarantee that no unsigned wrap occurs if the
5188 // indices form a positive value.
5189 if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5190 Flags = setFlags(Flags, SCEV::FlagNW);
5191
5192 const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5193 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5194 Flags = setFlags(Flags, SCEV::FlagNUW);
5195 }
5196
5197 // We cannot transfer nuw and nsw flags from subtraction
5198 // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5199 // for instance.
5200 }
5201
5202 const SCEV *StartVal = getSCEV(StartValueV);
5203 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5204
5205 // Okay, for the entire analysis of this edge we assumed the PHI
5206 // to be symbolic. We now need to go back and purge all of the
5207 // entries for the scalars that use the symbolic expression.
5208 forgetSymbolicName(PN, SymbolicName);
5209 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5210
5211 // We can add Flags to the post-inc expression only if we
5212 // know that it is *undefined behavior* for BEValueV to
5213 // overflow.
5214 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5215 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5216 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5217
5218 return PHISCEV;
5219 }
5220 }
5221 } else {
5222 // Otherwise, this could be a loop like this:
5223 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
5224 // In this case, j = {1,+,1} and BEValue is j.
5225 // Because the other in-value of i (0) fits the evolution of BEValue,
5226 // i really is an addrec evolution.
5227 //
5228 // We can generalize this by saying that i is the shifted value of BEValue
5229 // by one iteration:
5230 // PHI(f(0), f({1,+,1})) --> f({0,+,1})
5231 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5232 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5233 if (Shifted != getCouldNotCompute() &&
5234 Start != getCouldNotCompute()) {
5235 const SCEV *StartVal = getSCEV(StartValueV);
5236 if (Start == StartVal) {
5237 // Okay, for the entire analysis of this edge we assumed the PHI
5238 // to be symbolic. We now need to go back and purge all of the
5239 // entries for the scalars that use the symbolic expression.
5240 forgetSymbolicName(PN, SymbolicName);
5241 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
5242 return Shifted;
5243 }
5244 }
5245 }
5246
5247 // Remove the temporary PHI node SCEV that has been inserted while intending
5248 // to create an AddRecExpr for this PHI node. We cannot keep this temporary
5249 // as it would prevent later (possibly simpler) SCEV expressions from being
5250 // added to the ValueExprMap.
5251 eraseValueFromMap(PN);
5252
5253 return nullptr;
5254 }
5255
5256 // Checks if the SCEV S is available at BB. S is considered available at BB
5257 // if S can be materialized at BB without introducing a fault.
5258 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5259 BasicBlock *BB) {
5260 struct CheckAvailable {
5261 bool TraversalDone = false;
5262 bool Available = true;
5263
5264 const Loop *L = nullptr; // The loop BB is in (can be nullptr)
5265 BasicBlock *BB = nullptr;
5266 DominatorTree &DT;
5267
5268 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5269 : L(L), BB(BB), DT(DT) {}
5270
5271 bool setUnavailable() {
5272 TraversalDone = true;
5273 Available = false;
5274 return false;
5275 }
5276
5277 bool follow(const SCEV *S) {
5278 switch (S->getSCEVType()) {
5279 case scConstant:
5280 case scPtrToInt:
5281 case scTruncate:
5282 case scZeroExtend:
5283 case scSignExtend:
5284 case scAddExpr:
5285 case scMulExpr:
5286 case scUMaxExpr:
5287 case scSMaxExpr:
5288 case scUMinExpr:
5289 case scSMinExpr:
5290 // These expressions are available if their operand(s) is/are.
5291 return true;
5292
5293 case scAddRecExpr: {
5294 // We allow add recurrences that are on the loop BB is in, or some
5295 // outer loop. This guarantees availability because the value of the
5296 // add recurrence at BB is simply the "current" value of the induction
5297 // variable. We can relax this in the future; for instance an add
5298 // recurrence on a sibling dominating loop is also available at BB.
5299 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5300 if (L && (ARLoop == L || ARLoop->contains(L)))
5301 return true;
5302
5303 return setUnavailable();
5304 }
5305
5306 case scUnknown: {
5307 // For SCEVUnknown, we check for simple dominance.
5308 const auto *SU = cast<SCEVUnknown>(S);
5309 Value *V = SU->getValue();
5310
5311 if (isa<Argument>(V))
5312 return false;
5313
5314 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5315 return false;
5316
5317 return setUnavailable();
5318 }
5319
5320 case scUDivExpr:
5321 case scCouldNotCompute:
5322 // We do not try to be smart about these at all.
5323 return setUnavailable();
5324 }
5325 llvm_unreachable("Unknown SCEV kind!");
5326 }
5327
5328 bool isDone() { return TraversalDone; }
5329 };
5330
5331 CheckAvailable CA(L, BB, DT);
5332 SCEVTraversal<CheckAvailable> ST(CA);
5333
5334 ST.visitAll(S);
5335 return CA.Available;
5336 }
5337
5338 // Try to match a control flow sequence that branches out at BI and merges back
5339 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
5340 // match.
5341 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5342 Value *&C, Value *&LHS, Value *&RHS) { 5343 C = BI->getCondition(); 5344 5345 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5346 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5347 5348 if (!LeftEdge.isSingleEdge()) 5349 return false; 5350 5351 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5352 5353 Use &LeftUse = Merge->getOperandUse(0); 5354 Use &RightUse = Merge->getOperandUse(1); 5355 5356 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5357 LHS = LeftUse; 5358 RHS = RightUse; 5359 return true; 5360 } 5361 5362 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5363 LHS = RightUse; 5364 RHS = LeftUse; 5365 return true; 5366 } 5367 5368 return false; 5369 } 5370 5371 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5372 auto IsReachable = 5373 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5374 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5375 const Loop *L = LI.getLoopFor(PN->getParent()); 5376 5377 // We don't want to break LCSSA, even in a SCEV expression tree. 5378 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5379 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5380 return nullptr; 5381 5382 // Try to match 5383 // 5384 // br %cond, label %left, label %right 5385 // left: 5386 // br label %merge 5387 // right: 5388 // br label %merge 5389 // merge: 5390 // V = phi [ %x, %left ], [ %y, %right ] 5391 // 5392 // as "select %cond, %x, %y" 5393 5394 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5395 assert(IDom && "At least the entry block should dominate PN"); 5396 5397 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5398 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5399 5400 if (BI && BI->isConditional() && 5401 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5402 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5403 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5404 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5405 } 5406 5407 return nullptr; 5408 } 5409 5410 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5411 if (const SCEV *S = createAddRecFromPHI(PN)) 5412 return S; 5413 5414 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5415 return S; 5416 5417 // If the PHI has a single incoming value, follow that value, unless the 5418 // PHI's incoming blocks are in a different loop, in which case doing so 5419 // risks breaking LCSSA form. Instcombine would normally zap these, but 5420 // it doesn't have DominatorTree information, so it may miss cases. 5421 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5422 if (LI.replacementPreservesLCSSAForm(PN, V)) 5423 return getSCEV(V); 5424 5425 // If it's not a loop phi, we can't handle it yet. 5426 return getUnknown(PN); 5427 } 5428 5429 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5430 Value *Cond, 5431 Value *TrueVal, 5432 Value *FalseVal) { 5433 // Handle "constant" branch or select. This can occur for instance when a 5434 // loop pass transforms an inner loop and moves on to process the outer loop. 5435 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5436 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5437 5438 // Try to match some simple smax or umax patterns. 
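// For instance (an illustrative case of the SGT rule below, with x = 0):
//   %c = icmp sgt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b
// is recognized as smax(%a, %b): both select arms differ from the compared
// values by the same amount (here zero).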
5439 auto *ICI = dyn_cast<ICmpInst>(Cond); 5440 if (!ICI) 5441 return getUnknown(I); 5442 5443 Value *LHS = ICI->getOperand(0); 5444 Value *RHS = ICI->getOperand(1); 5445 5446 switch (ICI->getPredicate()) { 5447 case ICmpInst::ICMP_SLT: 5448 case ICmpInst::ICMP_SLE: 5449 std::swap(LHS, RHS); 5450 LLVM_FALLTHROUGH; 5451 case ICmpInst::ICMP_SGT: 5452 case ICmpInst::ICMP_SGE: 5453 // a >s b ? a+x : b+x -> smax(a, b)+x 5454 // a >s b ? b+x : a+x -> smin(a, b)+x 5455 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5456 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5457 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5458 const SCEV *LA = getSCEV(TrueVal); 5459 const SCEV *RA = getSCEV(FalseVal); 5460 const SCEV *LDiff = getMinusSCEV(LA, LS); 5461 const SCEV *RDiff = getMinusSCEV(RA, RS); 5462 if (LDiff == RDiff) 5463 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5464 LDiff = getMinusSCEV(LA, RS); 5465 RDiff = getMinusSCEV(RA, LS); 5466 if (LDiff == RDiff) 5467 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5468 } 5469 break; 5470 case ICmpInst::ICMP_ULT: 5471 case ICmpInst::ICMP_ULE: 5472 std::swap(LHS, RHS); 5473 LLVM_FALLTHROUGH; 5474 case ICmpInst::ICMP_UGT: 5475 case ICmpInst::ICMP_UGE: 5476 // a >u b ? a+x : b+x -> umax(a, b)+x 5477 // a >u b ? b+x : a+x -> umin(a, b)+x 5478 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5479 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5480 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5481 const SCEV *LA = getSCEV(TrueVal); 5482 const SCEV *RA = getSCEV(FalseVal); 5483 const SCEV *LDiff = getMinusSCEV(LA, LS); 5484 const SCEV *RDiff = getMinusSCEV(RA, RS); 5485 if (LDiff == RDiff) 5486 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5487 LDiff = getMinusSCEV(LA, RS); 5488 RDiff = getMinusSCEV(RA, LS); 5489 if (LDiff == RDiff) 5490 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5491 } 5492 break; 5493 case ICmpInst::ICMP_NE: 5494 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5495 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5496 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5497 const SCEV *One = getOne(I->getType()); 5498 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5499 const SCEV *LA = getSCEV(TrueVal); 5500 const SCEV *RA = getSCEV(FalseVal); 5501 const SCEV *LDiff = getMinusSCEV(LA, LS); 5502 const SCEV *RDiff = getMinusSCEV(RA, One); 5503 if (LDiff == RDiff) 5504 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5505 } 5506 break; 5507 case ICmpInst::ICMP_EQ: 5508 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5509 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5510 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5511 const SCEV *One = getOne(I->getType()); 5512 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5513 const SCEV *LA = getSCEV(TrueVal); 5514 const SCEV *RA = getSCEV(FalseVal); 5515 const SCEV *LDiff = getMinusSCEV(LA, One); 5516 const SCEV *RDiff = getMinusSCEV(RA, LS); 5517 if (LDiff == RDiff) 5518 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5519 } 5520 break; 5521 default: 5522 break; 5523 } 5524 5525 return getUnknown(I); 5526 } 5527 5528 /// Expand GEP instructions into add and multiply operations. This allows them 5529 /// to be analyzed by regular SCEV code. 5530 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5531 // Don't attempt to analyze GEPs over unsized objects. 
5532 if (!GEP->getSourceElementType()->isSized())
5533 return getUnknown(GEP);
5534
5535 SmallVector<const SCEV *, 4> IndexExprs;
5536 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
5537 IndexExprs.push_back(getSCEV(*Index));
5538 return getGEPExpr(GEP, IndexExprs);
5539 }
5540
5541 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
5542 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
5543 return C->getAPInt().countTrailingZeros();
5544
5545 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S))
5546 return GetMinTrailingZeros(I->getOperand());
5547
5548 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
5549 return std::min(GetMinTrailingZeros(T->getOperand()),
5550 (uint32_t)getTypeSizeInBits(T->getType()));
5551
5552 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
5553 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5554 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5555 ? getTypeSizeInBits(E->getType())
5556 : OpRes;
5557 }
5558
5559 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
5560 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5561 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5562 ? getTypeSizeInBits(E->getType())
5563 : OpRes;
5564 }
5565
5566 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
5567 // The result is the min of all operands' results.
5568 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5569 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5570 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5571 return MinOpRes;
5572 }
5573
5574 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
5575 // The result is the sum of all operands' results.
5576 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
5577 uint32_t BitWidth = getTypeSizeInBits(M->getType());
5578 for (unsigned i = 1, e = M->getNumOperands();
5579 SumOpRes != BitWidth && i != e; ++i)
5580 SumOpRes =
5581 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
5582 return SumOpRes;
5583 }
5584
5585 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
5586 // The result is the min of all operands' results.
5587 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5588 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5589 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5590 return MinOpRes;
5591 }
5592
5593 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
5594 // The result is the min of all operands' results.
5595 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5596 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5597 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5598 return MinOpRes;
5599 }
5600
5601 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
5602 // The result is the min of all operands' results.
5603 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5604 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5605 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5606 return MinOpRes;
5607 }
5608
5609 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5610 // For a SCEVUnknown, ask ValueTracking.
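// For illustration (hypothetical IR): if U->getValue() is
//   %v = shl i32 %x, 3
// ValueTracking knows the three low bits of %v are zero, so this returns
// at least 3.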
5611 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
5612 return Known.countMinTrailingZeros();
5613 }
5614
5615 // SCEVUDivExpr
5616 return 0;
5617 }
5618
5619 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
5620 auto I = MinTrailingZerosCache.find(S);
5621 if (I != MinTrailingZerosCache.end())
5622 return I->second;
5623
5624 uint32_t Result = GetMinTrailingZerosImpl(S);
5625 auto InsertPair = MinTrailingZerosCache.insert({S, Result});
5626 assert(InsertPair.second && "Should insert a new key");
5627 return InsertPair.first->second;
5628 }
5629
5630 /// Helper method to assign a range to V from metadata present in the IR.
5631 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
5632 if (Instruction *I = dyn_cast<Instruction>(V))
5633 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
5634 return getConstantRangeFromMetadata(*MD);
5635
5636 return None;
5637 }
5638
5639 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
5640 SCEV::NoWrapFlags Flags) {
5641 if (AddRec->getNoWrapFlags(Flags) != Flags) {
5642 AddRec->setNoWrapFlags(Flags);
5643 UnsignedRanges.erase(AddRec);
5644 SignedRanges.erase(AddRec);
5645 }
5646 }
5647
5648 ConstantRange ScalarEvolution::
5649 getRangeForUnknownRecurrence(const SCEVUnknown *U) {
5650 const DataLayout &DL = getDataLayout();
5651
5652 unsigned BitWidth = getTypeSizeInBits(U->getType());
5653 ConstantRange CR(BitWidth, /*isFullSet=*/true);
5654
5655 // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
5656 // use information about the trip count to improve our available range. Note
5657 // that the trip count independent cases are already handled by known bits.
5658 // WARNING: The definition of recurrence used here is subtly different from
5659 // the one used by AddRec (and thus most of this file). Step is allowed to
5660 // be arbitrarily loop varying here, where AddRec allows only loop invariant
5661 // and other addrecs in the same loop (for non-affine addrecs). The code
5662 // below intentionally handles the case where step is not loop invariant.
5663 auto *P = dyn_cast<PHINode>(U->getValue());
5664 if (!P)
5665 return CR;
5666
5667 BinaryOperator *BO;
5668 Value *Start, *Step;
5669 if (!matchSimpleRecurrence(P, BO, Start, Step))
5670 return CR;
5671
5672 // If we found a recurrence, we must be in a loop -- unless we're
5673 // in unreachable code where dominance collapses. Note that BO might
5674 // be in some subloop of L, and that's completely okay.
5675 auto *L = LI.getLoopFor(P->getParent());
5676 if (!L)
5677 return CR;
5678 assert(L->getHeader() == P->getParent());
5679 if (!L->contains(BO->getParent()))
5680 // NOTE: This bailout should be an assert instead. However, asserting
5681 // the condition here exposes a case where LoopFusion is querying SCEV
5682 // with malformed loop information in the midst of the transform.
5683 // There doesn't appear to be an obvious fix, so for the moment bail out
5684 // until the caller issue can be fixed. PR49566 tracks the bug.
5685 return CR; 5686 5687 // TODO: Handle ashr and lshr cases to increase minimum value reported 5688 if (BO->getOpcode() != Instruction::Shl || BO->getOperand(0) != P) 5689 return CR; 5690 5691 unsigned TC = getSmallConstantMaxTripCount(L); 5692 if (!TC || TC >= BitWidth) 5693 return CR; 5694 5695 auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT); 5696 auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT); 5697 assert(KnownStart.getBitWidth() == BitWidth && 5698 KnownStep.getBitWidth() == BitWidth); 5699 5700 // Compute total shift amount, being careful of overflow and bitwidths. 5701 auto MaxShiftAmt = KnownStep.getMaxValue(); 5702 APInt TCAP(BitWidth, TC-1); 5703 bool Overflow = false; 5704 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow); 5705 if (Overflow) 5706 return CR; 5707 5708 // Iff no bits are shifted out, value increases on every shift. 5709 auto KnownEnd = KnownBits::shl(KnownStart, 5710 KnownBits::makeConstant(TotalShift)); 5711 if (TotalShift.ult(KnownStart.countMinLeadingZeros())) 5712 CR = CR.intersectWith(ConstantRange(KnownStart.getMinValue(), 5713 KnownEnd.getMaxValue() + 1)); 5714 return CR; 5715 } 5716 5717 5718 5719 /// Determine the range for a particular SCEV. If SignHint is 5720 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5721 /// with a "cleaner" unsigned (resp. signed) representation. 5722 const ConstantRange & 5723 ScalarEvolution::getRangeRef(const SCEV *S, 5724 ScalarEvolution::RangeSignHint SignHint) { 5725 DenseMap<const SCEV *, ConstantRange> &Cache = 5726 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5727 : SignedRanges; 5728 ConstantRange::PreferredRangeType RangeType = 5729 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 5730 ? ConstantRange::Unsigned : ConstantRange::Signed; 5731 5732 // See if we've computed this range already. 5733 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5734 if (I != Cache.end()) 5735 return I->second; 5736 5737 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5738 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5739 5740 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5741 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5742 using OBO = OverflowingBinaryOperator; 5743 5744 // If the value has known zeros, the maximum value will have those known zeros 5745 // as well. 
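// For illustration (assumed values): with BitWidth = 8 and TZ = 2, any
// value whose two low bits are zero is at most 0xFC = 252, so the unsigned
// hint below yields the bound [0, 253).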
5746 uint32_t TZ = GetMinTrailingZeros(S); 5747 if (TZ != 0) { 5748 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5749 ConservativeResult = 5750 ConstantRange(APInt::getMinValue(BitWidth), 5751 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5752 else 5753 ConservativeResult = ConstantRange( 5754 APInt::getSignedMinValue(BitWidth), 5755 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5756 } 5757 5758 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5759 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5760 unsigned WrapType = OBO::AnyWrap; 5761 if (Add->hasNoSignedWrap()) 5762 WrapType |= OBO::NoSignedWrap; 5763 if (Add->hasNoUnsignedWrap()) 5764 WrapType |= OBO::NoUnsignedWrap; 5765 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5766 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 5767 WrapType, RangeType); 5768 return setRange(Add, SignHint, 5769 ConservativeResult.intersectWith(X, RangeType)); 5770 } 5771 5772 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5773 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5774 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5775 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5776 return setRange(Mul, SignHint, 5777 ConservativeResult.intersectWith(X, RangeType)); 5778 } 5779 5780 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5781 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5782 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5783 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5784 return setRange(SMax, SignHint, 5785 ConservativeResult.intersectWith(X, RangeType)); 5786 } 5787 5788 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5789 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5790 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5791 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5792 return setRange(UMax, SignHint, 5793 ConservativeResult.intersectWith(X, RangeType)); 5794 } 5795 5796 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) { 5797 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint); 5798 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i) 5799 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint)); 5800 return setRange(SMin, SignHint, 5801 ConservativeResult.intersectWith(X, RangeType)); 5802 } 5803 5804 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) { 5805 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint); 5806 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i) 5807 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint)); 5808 return setRange(UMin, SignHint, 5809 ConservativeResult.intersectWith(X, RangeType)); 5810 } 5811 5812 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5813 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5814 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5815 return setRange(UDiv, SignHint, 5816 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 5817 } 5818 5819 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5820 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5821 return setRange(ZExt, SignHint, 5822 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), 5823 RangeType)); 5824 } 5825 5826 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5827 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5828 return setRange(SExt, 
SignHint, 5829 ConservativeResult.intersectWith(X.signExtend(BitWidth), 5830 RangeType)); 5831 } 5832 5833 if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) { 5834 ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint); 5835 return setRange(PtrToInt, SignHint, X); 5836 } 5837 5838 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5839 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5840 return setRange(Trunc, SignHint, 5841 ConservativeResult.intersectWith(X.truncate(BitWidth), 5842 RangeType)); 5843 } 5844 5845 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5846 // If there's no unsigned wrap, the value will never be less than its 5847 // initial value. 5848 if (AddRec->hasNoUnsignedWrap()) { 5849 APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart()); 5850 if (!UnsignedMinValue.isNullValue()) 5851 ConservativeResult = ConservativeResult.intersectWith( 5852 ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType); 5853 } 5854 5855 // If there's no signed wrap, and all the operands except initial value have 5856 // the same sign or zero, the value won't ever be: 5857 // 1: smaller than initial value if operands are non negative, 5858 // 2: bigger than initial value if operands are non positive. 5859 // For both cases, value can not cross signed min/max boundary. 5860 if (AddRec->hasNoSignedWrap()) { 5861 bool AllNonNeg = true; 5862 bool AllNonPos = true; 5863 for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) { 5864 if (!isKnownNonNegative(AddRec->getOperand(i))) 5865 AllNonNeg = false; 5866 if (!isKnownNonPositive(AddRec->getOperand(i))) 5867 AllNonPos = false; 5868 } 5869 if (AllNonNeg) 5870 ConservativeResult = ConservativeResult.intersectWith( 5871 ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()), 5872 APInt::getSignedMinValue(BitWidth)), 5873 RangeType); 5874 else if (AllNonPos) 5875 ConservativeResult = ConservativeResult.intersectWith( 5876 ConstantRange::getNonEmpty( 5877 APInt::getSignedMinValue(BitWidth), 5878 getSignedRangeMax(AddRec->getStart()) + 1), 5879 RangeType); 5880 } 5881 5882 // TODO: non-affine addrec 5883 if (AddRec->isAffine()) { 5884 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop()); 5885 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5886 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5887 auto RangeFromAffine = getRangeForAffineAR( 5888 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5889 BitWidth); 5890 ConservativeResult = 5891 ConservativeResult.intersectWith(RangeFromAffine, RangeType); 5892 5893 auto RangeFromFactoring = getRangeViaFactoring( 5894 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5895 BitWidth); 5896 ConservativeResult = 5897 ConservativeResult.intersectWith(RangeFromFactoring, RangeType); 5898 } 5899 5900 // Now try symbolic BE count and more powerful methods. 
      if (UseExpensiveRangeSharpening) {
        const SCEV *SymbolicMaxBECount =
            getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
        if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
            getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
            AddRec->hasNoSelfWrap()) {
          auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
              AddRec, SymbolicMaxBECount, BitWidth, SignHint);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
        }
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {

    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
                                                            RangeType);

    // Use facts about recurrences in the underlying IR.  Note that add
    // recurrences are AddRecExprs and thus don't hit this path.  This
    // primarily handles shift recurrences.
    auto CR = getRangeForUnknownRecurrence(U);
    ConservativeResult = ConservativeResult.intersectWith(CR);

    // See if ValueTracking can give us a useful range.
    const DataLayout &DL = getDataLayout();
    KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
    if (Known.getBitWidth() != BitWidth)
      Known = Known.zextOrTrunc(BitWidth);

    // ValueTracking may be able to compute a tighter result for the number of
    // sign bits than for the value of those sign bits.
    unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
    if (U->getType()->isPointerTy()) {
      // If the pointer size is larger than the index size type, this can cause
      // NS to be larger than BitWidth. So compensate for this.
      unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
      int ptrIdxDiff = ptrSize - BitWidth;
      if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
        NS -= ptrIdxDiff;
    }

    if (NS > 1) {
      // If we know any of the sign bits, we know all of the sign bits.
      if (!Known.Zero.getHiBits(NS).isNullValue())
        Known.Zero.setHighBits(NS);
      if (!Known.One.getHiBits(NS).isNullValue())
        Known.One.setHighBits(NS);
    }

    if (Known.getMinValue() != Known.getMaxValue() + 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
          RangeType);
    if (NS > 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                        APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
          RangeType);

    // The range of a PHI is a subset of the union of the ranges of its inputs.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not revisit PHIs in a cycle.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void)Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

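// Illustrative note on the sign-bit sharpening in getRangeRef above (the
// numbers here are hypothetical): for an i32 SCEVUnknown with NS == 24 known
// sign bits, the intersected range is
//   [SignedMin.ashr(23), SignedMax.ashr(23) + 1) == [-256, 256),
// since a value with 24 sign bits fits in 32 - 24 + 1 == 9 signed bits.
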
// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take. Initially, the expression has a
// value from StartRange and then is changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold due to the well-defined wrap-around behavior of
    // APInt.
    Step = Step.abs();

  // Check if Offset is more than the full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. The checks above
  // guarantee no overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise. The maximum value of the final range will match the
  // maximal value of StartRange if the expression is decreasing and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap-around). This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

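// A small worked example for getRangeForAffineARHelper above (the concrete
// numbers are hypothetical): in i8, with Step == 2, StartRange == [10, 21)
// and MaxBECount == 3, the value can grow by at most Offset == 2 * 3 == 6,
// so the moved boundary is 20 + 6 == 26 and the result is [10, 27). With a
// signed Step == -2 instead, the low end moves from 10 down to 4 and the
// result is [4, 21).
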
ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider the step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider the step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect the signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

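// Illustrative only: for the affine AddRec {0,+,1} in i8 with a constant max
// backedge-taken count of 100, both the signed and the unsigned queries above
// yield [0, 101), so the intersection is [0, 101). When the signed step range
// straddles zero, the two signed helper calls cover the most-negative and
// most-positive steps separately, and their union keeps the result
// conservative.
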
ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with a constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer nw from some exit for which
  // we do not know the max exit count (or any other side reasoning).
  // TODO: Turn into assert at some point.
  if (getTypeSizeInBits(MaxBECount->getType()) >
      getTypeSizeInBits(AddRec->getType()))
    return ConstantRange::getFull(BitWidth);
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take Start and End values and
  // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
  // the iteration. They either lie inside the range [Min(Start, End),
  // Max(Start, End)] or outside it:
  //
  // Case 1: RangeMin    ...    Start V1 ... VN End ...           RangeMax;
  // Case 2: RangeMin Vk ... V1 Start    ...    End Vn ... Vk + 1 RangeMax;
  //
  // The no-self-wrap flag guarantees that the intermediate values cannot be
  // BOTH outside and inside the range [Min(Start, End), Max(Start, End)].
  // Using that knowledge, let's try to prove that we are dealing with Case 1.
  // It is so if Start <= End and the step is positive, or Start >= End and
  // the step is negative.
  const SCEV *Start = AddRec->getStart();
  ConstantRange StartRange = getRangeRef(Start, SignHint);
  ConstantRange EndRange = getRangeRef(End, SignHint);
  ConstantRange RangeBetween = StartRange.unionWith(EndRange);
  // If they already cover the full iteration space, we will know nothing
  // useful even if we prove what we want to prove.
  if (RangeBetween.isFullSet())
    return RangeBetween;
  // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
  bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
                               : RangeBetween.isWrappedSet();
  if (IsWrappedSet)
    return ConstantRange::getFull(BitWidth);

  if (isKnownPositive(Step) &&
      isKnownPredicateViaConstantRanges(LEPred, Start, End))
    return RangeBetween;
  else if (isKnownNegative(Step) &&
           isKnownPredicateViaConstantRanges(GEPred, Start, End))
    return RangeBetween;
  return ConstantRange::getFull(BitWidth);
}

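// A hypothetical illustration of the Case 1 reasoning above: for {0,+,1}<nw>
// in i8 with a symbolic max backedge-taken count n known to be ULE 200,
// Start == 0 and End == n. StepAbs == 1, so the loop cannot run long enough
// to self-wrap (at most 255 iterations would be needed), and since 0 ULE n
// trivially holds with a positive step, every intermediate value lies in
// RangeBetween; the unsigned query can return the union of StartRange and
// EndRange instead of the full set.
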
ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  //                          == RangeOf({A,+,P}) union RangeOf({B,+,Q})

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp.hasValue())
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases
    // where that will help over what getRange already does, though.
    return ConstantRange::getFull(BitWidth);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here.  This function is
  // called from deep in the call stack, and calling getSCEV (on a sext
  // instruction, say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);

  return TrueRange.unionWith(FalseRange);
}

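// Sketch of the pattern getRangeViaFactoring above recognizes (hypothetical
// IR, for illustration only):
//   %start = select i1 %c, i32 0, i32 100
//   %step  = select i1 %c, i32 1, i32 -1
//   %iv    = phi i32 [ %start, %preheader ], [ %iv.next, %latch ]
//   %iv.next = add i32 %iv, %step
// Both SelectPatterns share %c as their Condition, so the range of the AddRec
// is computed as range({0,+,1}) union range({100,+,-1}).
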
SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V))
    return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}

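// For illustration (hypothetical IR): given a loop-header instruction
//   %i.next = add nuw nsw i32 %i, 1
// getNoWrapFlagsFromUB above returns NUW|NSW only if isSCEVExprNeverPoison
// (below) can prove that %i.next executes on every iteration of the relevant
// loop; otherwise another instruction mapping to the same SCEV might wrap,
// and FlagAnyWrap is returned instead.
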
bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Here we check that I is in the header of the innermost loop containing I,
  // since we only deal with instructions in the loop header. The actual loop
  // we need to check later will come from an add recurrence, but getting that
  // requires computing the SCEV of the operands, which can be expensive. This
  // check is cheap and lets us rule out some cases early.
  Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
  if (InnermostContainingLoop == nullptr ||
      InnermostContainingLoop->getHeader() != I->getParent())
    return false;

  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find the
  // loop that I is considered in relation to and prove that I is executed for
  // every iteration of that loop. That implies that the value that I
  // calculates does not wrap anywhere in the loop, so then we can apply the
  // flags to the SCEV.
  //
  // We check isLoopInvariant to disambiguate in case we are adding recurrences
  // from different loops, so that we know which loop to prove that I is
  // executed in.
  for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }
  return false;
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison, period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (call the iteration in which \p I
  // first becomes poison K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects.  In this case executing the backedge an infinite
  //    number of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect.  In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison.  Only
  // things that are known to be poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesPoison(cast<Operator>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

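// A hypothetical illustration of the latch reasoning above:
//   loop:
//     %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nsw i32 %iv, 1
//     %c = icmp slt i32 %iv.next, %n
//     br i1 %c, label %loop, label %exit
// Here %iv.next reaches the latch branch through %c, icmp propagates poison,
// and the latch is the only exiting block, so a poisoned post-inc value would
// make the backedge control dependent on poison.
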
ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayHaveSideEffects();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

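// For example (illustrative): a loop containing a volatile store has
// HasNoSideEffects == false, because such a store is not "simple", and a
// loop containing a call that may throw has HasNoAbnormalExits == false,
// because that call is not guaranteed to transfer execution to its
// successor.
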
const SCEV *ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT.isReachableFromEntry(I->getParent()))
      return getUnknown(UndefValue::get(V->getType()));
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
  else if (!isa<ConstantExpr>(V))
    return getUnknown(V);

  Operator *U = cast<Operator>(V);
  if (auto BO = MatchBinaryOp(U, DT)) {
    switch (BO->Opcode) {
    case Instruction::Add: {
      // The simple thing to do would be to just call getSCEV on both operands
      // and call getAddExpr with the result. However if we're looking at a
      // bunch of things all added together, this can be quite inefficient,
      // because it leads to N-1 getAddExpr calls for N ultimate operands.
      // Instead, gather up all the operands and make a single getAddExpr call.
      // LLVM IR canonical form means we need only traverse the left operands.
      SmallVector<const SCEV *, 4> AddOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            AddOps.push_back(OpSCEV);
            break;
          }

          // If a NUW or NSW flag can be applied to the SCEV for this
          // addition, then compute the SCEV for this addition by itself
          // with a separate call to getAddExpr. We need to do that
          // instead of pushing the operands of the addition onto AddOps,
          // since the flags are only known to apply to this particular
          // addition - they may not apply to other additions that can be
          // formed with operands from AddOps.
          const SCEV *RHS = getSCEV(BO->RHS);
          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            const SCEV *LHS = getSCEV(BO->LHS);
            if (BO->Opcode == Instruction::Sub)
              AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
            else
              AddOps.push_back(getAddExpr(LHS, RHS, Flags));
            break;
          }
        }

        if (BO->Opcode == Instruction::Sub)
          AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
        else
          AddOps.push_back(getSCEV(BO->RHS));

        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || (NewBO->Opcode != Instruction::Add &&
                       NewBO->Opcode != Instruction::Sub)) {
          AddOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getAddExpr(AddOps);
    }

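    // Illustrative only: for IR such as
    //   %t0 = add i32 %a, %b
    //   %t1 = add i32 %t0, %c
    // the loop above walks the left operands and gathers the SCEVs of %c, %b
    // and %a (or their no-wrap sub-sums) into AddOps, so a single
    // getAddExpr(AddOps) call is made instead of a chain of two-operand
    // additions.
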
    case Instruction::Mul: {
      SmallVector<const SCEV *, 4> MulOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            MulOps.push_back(OpSCEV);
            break;
          }

          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            MulOps.push_back(
                getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags));
            break;
          }
        }

        MulOps.push_back(getSCEV(BO->RHS));
        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || NewBO->Opcode != Instruction::Mul) {
          MulOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getMulExpr(MulOps);
    }
    case Instruction::UDiv:
      return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::URem:
      return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::Sub: {
      SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
      if (BO->Op)
        Flags = getNoWrapFlagsFromUB(BO->Op);
      return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
    }
    case Instruction::And:
      // For an expression like x&255 that merely masks off the high bits,
      // use zext(trunc(x)) as the SCEV expression.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        if (CI->isZero())
          return getSCEV(BO->RHS);
        if (CI->isMinusOne())
          return getSCEV(BO->LHS);
        const APInt &A = CI->getValue();

        // Instcombine's ShrinkDemandedConstant may strip bits out of
        // constants, obscuring what would otherwise be a low-bits mask.
        // Use computeKnownBits to compute what ShrinkDemandedConstant
        // knew about to reconstruct a low-bits mask value.
        unsigned LZ = A.countLeadingZeros();
        unsigned TZ = A.countTrailingZeros();
        unsigned BitWidth = A.getBitWidth();
        KnownBits Known(BitWidth);
        computeKnownBits(BO->LHS, Known, getDataLayout(),
                         0, &AC, nullptr, &DT);

        APInt EffectiveMask =
            APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
        if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
          const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
          const SCEV *LHS = getSCEV(BO->LHS);
          const SCEV *ShiftedLHS = nullptr;
          if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
            if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
              // For an expression like (x * 8) & 8, simplify the multiply.
              unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
              unsigned GCD = std::min(MulZeros, TZ);
              APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
              SmallVector<const SCEV*, 4> MulOps;
              MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
              MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end());
              auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
              ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
            }
          }
          if (!ShiftedLHS)
            ShiftedLHS = getUDivExpr(LHS, MulCount);
          return getMulExpr(
              getZeroExtendExpr(
                  getTruncateExpr(ShiftedLHS,
                      IntegerType::get(getContext(), BitWidth - LZ - TZ)),
                  BO->LHS->getType()),
              MulCount);
        }
      }
      break;

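    // A hedged sketch of the And lowering above (hypothetical values): for
    // x & 0x78 in i8, LZ == 1 and TZ == 3, so EffectiveMask == 0x78 and the
    // rewrite applies regardless of the known bits. The result is
    //   (zext (trunc (x /u 8) to i4) to i8) * 8,
    // which keeps bits 3..6 of x and clears the rest, exactly as the mask
    // does.
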
    case Instruction::Or:
      // If the RHS of the Or is a constant, we may have something like:
      // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
      // optimizations will transparently handle this case.
      //
      // In order for this transformation to be safe, the LHS must be of the
      // form X*(2^n) and the Or constant must be less than 2^n.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        const SCEV *LHS = getSCEV(BO->LHS);
        const APInt &CIVal = CI->getValue();
        if (GetMinTrailingZeros(LHS) >=
            (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
          // Build a plain add SCEV.
          return getAddExpr(LHS, getSCEV(CI),
                            (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
        }
      }
      break;

    case Instruction::Xor:
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        // If the RHS of xor is -1, then this is a not operation.
        if (CI->isMinusOne())
          return getNotSCEV(getSCEV(BO->LHS));

        // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
        // This is a variant of the check for xor with -1, and it handles
        // the case where instcombine has trimmed non-demanded bits out
        // of an xor with -1.
        if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
          if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
            if (LBO->getOpcode() == Instruction::And &&
                LCI->getValue() == CI->getValue())
              if (const SCEVZeroExtendExpr *Z =
                      dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
                Type *UTy = BO->LHS->getType();
                const SCEV *Z0 = Z->getOperand();
                Type *Z0Ty = Z0->getType();
                unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

                // If C is a low-bits mask, the zero extend is serving to
                // mask off the high bits.  Complement the operand and
                // re-apply the zext.
                if (CI->getValue().isMask(Z0TySize))
                  return getZeroExtendExpr(getNotSCEV(Z0), UTy);

                // If C is a single bit, it may be in the sign-bit position
                // before the zero-extend.  In this case, represent the xor
                // using an add, which is equivalent, and re-apply the zext.
                APInt Trunc = CI->getValue().trunc(Z0TySize);
                if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                    Trunc.isSignMask())
                  return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                           UTy);
              }
      }
      break;

    case Instruction::Shl:
      // Turn shift left of a constant amount into a multiply.
      if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
        uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

        // If the shift count is not less than the bitwidth, the result of
        // the shift is undefined. Don't try to analyze it, because the
        // resolution chosen here may differ from the resolution chosen in
        // other parts of the compiler.
        if (SA->getValue().uge(BitWidth))
          break;

        // We can safely preserve the nuw flag in all cases. It's also safe to
        // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
        // requires special handling. It can be preserved as long as we're not
        // left shifting by bitwidth - 1.
        auto Flags = SCEV::FlagAnyWrap;
        if (BO->Op) {
          auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
          if ((MulFlags & SCEV::FlagNSW) &&
              ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
          if (MulFlags & SCEV::FlagNUW)
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
        }

        Constant *X = ConstantInt::get(
            getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
      }
      break;

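    // Illustrative: shl i32 %x, 3 becomes %x * 8 above. An nuw flag transfers
    // directly; nsw transfers only when paired with nuw or when the shift
    // amount is less than BitWidth - 1, because the multiplier
    // 1 << (BitWidth - 1) is itself the signed minimum: in i8, (-1) shl 7
    // (== -128) does not signed-wrap, while the equivalent (-1) * (-128)
    // (== 128) would.
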
    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.

        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        if (L->getOperand(1) == BO->RHS)
          // For a two-shift sext-inreg, i.e. n = m,
          // use sext(trunc(x)) as the SCEV expression.
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV
            // expression. We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
            // ShlAmt - AShrAmt < BitWidth - AShrAmt.
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                           getConstant(Mul)), OuterTy);
          }
        }
      }
      if (BO->IsExact) {
        // Given exact arithmetic in-bounds right-shift by a constant,
        // we can lower it into:  (abs(x) EXACT/u (1<<C)) * signum(x)
        const SCEV *X = getSCEV(BO->LHS);
        const SCEV *AbsX = getAbsExpr(X, /*IsNSW=*/false);
        APInt Mult = APInt::getOneBitSet(BitWidth, AShrAmt);
        const SCEV *Div = getUDivExactExpr(AbsX, getConstant(Mult));
        return getMulExpr(Div, getSignumExpr(X), SCEV::FlagNSW);
      }
      break;
    }
    }
  }

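  // Two illustrative instances of the AShr handling above (hypothetical IR):
  // the classic sext_inreg idiom
  //   %s = shl i32 %x, 24
  //   %t = ashr i32 %s, 24
  // is modeled as sext(trunc %x to i8) to i32, and with shl by 28 instead
  // (n > m), the result is sext((trunc %x to i8) * 16) to i32.
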
  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B.  By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  case Instruction::PtrToInt: {
    // Pointer to integer cast is straightforward, so do model it.
    Value *Ptr = U->getOperand(0);
    const SCEV *Op = getSCEV(Ptr);
    Type *DstIntTy = U->getType();
    Type *PtrTy = Ptr->getType();
    Type *IntPtrTy = getDataLayout().getIntPtrType(PtrTy);
    // But only if the effective SCEV (integer) type is wide enough to
    // represent all possible pointer values.
    if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(PtrTy)) !=
        getDataLayout().getTypeSizeInBits(IntPtrTy))
      return getUnknown(V);
    return getPtrToIntExpr(Op, DstIntTy);
  }
  case Instruction::IntToPtr:
    // Just don't deal with inttoptr casts.
    return getUnknown(V);

  case Instruction::SDiv:
    // If both operands are non-negative, this is just an udiv.
    if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
        isKnownNonNegative(getSCEV(U->getOperand(1))))
      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
    break;

  case Instruction::SRem:
    // If both operands are non-negative, this is just an urem.
    if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
        isKnownNonNegative(getSCEV(U->getOperand(1))))
      return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
    break;

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through.  Since
    // createNodeForSelect only works for a condition that is an `ICmpInst`,
    // and constant expressions cannot have instructions as operands, we'd
    // have returned getUnknown for a select constant expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
      return getSCEV(RV);

    if (auto *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::abs:
        return getAbsExpr(
            getSCEV(II->getArgOperand(0)),
            /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
      case Intrinsic::umax:
        return getUMaxExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::umin:
        return getUMinExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::smax:
        return getSMaxExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::smin:
        return getSMinExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::usub_sat: {
        const SCEV *X = getSCEV(II->getArgOperand(0));
        const SCEV *Y = getSCEV(II->getArgOperand(1));
        const SCEV *ClampedY = getUMinExpr(X, Y);
        return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
      }
      case Intrinsic::uadd_sat: {
        const SCEV *X = getSCEV(II->getArgOperand(0));
        const SCEV *Y = getSCEV(II->getArgOperand(1));
        const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
        return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
      }
      case Intrinsic::start_loop_iterations:
        // A start_loop_iterations is just equivalent to the first operand for
        // SCEV purposes.
        return getSCEV(II->getArgOperand(0));
      default:
        break;
      }
    }
    break;
  }

  return getUnknown(V);
}

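// Sanity check of the saturating-arithmetic lowerings in createSCEV above,
// with hypothetical i8 values: usub.sat(10, 20) clamps Y to umin(10, 20) == 10
// and returns 10 - 10 == 0; uadd.sat(250, 20) clamps X to
// umin(250, ~20 == 235) == 235 and returns 235 + 20 == 255, i.e. saturation
// at the unsigned maximum. After clamping, neither operation can wrap, which
// is why FlagNUW is sound there.
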
//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned
ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                           const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (which is also the case if the trip count is simply
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power-of-two
    // divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power-of-two divisor returned.
    return 1U << std::min((uint32_t)31,
                          GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

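// Illustrative example (hypothetical): if the backedge-taken count of an exit
// is 4*n + 3 for an unknown n, the trip count expression is 4*n + 4, whose
// minimum number of trailing zero bits is 2, so getSmallConstantTripMultiple
// above returns 4: whatever n is, the trip count is divisible by 4.
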
const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          const BasicBlock *ExitingBlock,
                                          ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
  case SymbolicMaximum:
    return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
  };
  llvm_unreachable("Invalid ExitCountKind!");
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
                                                   ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getBackedgeTakenInfo(L).getExact(L, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getConstantMax(this);
  case SymbolicMaximum:
    return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
  };
  llvm_unreachable("Invalid ExitCountKind!");
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
}

/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    Worklist.push_back(&PN);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});

  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}

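// The insert-then-recompute idiom above (and in getBackedgeTakenInfo below)
// first parks an empty BackedgeTakenInfo in the map so that any recursive
// query issued while computing the real count sees "no info" instead of
// recursing forever; the freshly computed result then overwrites the
// placeholder through a re-lookup, since the recursion may have grown the
// map and invalidated iterators.
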
ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting it
  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the
  // result must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // In a release build, the statistics are unused; these casts suppress
  // unused-variable warnings.
  (void)NumTripCountsComputed;
  (void)NumTripCountsNotComputed;
#if LLVM_ENABLE_STATS || !defined(NDEBUG)
  const SCEV *BEExact = Result.getExact(L, this);
  if (BEExact != getCouldNotCompute()) {
    assert(isLoopInvariant(BEExact, L) &&
           isLoopInvariant(Result.getConstantMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
             isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }
#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Discovered;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the progress of being computed
        // by createNodeForPHI.  In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      // Since we don't need to invalidate anything for correctness and we're
      // only invalidating to make SCEV's results more precise, we get to stop
      // early to avoid invalidating too much.  This is especially important
      // in cases like:
      //
      //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
      //   loop0:
      //     %pn0 = phi
      //     ...
      //   loop1:
      //     %pn1 = phi
      //     ...
      //
      // where both loop0's and loop1's backedge-taken counts use the SCEV
      // expression for %v.  If we don't have the early stop below then in
      // cases like the above, getBackedgeTakenInfo(loop1) will clear out the
      // trip count for loop0 and getBackedgeTakenInfo(loop0) will clear out
      // the trip count for loop1, effectively nullifying SCEV's trip count
      // cache.
      for (auto *U : I->users())
        if (auto *I = dyn_cast<Instruction>(U)) {
          auto *LoopForUser = LI.getLoopFor(I->getParent());
          if (LoopForUser && L->contains(LoopForUser) &&
              Discovered.insert(I).second)
            Worklist.push_back(I);
        }
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = std::move(Result);
}

void ScalarEvolution::forgetAllLoops() {
  // This method is intended to forget all info about loops. It should
  // invalidate caches as if the following happened:
  // - The trip counts of all loops have changed arbitrarily
  // - Every llvm::Value has been updated in place to produce a different
  //   result.
  BackedgeTakenCounts.clear();
  PredicatedBackedgeTakenCounts.clear();
  LoopPropertiesCache.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValueExprMap.clear();
  ValuesAtScopes.clear();
  LoopDispositions.clear();
  BlockDispositions.clear();
  UnsignedRanges.clear();
  SignedRanges.clear();
  ExprValueMap.clear();
  HasRecMap.clear();
  MinTrailingZerosCache.clear();
  PredicatedSCEVRewrites.clear();
}

void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value.
  auto RemoveLoopFromBackedgeMap =
      [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
        auto BTCPos = Map.find(L);
        if (BTCPos != Map.end()) {
          BTCPos->second.clear();
          Map.erase(BTCPos);
        }
      };

  SmallVector<const Loop *, 16> LoopWorklist(1, L);
  SmallVector<Instruction *, 32> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;

  // Iterate over all the loops and sub-loops to drop SCEV information.
  while (!LoopWorklist.empty()) {
    auto *CurrL = LoopWorklist.pop_back_val();

    RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
    RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);

    // Drop information about predicated SCEV rewrites for this loop.
    for (auto I = PredicatedSCEVRewrites.begin();
         I != PredicatedSCEVRewrites.end();) {
      std::pair<const SCEV *, const Loop *> Entry = I->first;
      if (Entry.second == CurrL)
        PredicatedSCEVRewrites.erase(I++);
      else
        ++I;
    }

    auto LoopUsersItr = LoopUsers.find(CurrL);
    if (LoopUsersItr != LoopUsers.end()) {
      for (auto *S : LoopUsersItr->second)
        forgetMemoizedResults(S);
      LoopUsers.erase(LoopUsersItr);
    }

    // Drop information about expressions based on loop-header PHIs.
    PushLoopPHIs(CurrL, Worklist);

    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second)
        continue;

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(It->second);
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }

    LoopPropertiesCache.erase(CurrL);
    // Forget all contained loops too, to avoid dangling entries in the
    // ValuesAtScopes map.
    LoopWorklist.append(CurrL->begin(), CurrL->end());
  }
}

void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
  while (Loop *Parent = L->getParentLoop())
    L = Parent;
  forgetLoop(L);
}

void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}

void ScalarEvolution::forgetLoopDispositions(const Loop *L) {
  LoopDispositions.clear();
}

/// Get the exact loop backedge-taken count considering all loop exits. A
/// computable result can only be returned for loops with all exiting blocks
/// dominating the latch. howFarToZero assumes that the limit of each loop
/// test is never skipped. This is a valid assumption as long as the loop
/// exits via that test. For precise results, it is the caller's
/// responsibility to specify the relevant loop exiting block using
/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
                                             SCEVUnionPredicate *Preds) const {
  // If any exits were not computable, the loop is not computable.
  if (!isComplete() || ExitNotTaken.empty())
    return SE->getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  // All exiting blocks we have collected must dominate the only backedge.
  if (!Latch)
    return SE->getCouldNotCompute();

  // All exiting blocks we have gathered dominate the loop's latch, so the
  // exact trip count is simply the minimum of all the calculated exit counts.
  SmallVector<const SCEV *, 2> Ops;
  for (auto &ENT : ExitNotTaken) {
    const SCEV *BECount = ENT.ExactNotTaken;
    assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
    assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
           "We should only have known counts for exiting blocks that dominate "
           "latch!");

    Ops.push_back(BECount);

    if (Preds && !ENT.hasAlwaysTruePredicate())
      Preds->add(ENT.Predicate.get());

    assert((Preds || ENT.hasAlwaysTruePredicate()) &&
           "Predicate should be always true!");
  }

  return SE->getUMinFromMismatchedTypes(Ops);
}

/// Get the exact not taken count for this loop exit.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
                                             ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.ExactNotTaken;

  return SE->getCouldNotCompute();
}

const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
    const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.MaxNotTaken;

  return SE->getCouldNotCompute();
}

7333 const SCEV * 7334 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { 7335 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7336 return !ENT.hasAlwaysTruePredicate(); 7337 }; 7338 7339 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax()) 7340 return SE->getCouldNotCompute(); 7341 7342 assert((isa<SCEVCouldNotCompute>(getConstantMax()) || 7343 isa<SCEVConstant>(getConstantMax())) && 7344 "No point in having a non-constant max backedge taken count!"); 7345 return getConstantMax(); 7346 } 7347 7348 const SCEV * 7349 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, 7350 ScalarEvolution *SE) { 7351 if (!SymbolicMax) 7352 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); 7353 return SymbolicMax; 7354 } 7355 7356 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( 7357 ScalarEvolution *SE) const { 7358 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7359 return !ENT.hasAlwaysTruePredicate(); 7360 }; 7361 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 7362 } 7363 7364 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 7365 ScalarEvolution *SE) const { 7366 if (getConstantMax() && getConstantMax() != SE->getCouldNotCompute() && 7367 SE->hasOperand(getConstantMax(), S)) 7368 return true; 7369 7370 for (auto &ENT : ExitNotTaken) 7371 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 7372 SE->hasOperand(ENT.ExactNotTaken, S)) 7373 return true; 7374 7375 return false; 7376 } 7377 7378 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 7379 : ExactNotTaken(E), MaxNotTaken(E) { 7380 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7381 isa<SCEVConstant>(MaxNotTaken)) && 7382 "No point in having a non-constant max backedge taken count!"); 7383 } 7384 7385 ScalarEvolution::ExitLimit::ExitLimit( 7386 const SCEV *E, const SCEV *M, bool MaxOrZero, 7387 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 7388 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 7389 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 7390 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 7391 "Exact is not allowed to be less precise than Max"); 7392 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7393 isa<SCEVConstant>(MaxNotTaken)) && 7394 "No point in having a non-constant max backedge taken count!"); 7395 for (auto *PredSet : PredSetList) 7396 for (auto *P : *PredSet) 7397 addPredicate(P); 7398 } 7399 7400 ScalarEvolution::ExitLimit::ExitLimit( 7401 const SCEV *E, const SCEV *M, bool MaxOrZero, 7402 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 7403 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 7404 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7405 isa<SCEVConstant>(MaxNotTaken)) && 7406 "No point in having a non-constant max backedge taken count!"); 7407 } 7408 7409 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 7410 bool MaxOrZero) 7411 : ExitLimit(E, M, MaxOrZero, None) { 7412 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7413 isa<SCEVConstant>(MaxNotTaken)) && 7414 "No point in having a non-constant max backedge taken count!"); 7415 } 7416 7417 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 7418 /// computable exit into a persistent ExitNotTakenInfo array. 
7419 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 7420 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts, 7421 bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero) 7422 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) { 7423 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 7424 7425 ExitNotTaken.reserve(ExitCounts.size()); 7426 std::transform( 7427 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 7428 [&](const EdgeExitInfo &EEI) { 7429 BasicBlock *ExitBB = EEI.first; 7430 const ExitLimit &EL = EEI.second; 7431 if (EL.Predicates.empty()) 7432 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken, 7433 nullptr); 7434 7435 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 7436 for (auto *Pred : EL.Predicates) 7437 Predicate->add(Pred); 7438 7439 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken, 7440 std::move(Predicate)); 7441 }); 7442 assert((isa<SCEVCouldNotCompute>(ConstantMax) || 7443 isa<SCEVConstant>(ConstantMax)) && 7444 "No point in having a non-constant max backedge taken count!"); 7445 } 7446 7447 /// Invalidate this result and free the ExitNotTakenInfo array. 7448 void ScalarEvolution::BackedgeTakenInfo::clear() { 7449 ExitNotTaken.clear(); 7450 } 7451 7452 /// Compute the number of times the backedge of the specified loop will execute. 7453 ScalarEvolution::BackedgeTakenInfo 7454 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 7455 bool AllowPredicates) { 7456 SmallVector<BasicBlock *, 8> ExitingBlocks; 7457 L->getExitingBlocks(ExitingBlocks); 7458 7459 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 7460 7461 SmallVector<EdgeExitInfo, 4> ExitCounts; 7462 bool CouldComputeBECount = true; 7463 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 7464 const SCEV *MustExitMaxBECount = nullptr; 7465 const SCEV *MayExitMaxBECount = nullptr; 7466 bool MustExitMaxOrZero = false; 7467 7468 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 7469 // and compute maxBECount. 7470 // Do a union of all the predicates here. 7471 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 7472 BasicBlock *ExitBB = ExitingBlocks[i]; 7473 7474 // We canonicalize untaken exits to br (constant); ignore them so that 7475 // proving an exit untaken doesn't negatively impact our ability to reason 7476 // about the loop as a whole. 7477 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator())) 7478 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) { 7479 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 7480 if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne())) 7481 continue; 7482 } 7483 7484 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 7485 7486 assert((AllowPredicates || EL.Predicates.empty()) && 7487 "Predicated exit limit when predicates are not allowed!"); 7488 7489 // 1. For each exit that can be computed, add an entry to ExitCounts. 7490 // CouldComputeBECount is true only if all exits can be computed. 7491 if (EL.ExactNotTaken == getCouldNotCompute()) 7492 // We couldn't compute an exact value for this exit, so 7493 // we won't be able to compute an exact value for the loop. 7494 CouldComputeBECount = false; 7495 else 7496 ExitCounts.emplace_back(ExitBB, EL); 7497 7498 // 2. Derive the loop's MaxBECount from each exit's max number of 7499 // non-exiting iterations. Partition the loop exits into two kinds: 7500 // LoopMustExits and LoopMayExits.
7501 // 7502 // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it 7503 // is a LoopMayExit. If any computable LoopMustExit is found, then 7504 // MaxBECount is the minimum EL.MaxNotTaken of computable 7505 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 7506 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 7507 // computable EL.MaxNotTaken. 7508 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 7509 DT.dominates(ExitBB, Latch)) { 7510 if (!MustExitMaxBECount) { 7511 MustExitMaxBECount = EL.MaxNotTaken; 7512 MustExitMaxOrZero = EL.MaxOrZero; 7513 } else { 7514 MustExitMaxBECount = 7515 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 7516 } 7517 } else if (MayExitMaxBECount != getCouldNotCompute()) { 7518 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 7519 MayExitMaxBECount = EL.MaxNotTaken; 7520 else { 7521 MayExitMaxBECount = 7522 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 7523 } 7524 } 7525 } 7526 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 7527 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 7528 // The loop backedge will be taken the maximum or zero times if there's 7529 // a single exit that must be taken the maximum or zero times. 7530 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 7531 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 7532 MaxBECount, MaxOrZero); 7533 } 7534 7535 ScalarEvolution::ExitLimit 7536 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 7537 bool AllowPredicates) { 7538 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); 7539 // If our exiting block does not dominate the latch, then its connection with 7540 // the loop's exit limit may be far from trivial. 7541 const BasicBlock *Latch = L->getLoopLatch(); 7542 if (!Latch || !DT.dominates(ExitingBlock, Latch)) 7543 return getCouldNotCompute(); 7544 7545 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 7546 Instruction *Term = ExitingBlock->getTerminator(); 7547 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 7548 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 7549 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 7550 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && 7551 "It should have one successor in loop and one exit block!"); 7552 // Proceed to the next level to examine the exit condition expression. 7553 return computeExitLimitFromCond( 7554 L, BI->getCondition(), ExitIfTrue, 7555 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 7556 } 7557 7558 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { 7559 // For switch, make sure that there is a single exit from the loop. 7560 BasicBlock *Exit = nullptr; 7561 for (auto *SBB : successors(ExitingBlock)) 7562 if (!L->contains(SBB)) { 7563 if (Exit) // Multiple exit successors.
7564 return getCouldNotCompute(); 7565 Exit = SBB; 7566 } 7567 assert(Exit && "Exiting block must have at least one exit"); 7568 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7569 /*ControlsExit=*/IsOnlyExit); 7570 } 7571 7572 return getCouldNotCompute(); 7573 } 7574 7575 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7576 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7577 bool ControlsExit, bool AllowPredicates) { 7578 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7579 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7580 ControlsExit, AllowPredicates); 7581 } 7582 7583 Optional<ScalarEvolution::ExitLimit> 7584 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7585 bool ExitIfTrue, bool ControlsExit, 7586 bool AllowPredicates) { 7587 (void)this->L; 7588 (void)this->ExitIfTrue; 7589 (void)this->AllowPredicates; 7590 7591 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7592 this->AllowPredicates == AllowPredicates && 7593 "Variance in assumed invariant key components!"); 7594 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7595 if (Itr == TripCountMap.end()) 7596 return None; 7597 return Itr->second; 7598 } 7599 7600 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7601 bool ExitIfTrue, 7602 bool ControlsExit, 7603 bool AllowPredicates, 7604 const ExitLimit &EL) { 7605 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7606 this->AllowPredicates == AllowPredicates && 7607 "Variance in assumed invariant key components!"); 7608 7609 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7610 assert(InsertResult.second && "Expected successful insertion!"); 7611 (void)InsertResult; 7612 (void)ExitIfTrue; 7613 } 7614 7615 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7616 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7617 bool ControlsExit, bool AllowPredicates) { 7618 7619 if (auto MaybeEL = 7620 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7621 return *MaybeEL; 7622 7623 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7624 ControlsExit, AllowPredicates); 7625 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7626 return EL; 7627 } 7628 7629 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7630 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7631 bool ControlsExit, bool AllowPredicates) { 7632 // Handle BinOp conditions (And, Or). 7633 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( 7634 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7635 return *LimitFromBinOp; 7636 7637 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7638 // Proceed to the next level to examine the icmp. 7639 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7640 ExitLimit EL = 7641 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7642 if (EL.hasFullInfo() || !AllowPredicates) 7643 return EL; 7644 7645 // Try again, but use SCEV predicates this time. 7646 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7647 /*AllowPredicates=*/true); 7648 } 7649 7650 // Check for a constant condition. 
These are normally stripped out by 7651 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7652 // preserve the CFG and is temporarily leaving constant conditions 7653 // in place. 7654 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7655 if (ExitIfTrue == !CI->getZExtValue()) 7656 // The backedge is always taken. 7657 return getCouldNotCompute(); 7658 else 7659 // The backedge is never taken. 7660 return getZero(CI->getType()); 7661 } 7662 7663 // If it's not an integer or pointer comparison then compute it the hard way. 7664 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7665 } 7666 7667 Optional<ScalarEvolution::ExitLimit> 7668 ScalarEvolution::computeExitLimitFromCondFromBinOp( 7669 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7670 bool ControlsExit, bool AllowPredicates) { 7671 // Check if the controlling expression for this loop is an And or Or. 7672 Value *Op0, *Op1; 7673 bool IsAnd = false; 7674 if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) 7675 IsAnd = true; 7676 else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) 7677 IsAnd = false; 7678 else 7679 return None; 7680 7681 // EitherMayExit is true in these two cases: 7682 // br (and Op0 Op1), loop, exit 7683 // br (or Op0 Op1), exit, loop 7684 bool EitherMayExit = IsAnd ^ ExitIfTrue; 7685 ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue, 7686 ControlsExit && !EitherMayExit, 7687 AllowPredicates); 7688 ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue, 7689 ControlsExit && !EitherMayExit, 7690 AllowPredicates); 7691 7692 // Be robust against unsimplified IR for the form "op i1 X, NeutralElement". 7693 const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd); 7694 if (isa<ConstantInt>(Op1)) 7695 return Op1 == NeutralElement ? EL0 : EL1; 7696 if (isa<ConstantInt>(Op0)) 7697 return Op0 == NeutralElement ? EL1 : EL0; 7698 7699 const SCEV *BECount = getCouldNotCompute(); 7700 const SCEV *MaxBECount = getCouldNotCompute(); 7701 if (EitherMayExit) { 7702 // Both conditions must equal the neutral element for the loop to continue 7703 // executing, so choose the less conservative count. 7704 // If ExitCond is a short-circuit form (select), using 7705 // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general. 7706 // For detailed examples, see 7707 // test/Analysis/ScalarEvolution/exit-count-select.ll 7708 bool PoisonSafe = isa<BinaryOperator>(ExitCond); 7709 if (!PoisonSafe) 7710 // Even if ExitCond is a select, we can safely derive BECount using both 7711 // EL0 and EL1 in these cases: 7712 // (1) EL0.ExactNotTaken is non-zero 7713 // (2) EL1.ExactNotTaken is non-poison 7714 // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and 7715 // it cannot be umin(0, ..)) 7716 // The PoisonSafe assignment below is simplified and the assertion after 7717 // BECount calculation fully guarantees the condition (3).
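// Illustration (hypothetical IR, in the spirit of exit-count-select.ll): for
//   %sel = select i1 %c0, i1 %c1, i1 false
//   br i1 %sel, label %loop, label %exit
// if %c0 is false, %sel is false and the loop exits regardless of %c1, so the
// exact count is 0 even if %c1 (and hence EL1.ExactNotTaken) is poison;
// blindly taking umin(EL0.ExactNotTaken, EL1.ExactNotTaken) could propagate
// that poison.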
7718 PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) || 7719 isa<SCEVConstant>(EL1.ExactNotTaken); 7720 if (EL0.ExactNotTaken != getCouldNotCompute() && 7721 EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) { 7722 BECount = 7723 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7724 7725 // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form, 7726 // it should have been simplified to zero (see the condition (3) above) 7727 assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() || 7728 BECount->isZero()); 7729 } 7730 if (EL0.MaxNotTaken == getCouldNotCompute()) 7731 MaxBECount = EL1.MaxNotTaken; 7732 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7733 MaxBECount = EL0.MaxNotTaken; 7734 else 7735 MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7736 } else { 7737 // Both conditions must take the exit value at the same time for the loop 7738 // to exit. For now, be conservative. 7739 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7740 BECount = EL0.ExactNotTaken; 7741 } 7742 7743 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7744 // to be more aggressive when computing BECount than when computing 7745 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7746 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7747 // to not. 7748 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7749 !isa<SCEVCouldNotCompute>(BECount)) 7750 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7751 7752 return ExitLimit(BECount, MaxBECount, false, 7753 { &EL0.Predicates, &EL1.Predicates }); 7754 } 7755 7756 ScalarEvolution::ExitLimit 7757 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7758 ICmpInst *ExitCond, 7759 bool ExitIfTrue, 7760 bool ControlsExit, 7761 bool AllowPredicates) { 7762 // If the condition was exit on true, convert the condition to exit on false. 7763 ICmpInst::Predicate Pred; 7764 if (!ExitIfTrue) 7765 Pred = ExitCond->getPredicate(); 7766 else 7767 Pred = ExitCond->getInversePredicate(); 7768 const ICmpInst::Predicate OriginalPred = Pred; 7769 7770 // Handle common loops like: for (X = "string"; *X; ++X) 7771 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7772 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7773 ExitLimit ItCnt = 7774 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7775 if (ItCnt.hasAnyInfo()) 7776 return ItCnt; 7777 } 7778 7779 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7780 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7781 7782 // Try to evaluate any dependencies out of the loop. 7783 LHS = getSCEVAtScope(LHS, L); 7784 RHS = getSCEVAtScope(RHS, L); 7785 7786 // At this point, we would like to compute for how many iterations of the 7787 // loop the predicate will evaluate to true for these inputs. 7788 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7789 // If there is a loop-invariant operand, force it into the RHS. 7790 std::swap(LHS, RHS); 7791 Pred = ICmpInst::getSwappedPredicate(Pred); 7792 } 7793 7794 // Simplify the operands before analyzing them. 7795 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7796 7797 // If we have a comparison of a chrec against a constant, try to use value 7798 // ranges to answer this query. 7799 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7800 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7801 if (AddRec->getLoop() == L) { 7802 // Form the constant range.
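// For example, with Pred == ICMP_ULT and RHSC == 10, makeExactICmpRegion
// yields the half-open range [0, 10); the number of iterations the addrec
// spends inside that range then gives the exit count for this test.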
7803 ConstantRange CompRange = 7804 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7805 7806 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7807 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7808 } 7809 7810 switch (Pred) { 7811 case ICmpInst::ICMP_NE: { // while (X != Y) 7812 // Convert to: while (X-Y != 0) 7813 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7814 AllowPredicates); 7815 if (EL.hasAnyInfo()) return EL; 7816 break; 7817 } 7818 case ICmpInst::ICMP_EQ: { // while (X == Y) 7819 // Convert to: while (X-Y == 0) 7820 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7821 if (EL.hasAnyInfo()) return EL; 7822 break; 7823 } 7824 case ICmpInst::ICMP_SLT: 7825 case ICmpInst::ICMP_ULT: { // while (X < Y) 7826 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7827 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7828 AllowPredicates); 7829 if (EL.hasAnyInfo()) return EL; 7830 break; 7831 } 7832 case ICmpInst::ICMP_SGT: 7833 case ICmpInst::ICMP_UGT: { // while (X > Y) 7834 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7835 ExitLimit EL = 7836 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7837 AllowPredicates); 7838 if (EL.hasAnyInfo()) return EL; 7839 break; 7840 } 7841 default: 7842 break; 7843 } 7844 7845 auto *ExhaustiveCount = 7846 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7847 7848 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7849 return ExhaustiveCount; 7850 7851 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7852 ExitCond->getOperand(1), L, OriginalPred); 7853 } 7854 7855 ScalarEvolution::ExitLimit 7856 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7857 SwitchInst *Switch, 7858 BasicBlock *ExitingBlock, 7859 bool ControlsExit) { 7860 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7861 7862 // Give up if the exit is the default dest of a switch. 7863 if (Switch->getDefaultDest() == ExitingBlock) 7864 return getCouldNotCompute(); 7865 7866 assert(L->contains(Switch->getDefaultDest()) && 7867 "Default case must not exit the loop!"); 7868 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7869 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7870 7871 // while (X != Y) --> while (X-Y != 0) 7872 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7873 if (EL.hasAnyInfo()) 7874 return EL; 7875 7876 return getCouldNotCompute(); 7877 } 7878 7879 static ConstantInt * 7880 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7881 ScalarEvolution &SE) { 7882 const SCEV *InVal = SE.getConstant(C); 7883 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7884 assert(isa<SCEVConstant>(Val) && 7885 "Evaluation of SCEV at constant didn't fold correctly?"); 7886 return cast<SCEVConstant>(Val)->getValue(); 7887 } 7888 7889 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7890 /// compute the backedge execution count. 7891 ScalarEvolution::ExitLimit 7892 ScalarEvolution::computeLoadConstantCompareExitLimit( 7893 LoadInst *LI, 7894 Constant *RHS, 7895 const Loop *L, 7896 ICmpInst::Predicate predicate) { 7897 if (LI->isVolatile()) return getCouldNotCompute(); 7898 7899 // Check to see if the loaded pointer is a getelementptr of a global. 7900 // TODO: Use SCEV instead of manually grubbing with GEPs. 
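// The shape recognized below is roughly (hypothetical IR):
//   @table = internal constant [16 x i8] [ ... ]
//   %p   = getelementptr [16 x i8], [16 x i8]* @table, i32 0, i32 %i
//   %ch  = load i8, i8* %p
//   %cmp = icmp eq i8 %ch, 0
// where %i is an affine addrec {C1,+,C2}<L>; candidate iterations are tested
// by folding the load out of the constant initializer.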
7901 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 7902 if (!GEP) return getCouldNotCompute(); 7903 7904 // Make sure that it is really a constant global we are gepping, with an 7905 // initializer, and make sure the first IDX is really 0. 7906 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 7907 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 7908 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 7909 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 7910 return getCouldNotCompute(); 7911 7912 // Okay, we allow one non-constant index into the GEP instruction. 7913 Value *VarIdx = nullptr; 7914 std::vector<Constant*> Indexes; 7915 unsigned VarIdxNum = 0; 7916 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 7917 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 7918 Indexes.push_back(CI); 7919 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 7920 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. 7921 VarIdx = GEP->getOperand(i); 7922 VarIdxNum = i-2; 7923 Indexes.push_back(nullptr); 7924 } 7925 7926 // Loop-invariant loads may be a byproduct of loop optimization. Skip them. 7927 if (!VarIdx) 7928 return getCouldNotCompute(); 7929 7930 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 7931 // Check to see if X is a loop-variant value now. 7932 const SCEV *Idx = getSCEV(VarIdx); 7933 Idx = getSCEVAtScope(Idx, L); 7934 7935 // We can only recognize very limited forms of loop index expressions, in 7936 // particular, only affine AddRec's like {C1,+,C2}<L>. 7937 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 7938 if (!IdxExpr || IdxExpr->getLoop() != L || !IdxExpr->isAffine() || 7939 isLoopInvariant(IdxExpr, L) || 7940 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 7941 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 7942 return getCouldNotCompute(); 7943 7944 unsigned MaxSteps = MaxBruteForceIterations; 7945 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 7946 ConstantInt *ItCst = ConstantInt::get( 7947 cast<IntegerType>(IdxExpr->getType()), IterationNum); 7948 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 7949 7950 // Form the GEP offset. 7951 Indexes[VarIdxNum] = Val; 7952 7953 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(), 7954 Indexes); 7955 if (!Result) break; // Cannot compute! 7956 7957 // Evaluate the condition for this iteration. 7958 Result = ConstantExpr::getICmp(predicate, Result, RHS); 7959 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 7960 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 7961 ++NumArrayLenItCounts; 7962 return getConstant(ItCst); // Found terminating iteration! 7963 } 7964 } 7965 return getCouldNotCompute(); 7966 } 7967 7968 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit( 7969 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) { 7970 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV); 7971 if (!RHS) 7972 return getCouldNotCompute(); 7973 7974 const BasicBlock *Latch = L->getLoopLatch(); 7975 if (!Latch) 7976 return getCouldNotCompute(); 7977 7978 const BasicBlock *Predecessor = L->getLoopPredecessor(); 7979 if (!Predecessor) 7980 return getCouldNotCompute(); 7981 7982 // Return true if V is of the form "LHS `shift_op` <positive constant>". 7983 // Return LHS in OutLHS and shift_op in OutOpCode.
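// E.g. (illustrative) "%sh = lshr i32 %x, 3" matches with OutLHS = %x and
// OutOpCode = Instruction::LShr, while a shift by zero or by a non-constant
// amount is rejected.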
7984 auto MatchPositiveShift = 7985 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) { 7986 7987 using namespace PatternMatch; 7988 7989 ConstantInt *ShiftAmt; 7990 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7991 OutOpCode = Instruction::LShr; 7992 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7993 OutOpCode = Instruction::AShr; 7994 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7995 OutOpCode = Instruction::Shl; 7996 else 7997 return false; 7998 7999 return ShiftAmt->getValue().isStrictlyPositive(); 8000 }; 8001 8002 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in 8003 // 8004 // loop: 8005 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ] 8006 // %iv.shifted = lshr i32 %iv, <positive constant> 8007 // 8008 // Return true on a successful match. Return the corresponding PHI node (%iv 8009 // above) in PNOut and the opcode of the shift operation in OpCodeOut. 8010 auto MatchShiftRecurrence = 8011 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) { 8012 Optional<Instruction::BinaryOps> PostShiftOpCode; 8013 8014 { 8015 Instruction::BinaryOps OpC; 8016 Value *V; 8017 8018 // If we encounter a shift instruction, "peel off" the shift operation, 8019 // and remember that we did so. Later when we inspect %iv's backedge 8020 // value, we will make sure that the backedge value uses the same 8021 // operation. 8022 // 8023 // Note: the peeled shift operation does not have to be the same 8024 // instruction as the one feeding into the PHI's backedge value. We only 8025 // really care about it being the same *kind* of shift instruction -- 8026 // that's all that is required for our later inferences to hold. 8027 if (MatchPositiveShift(LHS, V, OpC)) { 8028 PostShiftOpCode = OpC; 8029 LHS = V; 8030 } 8031 } 8032 8033 PNOut = dyn_cast<PHINode>(LHS); 8034 if (!PNOut || PNOut->getParent() != L->getHeader()) 8035 return false; 8036 8037 Value *BEValue = PNOut->getIncomingValueForBlock(Latch); 8038 Value *OpLHS; 8039 8040 return 8041 // The backedge value for the PHI node must be a shift by a positive 8042 // amount 8043 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) && 8044 8045 // of the PHI node itself 8046 OpLHS == PNOut && 8047 8048 // and the kind of shift must match the kind of shift we peeled 8049 // off, if any. 8050 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut); 8051 }; 8052 8053 PHINode *PN; 8054 Instruction::BinaryOps OpCode; 8055 if (!MatchShiftRecurrence(LHS, PN, OpCode)) 8056 return getCouldNotCompute(); 8057 8058 const DataLayout &DL = getDataLayout(); 8059 8060 // The key rationale for this optimization is that for some kinds of shift 8061 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1 8062 // within a finite number of iterations. If the condition guarding the 8063 // backedge (in the sense that the backedge is taken if the condition is true) 8064 // is false for the value the shift recurrence stabilizes to, then we know 8065 // that the backedge is taken only a finite number of times. 8066 8067 ConstantInt *StableValue = nullptr; 8068 switch (OpCode) { 8069 default: 8070 llvm_unreachable("Impossible case!"); 8071 8072 case Instruction::AShr: { 8073 // {K,ashr,<positive-constant>} stabilizes to 0 if K is non-negative and 8074 // to -1 if K is negative, in at most bitwidth(K) iterations.
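// Worked example (i8 values, shift amount 1): K = -16 evolves as
// -16, -8, -4, -2, -1, -1, ... while K = 16 evolves as 16, 8, 4, 2, 1, 0,
// 0, ... so the stable value depends only on the sign of the start value.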
8075 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 8076 KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC, 8077 Predecessor->getTerminator(), &DT); 8078 auto *Ty = cast<IntegerType>(RHS->getType()); 8079 if (Known.isNonNegative()) 8080 StableValue = ConstantInt::get(Ty, 0); 8081 else if (Known.isNegative()) 8082 StableValue = ConstantInt::get(Ty, -1, true); 8083 else 8084 return getCouldNotCompute(); 8085 8086 break; 8087 } 8088 case Instruction::LShr: 8089 case Instruction::Shl: 8090 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 8091 // stabilize to 0 in at most bitwidth(K) iterations. 8092 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 8093 break; 8094 } 8095 8096 auto *Result = 8097 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 8098 assert(Result->getType()->isIntegerTy(1) && 8099 "Otherwise cannot be an operand to a branch instruction"); 8100 8101 if (Result->isZeroValue()) { 8102 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 8103 const SCEV *UpperBound = 8104 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 8105 return ExitLimit(getCouldNotCompute(), UpperBound, false); 8106 } 8107 8108 return getCouldNotCompute(); 8109 } 8110 8111 /// Return true if we can constant fold an instruction of the specified type, 8112 /// assuming that all operands were constants. 8113 static bool CanConstantFold(const Instruction *I) { 8114 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 8115 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 8116 isa<LoadInst>(I) || isa<ExtractValueInst>(I)) 8117 return true; 8118 8119 if (const CallInst *CI = dyn_cast<CallInst>(I)) 8120 if (const Function *F = CI->getCalledFunction()) 8121 return canConstantFoldCallTo(CI, F); 8122 return false; 8123 } 8124 8125 /// Determine whether this instruction can constant evolve within this loop 8126 /// assuming its operands can all constant evolve. 8127 static bool canConstantEvolve(Instruction *I, const Loop *L) { 8128 // An instruction outside of the loop can't be derived from a loop PHI. 8129 if (!L->contains(I)) return false; 8130 8131 if (isa<PHINode>(I)) { 8132 // We don't currently keep track of the control flow needed to evaluate 8133 // PHIs, so we cannot handle PHIs inside of loops. 8134 return L->getHeader() == I->getParent(); 8135 } 8136 8137 // If we won't be able to constant fold this expression even if the operands 8138 // are constants, bail early. 8139 return CanConstantFold(I); 8140 } 8141 8142 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 8143 /// recursing through each instruction operand until reaching a loop header phi. 8144 static PHINode * 8145 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 8146 DenseMap<Instruction *, PHINode *> &PHIMap, 8147 unsigned Depth) { 8148 if (Depth > MaxConstantEvolvingDepth) 8149 return nullptr; 8150 8151 // Otherwise, we can evaluate this instruction if all of its operands are 8152 // constant or derived from a PHI node themselves. 8153 PHINode *PHI = nullptr; 8154 for (Value *Op : UseInst->operands()) { 8155 if (isa<Constant>(Op)) continue; 8156 8157 Instruction *OpInst = dyn_cast<Instruction>(Op); 8158 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 8159 8160 PHINode *P = dyn_cast<PHINode>(OpInst); 8161 if (!P) 8162 // If this operand is already visited, reuse the prior result. 8163 // We may have P != PHI if this is the deepest point at which the 8164 // inconsistent paths meet. 
8165 P = PHIMap.lookup(OpInst); 8166 if (!P) { 8167 // Recurse and memoize the results, whether a phi is found or not. 8168 // This recursive call invalidates pointers into PHIMap. 8169 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1); 8170 PHIMap[OpInst] = P; 8171 } 8172 if (!P) 8173 return nullptr; // Not evolving from PHI 8174 if (PHI && PHI != P) 8175 return nullptr; // Evolving from multiple different PHIs. 8176 PHI = P; 8177 } 8178 // This is an expression evolving from a constant PHI! 8179 return PHI; 8180 } 8181 8182 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 8183 /// in the loop that V is derived from. We allow arbitrary operations along the 8184 /// way, but the operands of an operation must either be constants or values 8185 /// derived from a constant PHI. If this expression does not fit with these 8186 /// constraints, return null. 8187 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 8188 Instruction *I = dyn_cast<Instruction>(V); 8189 if (!I || !canConstantEvolve(I, L)) return nullptr; 8190 8191 if (PHINode *PN = dyn_cast<PHINode>(I)) 8192 return PN; 8193 8194 // Record non-constant instructions contained by the loop. 8195 DenseMap<Instruction *, PHINode *> PHIMap; 8196 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0); 8197 } 8198 8199 /// EvaluateExpression - Given an expression that passes the 8200 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI 8201 /// nodes in the loop have the constant values recorded in Vals. If we can't 8202 /// fold this expression for some reason, return null. 8203 static Constant *EvaluateExpression(Value *V, const Loop *L, 8204 DenseMap<Instruction *, Constant *> &Vals, 8205 const DataLayout &DL, 8206 const TargetLibraryInfo *TLI) { 8207 // Convenient constant check, but redundant for recursive calls. 8208 if (Constant *C = dyn_cast<Constant>(V)) return C; 8209 Instruction *I = dyn_cast<Instruction>(V); 8210 if (!I) return nullptr; 8211 8212 if (Constant *C = Vals.lookup(I)) return C; 8213 8214 // An instruction inside the loop depends on a value outside the loop that we 8215 // weren't given a mapping for, or a value such as a call inside the loop. 8216 if (!canConstantEvolve(I, L)) return nullptr; 8217 8218 // An unmapped PHI can be due to a branch or another loop inside this loop, 8219 // or due to this not being the initial iteration through a loop where we 8220 // couldn't compute the evolution of this particular PHI last time. 8221 if (isa<PHINode>(I)) return nullptr; 8222 8223 std::vector<Constant*> Operands(I->getNumOperands()); 8224 8225 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 8226 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 8227 if (!Operand) { 8228 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 8229 if (!Operands[i]) return nullptr; 8230 continue; 8231 } 8232 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 8233 Vals[Operand] = C; 8234 if (!C) return nullptr; 8235 Operands[i] = C; 8236 } 8237 8238 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 8239 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 8240 Operands[1], DL, TLI); 8241 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 8242 if (!LI->isVolatile()) 8243 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 8244 } 8245 return ConstantFoldInstOperands(I, Operands, DL, TLI); 8246 } 8247 8248 8249 // If every incoming value to PN except the one for BB is a specific Constant, 8250 // return that, else return nullptr.
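// E.g. (hypothetical IR) with BB == %latch,
//   %phi = phi i32 [ 5, %entry ], [ %next, %latch ]
// returns i32 5; if two predecessors other than BB carried different
// constants, or any of them carried a non-constant, it returns nullptr.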
8251 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 8252 Constant *IncomingVal = nullptr; 8253 8254 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 8255 if (PN->getIncomingBlock(i) == BB) 8256 continue; 8257 8258 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 8259 if (!CurrentVal) 8260 return nullptr; 8261 8262 if (IncomingVal != CurrentVal) { 8263 if (IncomingVal) 8264 return nullptr; 8265 IncomingVal = CurrentVal; 8266 } 8267 } 8268 8269 return IncomingVal; 8270 } 8271 8272 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 8273 /// in the header of its containing loop, we know the loop executes a 8274 /// constant number of times, and the PHI node is just a recurrence 8275 /// involving constants, fold it. 8276 Constant * 8277 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 8278 const APInt &BEs, 8279 const Loop *L) { 8280 auto I = ConstantEvolutionLoopExitValue.find(PN); 8281 if (I != ConstantEvolutionLoopExitValue.end()) 8282 return I->second; 8283 8284 if (BEs.ugt(MaxBruteForceIterations)) 8285 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 8286 8287 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 8288 8289 DenseMap<Instruction *, Constant *> CurrentIterVals; 8290 BasicBlock *Header = L->getHeader(); 8291 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 8292 8293 BasicBlock *Latch = L->getLoopLatch(); 8294 if (!Latch) 8295 return nullptr; 8296 8297 for (PHINode &PHI : Header->phis()) { 8298 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 8299 CurrentIterVals[&PHI] = StartCST; 8300 } 8301 if (!CurrentIterVals.count(PN)) 8302 return RetVal = nullptr; 8303 8304 Value *BEValue = PN->getIncomingValueForBlock(Latch); 8305 8306 // Execute the loop symbolically to determine the exit value. 8307 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 8308 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 8309 8310 unsigned NumIterations = BEs.getZExtValue(); // must be in range 8311 unsigned IterationNum = 0; 8312 const DataLayout &DL = getDataLayout(); 8313 for (; ; ++IterationNum) { 8314 if (IterationNum == NumIterations) 8315 return RetVal = CurrentIterVals[PN]; // Got exit value! 8316 8317 // Compute the value of the PHIs for the next iteration. 8318 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 8319 DenseMap<Instruction *, Constant *> NextIterVals; 8320 Constant *NextPHI = 8321 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8322 if (!NextPHI) 8323 return nullptr; // Couldn't evaluate! 8324 NextIterVals[PN] = NextPHI; 8325 8326 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 8327 8328 // Also evaluate the other PHI nodes. However, we don't get to stop if we 8329 // cease to be able to evaluate one of them or if they stop evolving, 8330 // because that doesn't necessarily prevent us from computing PN. 8331 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 8332 for (const auto &I : CurrentIterVals) { 8333 PHINode *PHI = dyn_cast<PHINode>(I.first); 8334 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 8335 PHIsToCompute.emplace_back(PHI, I.second); 8336 } 8337 // We use two distinct loops because EvaluateExpression may invalidate any 8338 // iterators into CurrentIterVals. 
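// (EvaluateExpression memoizes intermediate results via Vals[Operand] = C,
// and an insertion can force the DenseMap to grow, invalidating every
// outstanding iterator into it.)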
8339 for (const auto &I : PHIsToCompute) { 8340 PHINode *PHI = I.first; 8341 Constant *&NextPHI = NextIterVals[PHI]; 8342 if (!NextPHI) { // Not already computed. 8343 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 8344 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8345 } 8346 if (NextPHI != I.second) 8347 StoppedEvolving = false; 8348 } 8349 8350 // If all entries in CurrentIterVals == NextIterVals, then we can stop 8351 // iterating; the loop can't continue to change. 8352 if (StoppedEvolving) 8353 return RetVal = CurrentIterVals[PN]; 8354 8355 CurrentIterVals.swap(NextIterVals); 8356 } 8357 } 8358 8359 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, 8360 Value *Cond, 8361 bool ExitWhen) { 8362 PHINode *PN = getConstantEvolvingPHI(Cond, L); 8363 if (!PN) return getCouldNotCompute(); 8364 8365 // If the loop is canonicalized, the PHI will have exactly two entries. 8366 // That's the only form we support here. 8367 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); 8368 8369 DenseMap<Instruction *, Constant *> CurrentIterVals; 8370 BasicBlock *Header = L->getHeader(); 8371 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 8372 8373 BasicBlock *Latch = L->getLoopLatch(); 8374 assert(Latch && "Should follow from NumIncomingValues == 2!"); 8375 8376 for (PHINode &PHI : Header->phis()) { 8377 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 8378 CurrentIterVals[&PHI] = StartCST; 8379 } 8380 if (!CurrentIterVals.count(PN)) 8381 return getCouldNotCompute(); 8382 8383 // Okay, we found a PHI node that defines the trip count of this loop. Execute 8384 // the loop symbolically to determine when the condition gets a value of 8385 // "ExitWhen". 8386 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 8387 const DataLayout &DL = getDataLayout(); 8388 for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) { 8389 auto *CondVal = dyn_cast_or_null<ConstantInt>( 8390 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); 8391 8392 // Couldn't symbolically evaluate. 8393 if (!CondVal) return getCouldNotCompute(); 8394 8395 if (CondVal->getValue() == uint64_t(ExitWhen)) { 8396 ++NumBruteForceTripCountsComputed; 8397 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 8398 } 8399 8400 // Update all the PHI nodes for the next iteration. 8401 DenseMap<Instruction *, Constant *> NextIterVals; 8402 8403 // Create a list of which PHIs we need to compute. We want to do this before 8404 // calling EvaluateExpression on them because that may invalidate iterators 8405 // into CurrentIterVals. 8406 SmallVector<PHINode *, 8> PHIsToCompute; 8407 for (const auto &I : CurrentIterVals) { 8408 PHINode *PHI = dyn_cast<PHINode>(I.first); 8409 if (!PHI || PHI->getParent() != Header) continue; 8410 PHIsToCompute.push_back(PHI); 8411 } 8412 for (PHINode *PHI : PHIsToCompute) { 8413 Constant *&NextPHI = NextIterVals[PHI]; 8414 if (NextPHI) continue; // Already computed! 8415 8416 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 8417 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8418 } 8419 CurrentIterVals.swap(NextIterVals); 8420 } 8421 8422 // Too many iterations were needed to evaluate.
8423 return getCouldNotCompute(); 8424 } 8425 8426 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 8427 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 8428 ValuesAtScopes[V]; 8429 // Check to see if we've folded this expression at this loop before. 8430 for (auto &LS : Values) 8431 if (LS.first == L) 8432 return LS.second ? LS.second : V; 8433 8434 Values.emplace_back(L, nullptr); 8435 8436 // Otherwise compute it. 8437 const SCEV *C = computeSCEVAtScope(V, L); 8438 for (auto &LS : reverse(ValuesAtScopes[V])) 8439 if (LS.first == L) { 8440 LS.second = C; 8441 break; 8442 } 8443 return C; 8444 } 8445 8446 /// This builds up a Constant using the ConstantExpr interface. That way, we 8447 /// will return Constants for objects which aren't represented by a 8448 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 8449 /// Returns NULL if the SCEV isn't representable as a Constant. 8450 static Constant *BuildConstantFromSCEV(const SCEV *V) { 8451 switch (V->getSCEVType()) { 8452 case scCouldNotCompute: 8453 case scAddRecExpr: 8454 return nullptr; 8455 case scConstant: 8456 return cast<SCEVConstant>(V)->getValue(); 8457 case scUnknown: 8458 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 8459 case scSignExtend: { 8460 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 8461 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 8462 return ConstantExpr::getSExt(CastOp, SS->getType()); 8463 return nullptr; 8464 } 8465 case scZeroExtend: { 8466 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 8467 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8468 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8469 return nullptr; 8470 } 8471 case scPtrToInt: { 8472 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V); 8473 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand())) 8474 return ConstantExpr::getPtrToInt(CastOp, P2I->getType()); 8475 8476 return nullptr; 8477 } 8478 case scTruncate: { 8479 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8480 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8481 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8482 return nullptr; 8483 } 8484 case scAddExpr: { 8485 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8486 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8487 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8488 unsigned AS = PTy->getAddressSpace(); 8489 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8490 C = ConstantExpr::getBitCast(C, DestPtrTy); 8491 } 8492 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8493 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8494 if (!C2) 8495 return nullptr; 8496 8497 // First pointer! 8498 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 8499 unsigned AS = C2->getType()->getPointerAddressSpace(); 8500 std::swap(C, C2); 8501 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8502 // The offsets have been converted to bytes. We can add bytes to an 8503 // i8* by GEP with the byte count in the first index. 8504 C = ConstantExpr::getBitCast(C, DestPtrTy); 8505 } 8506 8507 // Don't bother trying to sum two pointers. We probably can't 8508 // statically compute a load that results from it anyway. 
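// (For instance, a SCEV like (@a + @b) over two global addresses has no
// direct ConstantExpr form -- the IR 'add' only takes integers -- so nothing
// downstream could fold a load based on it anyway.)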
8509 if (C2->getType()->isPointerTy()) 8510 return nullptr; 8511 8512 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8513 if (PTy->getElementType()->isStructTy()) 8514 C2 = ConstantExpr::getIntegerCast( 8515 C2, Type::getInt32Ty(C->getContext()), true); 8516 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); 8517 } else 8518 C = ConstantExpr::getAdd(C, C2); 8519 } 8520 return C; 8521 } 8522 return nullptr; 8523 } 8524 case scMulExpr: { 8525 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 8526 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 8527 // Don't bother with pointers at all. 8528 if (C->getType()->isPointerTy()) 8529 return nullptr; 8530 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 8531 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 8532 if (!C2 || C2->getType()->isPointerTy()) 8533 return nullptr; 8534 C = ConstantExpr::getMul(C, C2); 8535 } 8536 return C; 8537 } 8538 return nullptr; 8539 } 8540 case scUDivExpr: { 8541 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 8542 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 8543 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 8544 if (LHS->getType() == RHS->getType()) 8545 return ConstantExpr::getUDiv(LHS, RHS); 8546 return nullptr; 8547 } 8548 case scSMaxExpr: 8549 case scUMaxExpr: 8550 case scSMinExpr: 8551 case scUMinExpr: 8552 return nullptr; // TODO: smax, umax, smin, umin. 8553 } 8554 llvm_unreachable("Unknown SCEV kind!"); 8555 } 8556 8557 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 8558 if (isa<SCEVConstant>(V)) return V; 8559 8560 // If this instruction is evolved from a constant-evolving PHI, compute the 8561 // exit value from the loop without using SCEVs. 8562 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 8563 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 8564 if (PHINode *PN = dyn_cast<PHINode>(I)) { 8565 const Loop *CurrLoop = this->LI[I->getParent()]; 8566 // Looking for loop exit value. 8567 if (CurrLoop && CurrLoop->getParentLoop() == L && 8568 PN->getParent() == CurrLoop->getHeader()) { 8569 // Okay, there is no closed form solution for the PHI node. Check 8570 // to see if the loop that contains it has a known backedge-taken 8571 // count. If so, we may be able to force computation of the exit 8572 // value. 8573 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop); 8574 // This trivial case can show up in some degenerate cases where 8575 // the incoming IR has not yet been fully simplified. 8576 if (BackedgeTakenCount->isZero()) { 8577 Value *InitValue = nullptr; 8578 bool MultipleInitValues = false; 8579 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 8580 if (!CurrLoop->contains(PN->getIncomingBlock(i))) { 8581 if (!InitValue) 8582 InitValue = PN->getIncomingValue(i); 8583 else if (InitValue != PN->getIncomingValue(i)) { 8584 MultipleInitValues = true; 8585 break; 8586 } 8587 } 8588 } 8589 if (!MultipleInitValues && InitValue) 8590 return getSCEV(InitValue); 8591 } 8592 // Do we have a loop invariant value flowing around the backedge 8593 // for a loop which must execute the backedge? 8594 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 8595 isKnownPositive(BackedgeTakenCount) && 8596 PN->getNumIncomingValues() == 2) { 8597 8598 unsigned InLoopPred = 8599 CurrLoop->contains(PN->getIncomingBlock(0)) ?
0 : 1; 8600 Value *BackedgeVal = PN->getIncomingValue(InLoopPred); 8601 if (CurrLoop->isLoopInvariant(BackedgeVal)) 8602 return getSCEV(BackedgeVal); 8603 } 8604 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 8605 // Okay, we know how many times the containing loop executes. If 8606 // this is a constant evolving PHI node, get the final value at 8607 // the specified iteration number. 8608 Constant *RV = getConstantEvolutionLoopExitValue( 8609 PN, BTCC->getAPInt(), CurrLoop); 8610 if (RV) return getSCEV(RV); 8611 } 8612 } 8613 8614 // If there is a single-input Phi, evaluate it at our scope. If we can 8615 // prove that this replacement does not break LCSSA form, use the new value. 8616 if (PN->getNumOperands() == 1) { 8617 const SCEV *Input = getSCEV(PN->getOperand(0)); 8618 const SCEV *InputAtScope = getSCEVAtScope(Input, L); 8619 // TODO: We can generalize this using LI.replacementPreservesLCSSAForm; 8620 // for the simplest case, just support constants. 8621 if (isa<SCEVConstant>(InputAtScope)) return InputAtScope; 8622 } 8623 } 8624 8625 // Okay, this is an expression that we cannot symbolically evaluate 8626 // into a SCEV. Check to see if it's possible to symbolically evaluate 8627 // the arguments into constants, and if so, try to constant propagate the 8628 // result. This is particularly useful for computing loop exit values. 8629 if (CanConstantFold(I)) { 8630 SmallVector<Constant *, 4> Operands; 8631 bool MadeImprovement = false; 8632 for (Value *Op : I->operands()) { 8633 if (Constant *C = dyn_cast<Constant>(Op)) { 8634 Operands.push_back(C); 8635 continue; 8636 } 8637 8638 // If any operand is non-constant and not of integer or pointer type, 8639 // don't even try to analyze it 8640 // with SCEV techniques. 8641 if (!isSCEVable(Op->getType())) 8642 return V; 8643 8644 const SCEV *OrigV = getSCEV(Op); 8645 const SCEV *OpV = getSCEVAtScope(OrigV, L); 8646 MadeImprovement |= OrigV != OpV; 8647 8648 Constant *C = BuildConstantFromSCEV(OpV); 8649 if (!C) return V; 8650 if (C->getType() != Op->getType()) 8651 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 8652 Op->getType(), 8653 false), 8654 C, Op->getType()); 8655 Operands.push_back(C); 8656 } 8657 8658 // Check to see if getSCEVAtScope actually made an improvement. 8659 if (MadeImprovement) { 8660 Constant *C = nullptr; 8661 const DataLayout &DL = getDataLayout(); 8662 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 8663 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 8664 Operands[1], DL, &TLI); 8665 else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) { 8666 if (!Load->isVolatile()) 8667 C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(), 8668 DL); 8669 } else 8670 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 8671 if (!C) return V; 8672 return getSCEV(C); 8673 } 8674 } 8675 } 8676 8677 // This is some other type of SCEVUnknown, just return it. 8678 return V; 8679 } 8680 8681 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 8682 // Avoid performing the look-up in the common case where the specified 8683 // expression has no loop-variant portions. 8684 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 8685 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8686 if (OpAtScope != Comm->getOperand(i)) { 8687 // Okay, at least one of these operands is loop variant but might be 8688 // foldable. Build a new instance of the folded commutative expression.
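// E.g. (illustrative) evaluating (%n + {0,+,1}<L1>) at a scope outside L1:
// the addrec operand folds to its exit value -- here the backedge-taken
// count of L1 -- and the add is rebuilt from the folded operands below.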
8689 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 8690 Comm->op_begin()+i); 8691 NewOps.push_back(OpAtScope); 8692 8693 for (++i; i != e; ++i) { 8694 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8695 NewOps.push_back(OpAtScope); 8696 } 8697 if (isa<SCEVAddExpr>(Comm)) 8698 return getAddExpr(NewOps, Comm->getNoWrapFlags()); 8699 if (isa<SCEVMulExpr>(Comm)) 8700 return getMulExpr(NewOps, Comm->getNoWrapFlags()); 8701 if (isa<SCEVMinMaxExpr>(Comm)) 8702 return getMinMaxExpr(Comm->getSCEVType(), NewOps); 8703 llvm_unreachable("Unknown commutative SCEV type!"); 8704 } 8705 } 8706 // If we got here, all operands are loop invariant. 8707 return Comm; 8708 } 8709 8710 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 8711 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 8712 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 8713 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 8714 return Div; // must be loop invariant 8715 return getUDivExpr(LHS, RHS); 8716 } 8717 8718 // If this is a loop recurrence for a loop that does not contain L, then we 8719 // are dealing with the final value computed by the loop. 8720 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 8721 // First, attempt to evaluate each operand. 8722 // Avoid performing the look-up in the common case where the specified 8723 // expression has no loop-variant portions. 8724 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8725 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8726 if (OpAtScope == AddRec->getOperand(i)) 8727 continue; 8728 8729 // Okay, at least one of these operands is loop variant but might be 8730 // foldable. Build a new instance of the folded add recurrence. 8731 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8732 AddRec->op_begin()+i); 8733 NewOps.push_back(OpAtScope); 8734 for (++i; i != e; ++i) 8735 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8736 8737 const SCEV *FoldedRec = 8738 getAddRecExpr(NewOps, AddRec->getLoop(), 8739 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8740 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8741 // The addrec may be folded to a nonrecurrence, for example, if the 8742 // induction variable is multiplied by zero after constant folding. Go 8743 // ahead and return the folded value. 8744 if (!AddRec) 8745 return FoldedRec; 8746 break; 8747 } 8748 8749 // If the scope is outside the addrec's loop, evaluate it by using the 8750 // loop exit value of the addrec. 8751 if (!AddRec->getLoop()->contains(L)) { 8752 // To evaluate this recurrence, we need to know how many times the AddRec 8753 // loop iterates. Compute this now. 8754 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8755 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8756 8757 // Then, evaluate the AddRec.
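// (evaluateAtIteration produces the closed form at the given iteration
// count; e.g. the final value of {S,+,1}<L> is S + BTC(L).)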
8758 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8759 } 8760 8761 return AddRec; 8762 } 8763 8764 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8765 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8766 if (Op == Cast->getOperand()) 8767 return Cast; // must be loop invariant 8768 return getZeroExtendExpr(Op, Cast->getType()); 8769 } 8770 8771 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8772 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8773 if (Op == Cast->getOperand()) 8774 return Cast; // must be loop invariant 8775 return getSignExtendExpr(Op, Cast->getType()); 8776 } 8777 8778 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8779 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8780 if (Op == Cast->getOperand()) 8781 return Cast; // must be loop invariant 8782 return getTruncateExpr(Op, Cast->getType()); 8783 } 8784 8785 if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) { 8786 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8787 if (Op == Cast->getOperand()) 8788 return Cast; // must be loop invariant 8789 return getPtrToIntExpr(Op, Cast->getType()); 8790 } 8791 8792 llvm_unreachable("Unknown SCEV type!"); 8793 } 8794 8795 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8796 return getSCEVAtScope(getSCEV(V), L); 8797 } 8798 8799 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 8800 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 8801 return stripInjectiveFunctions(ZExt->getOperand()); 8802 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 8803 return stripInjectiveFunctions(SExt->getOperand()); 8804 return S; 8805 } 8806 8807 /// Finds the minimum unsigned root of the following equation: 8808 /// 8809 /// A * X = B (mod N) 8810 /// 8811 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8812 /// A and B isn't important. 8813 /// 8814 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 8815 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8816 ScalarEvolution &SE) { 8817 uint32_t BW = A.getBitWidth(); 8818 assert(BW == SE.getTypeSizeInBits(B->getType())); 8819 assert(A != 0 && "A must be non-zero."); 8820 8821 // 1. D = gcd(A, N) 8822 // 8823 // The gcd of A and N may have only one prime factor: 2. The number of 8824 // trailing zeros in A is its multiplicity. 8825 uint32_t Mult2 = A.countTrailingZeros(); 8826 // D = 2^Mult2 8827 8828 // 2. Check if B is divisible by D. 8829 // 8830 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 8831 // is not less than the multiplicity of this prime factor for D. 8832 if (SE.GetMinTrailingZeros(B) < Mult2) 8833 return SE.getCouldNotCompute(); 8834 8835 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 8836 // modulo (N / D). 8837 // 8838 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8839 // (N / D) in general. The inverse itself always fits into BW bits, though, 8840 // so we immediately truncate it. 8841 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8842 APInt Mod(BW + 1, 0); 8843 Mod.setBit(BW - Mult2); // Mod = N / D 8844 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8845 8846 // 4.
Compute the minimum unsigned root of the equation: 8847 // I * (B / D) mod (N / D) 8848 // To simplify the computation, we factor out the divide by D: 8849 // (I * B mod N) / D 8850 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8851 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8852 } 8853 8854 /// For a given quadratic addrec, generate coefficients of the corresponding 8855 /// quadratic equation, multiplied by a common value to ensure that they are 8856 /// integers. 8857 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8858 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8859 /// were multiplied by, and BitWidth is the bit width of the original addrec 8860 /// coefficients. 8861 /// This function returns None if the addrec coefficients are not compile- 8862 /// time constants. 8863 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8864 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8865 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8866 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8867 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8868 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8869 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8870 << *AddRec << '\n'); 8871 8872 // We currently can only solve this if the coefficients are constants. 8873 if (!LC || !MC || !NC) { 8874 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8875 return None; 8876 } 8877 8878 APInt L = LC->getAPInt(); 8879 APInt M = MC->getAPInt(); 8880 APInt N = NC->getAPInt(); 8881 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8882 8883 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8884 unsigned NewWidth = BitWidth + 1; 8885 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8886 << BitWidth << '\n'); 8887 // The sign-extension (as opposed to a zero-extension) here matches the 8888 // extension used in SolveQuadraticEquationWrap (with the same motivation). 8889 N = N.sext(NewWidth); 8890 M = M.sext(NewWidth); 8891 L = L.sext(NewWidth); 8892 8893 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8894 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8895 // L+M, L+2M+N, L+3M+3N, ... 8896 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8897 // 8898 // The equation Acc = 0 is then 8899 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8900 // In a quadratic form it becomes: 8901 // N n^2 + (2M-N) n + 2L = 0. 8902 8903 APInt A = N; 8904 APInt B = 2 * M - A; 8905 APInt C = 2 * L; 8906 APInt T = APInt(NewWidth, 2); 8907 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 8908 << "x + " << C << ", coeff bw: " << NewWidth 8909 << ", multiplied by " << T << '\n'); 8910 return std::make_tuple(A, B, C, T, BitWidth); 8911 } 8912 8913 /// Helper function to compare optional APInts: 8914 /// (a) if X and Y both exist, return min(X, Y), 8915 /// (b) if neither X nor Y exist, return None, 8916 /// (c) if exactly one of X and Y exists, return that value. 8917 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 8918 if (X.hasValue() && Y.hasValue()) { 8919 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 8920 APInt XW = X->sextOrSelf(W); 8921 APInt YW = Y->sextOrSelf(W); 8922 return XW.slt(YW) ? 
*X : *Y; 8923 } 8924 if (!X.hasValue() && !Y.hasValue()) 8925 return None; 8926 return X.hasValue() ? *X : *Y; 8927 } 8928 8929 /// Helper function to truncate an optional APInt to a given BitWidth. 8930 /// When solving addrec-related equations, it is preferable to return a value 8931 /// that has the same bit width as the original addrec's coefficients. If the 8932 /// solution fits in the original bit width, truncate it (except for i1). 8933 /// Returning a value of a different bit width may inhibit some optimizations. 8934 /// 8935 /// In general, a solution to a quadratic equation generated from an addrec 8936 /// may require BW+1 bits, where BW is the bit width of the addrec's 8937 /// coefficients. The reason is that the coefficients of the quadratic 8938 /// equation are BW+1 bits wide (to avoid truncation when converting from 8939 /// the addrec to the equation). 8940 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 8941 if (!X.hasValue()) 8942 return None; 8943 unsigned W = X->getBitWidth(); 8944 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 8945 return X->trunc(BitWidth); 8946 return X; 8947 } 8948 8949 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 8950 /// iterations. The values L, M, N are assumed to be signed, and they 8951 /// should all have the same bit width. 8952 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 8953 /// where BW is the bit width of the addrec's coefficients. 8954 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 8955 /// returned as such; otherwise the bit width of the returned value may 8956 /// be greater than BW. 8957 /// 8958 /// This function returns None if 8959 /// (a) the addrec coefficients are not constant, or 8960 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 8961 /// like x^2 = 5, no integer solutions exist; in other cases an integer 8962 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 8963 static Optional<APInt> 8964 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 8965 APInt A, B, C, M; 8966 unsigned BitWidth; 8967 auto T = GetQuadraticEquation(AddRec); 8968 if (!T.hasValue()) 8969 return None; 8970 8971 std::tie(A, B, C, M, BitWidth) = *T; 8972 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 8973 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 8974 if (!X.hasValue()) 8975 return None; 8976 8977 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 8978 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 8979 if (!V->isZero()) 8980 return None; 8981 8982 return TruncIfPossible(X, BitWidth); 8983 } 8984 8985 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 8986 /// iterations. The values M, N are assumed to be signed, and they 8987 /// should all have the same bit width. 8988 /// Find the least n such that c(n) does not belong to the given range, 8989 /// while c(n-1) does. 8990 /// 8991 /// This function returns None if 8992 /// (a) the addrec coefficients are not constant, or 8993 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 8994 /// bounds of the range.
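/// For example, for the chrec {0,+,1,+,1} (whose values are 0, 1, 3, 6, 10,
/// ...) and the range [0, 7), the answer is n = 4: c(3) = 6 still lies in
/// the range while c(4) = 10 does not.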
8995 static Optional<APInt> 8996 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 8997 const ConstantRange &Range, ScalarEvolution &SE) { 8998 assert(AddRec->getOperand(0)->isZero() && 8999 "Starting value of addrec should be 0"); 9000 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 9001 << Range << ", addrec " << *AddRec << '\n'); 9002 // This case is handled in getNumIterationsInRange. Here we can assume that 9003 // we start in the range. 9004 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 9005 "Addrec's initial value should be in range"); 9006 9007 APInt A, B, C, M; 9008 unsigned BitWidth; 9009 auto T = GetQuadraticEquation(AddRec); 9010 if (!T.hasValue()) 9011 return None; 9012 9013 // Be careful about the return value: there can be two reasons for not 9014 // returning an actual number. First, if no solutions to the equations 9015 // were found, and second, if the solutions don't leave the given range. 9016 // The first case means that the actual solution is "unknown", the second 9017 // means that it's known, but not valid. If the solution is unknown, we 9018 // cannot make any conclusions. 9019 // Return a pair: the optional solution and a flag indicating if the 9020 // solution was found. 9021 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 9022 // Solve for signed overflow and unsigned overflow, pick the lower 9023 // solution. 9024 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 9025 << Bound << " (before multiplying by " << M << ")\n"); 9026 Bound *= M; // The quadratic equation multiplier. 9027 9028 Optional<APInt> SO = None; 9029 if (BitWidth > 1) { 9030 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 9031 "signed overflow\n"); 9032 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); 9033 } 9034 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 9035 "unsigned overflow\n"); 9036 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, 9037 BitWidth+1); 9038 9039 auto LeavesRange = [&] (const APInt &X) { 9040 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); 9041 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); 9042 if (Range.contains(V0->getValue())) 9043 return false; 9044 // X should be at least 1, so X-1 is non-negative. 9045 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); 9046 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); 9047 if (Range.contains(V1->getValue())) 9048 return true; 9049 return false; 9050 }; 9051 9052 // If SolveQuadraticEquationWrap returns None, it means that there can 9053 // be a solution, but the function failed to find it. We cannot treat it 9054 // as "no solution". 9055 if (!SO.hasValue() || !UO.hasValue()) 9056 return { None, false }; 9057 9058 // Check the smaller value first to see if it leaves the range. 9059 // At this point, both SO and UO must have values. 9060 Optional<APInt> Min = MinOptional(SO, UO); 9061 if (LeavesRange(*Min)) 9062 return { Min, true }; 9063 Optional<APInt> Max = Min == SO ? UO : SO; 9064 if (LeavesRange(*Max)) 9065 return { Max, true }; 9066 9067 // Solutions were found, but were eliminated, hence the "true". 9068 return { None, true }; 9069 }; 9070 9071 std::tie(A, B, C, M, BitWidth) = *T; 9072 // Lower bound is inclusive, subtract 1 to represent the exiting value. 
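  // For example, for Range = [0, 10) the first value outside the range when
  // leaving downwards is -1 (the inclusive lower bound minus one), while the
  // first value outside when leaving upwards is 10, the exclusive upper
  // bound itself.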
9073 APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1; 9074 APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth()); 9075 auto SL = SolveForBoundary(Lower); 9076 auto SU = SolveForBoundary(Upper); 9077 // If either of the solutions was unknown, no meaningful conclusions can 9078 // be made. 9079 if (!SL.second || !SU.second) 9080 return None; 9081 9082 // Claim: The correct solution is not some value between Min and Max. 9083 // 9084 // Justification: Assuming that Min and Max are different values, one of 9085 // them is when the first signed overflow happens, the other is when the 9086 // first unsigned overflow happens. Crossing the range boundary is only 9087 // possible via an overflow (treating 0 as a special case of it, modeling 9088 // an overflow as crossing k*2^W for some k). 9089 // 9090 // The interesting case here is when Min was eliminated as an invalid 9091 // solution, but Max was not. The argument is that if there was another 9092 // overflow between Min and Max, it would also have been eliminated if 9093 // it was considered. 9094 // 9095 // For a given boundary, it is possible to have two overflows of the same 9096 // type (signed/unsigned) without having the other type in between: this 9097 // can happen when the vertex of the parabola is between the iterations 9098 // corresponding to the overflows. This is only possible when the two 9099 // overflows cross k*2^W for the same k. In such a case, if the second one 9100 // left the range (and was the first one to do so), the first overflow 9101 // would have to enter the range, which would mean that either we had left 9102 // the range before or that we started outside of it. Both of these cases 9103 // are contradictions. 9104 // 9105 // Claim: In the case where SolveForBoundary returns None, the correct 9106 // solution is not some value between the Max for this boundary and the 9107 // Min of the other boundary. 9108 // 9109 // Justification: Assume that we had such Max_A and Min_B corresponding 9110 // to range boundaries A and B and such that Max_A < Min_B. If there was 9111 // a solution between Max_A and Min_B, it would have to be caused by an 9112 // overflow corresponding to either A or B. It cannot correspond to B, 9113 // since Min_B is the first occurrence of such an overflow. If it 9114 // corresponded to A, it would have to be either a signed or an unsigned 9115 // overflow that is larger than both eliminated overflows for A. But 9116 // between the eliminated overflows and this overflow, the values would 9117 // cover the entire value space, thus crossing the other boundary, which 9118 // is a contradiction. 9119 9120 return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth); 9121 } 9122 9123 ScalarEvolution::ExitLimit 9124 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit, 9125 bool AllowPredicates) { 9126 9127 // This is only used for loops with an "x != y" exit test. The exit condition 9128 // is now expressed as a single expression, V = x-y. So the exit test is 9129 // effectively V != 0. We know, and take advantage of, the fact that this 9130 // expression is only used in a comparison-with-zero context. 9131 9132 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 9133 // If the value is a constant 9134 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 9135 // If the value is already zero, the branch will execute zero times. 9136 if (C->getValue()->isZero()) return C; 9137 return getCouldNotCompute(); // Otherwise it will loop infinitely.
9138 } 9139 9140 const SCEVAddRecExpr *AddRec = 9141 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V)); 9142 9143 if (!AddRec && AllowPredicates) 9144 // Try to make this an AddRec using runtime tests, in the first X 9145 // iterations of this loop, where X is the SCEV expression found by the 9146 // algorithm below. 9147 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); 9148 9149 if (!AddRec || AddRec->getLoop() != L) 9150 return getCouldNotCompute(); 9151 9152 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 9153 // the quadratic equation to solve it. 9154 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 9155 // We can only use this value if the chrec ends up with an exact zero 9156 // value at this index. When solving for "X*X != 5", for example, we 9157 // should not accept a root of 2. 9158 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) { 9159 const auto *R = cast<SCEVConstant>(getConstant(S.getValue())); 9160 return ExitLimit(R, R, false, Predicates); 9161 } 9162 return getCouldNotCompute(); 9163 } 9164 9165 // Otherwise we can only handle this if it is affine. 9166 if (!AddRec->isAffine()) 9167 return getCouldNotCompute(); 9168 9169 // If this is an affine expression, the execution count of this branch is 9170 // the minimum unsigned root of the following equation: 9171 // 9172 // Start + Step*N = 0 (mod 2^BW) 9173 // 9174 // equivalent to: 9175 // 9176 // Step*N = -Start (mod 2^BW) 9177 // 9178 // where BW is the common bit width of Start and Step. 9179 9180 // Get the initial value for the loop. 9181 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 9182 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 9183 9184 // For now we handle only constant steps. 9185 // 9186 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 9187 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 9188 // to 0; it must be counting down to equal 0. Consequently, N = Start / -Step. 9189 // We have not yet seen any such cases. 9190 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 9191 if (!StepC || StepC->getValue()->isZero()) 9192 return getCouldNotCompute(); 9193 9194 // For positive steps (counting up until unsigned overflow): 9195 // N = -Start/Step (as unsigned) 9196 // For negative steps (counting down to zero): 9197 // N = Start/-Step 9198 // First compute the unsigned distance from zero in the direction of Step. 9199 bool CountDown = StepC->getAPInt().isNegative(); 9200 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 9201 9202 // Handle unitary steps, which cannot wrap around. 9203 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 9204 // N = Distance (as unsigned) 9205 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 9206 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L)); 9207 APInt MaxBECountBase = getUnsignedRangeMax(Distance); 9208 if (MaxBECountBase.ult(MaxBECount)) 9209 MaxBECount = MaxBECountBase; 9210 9211 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 9212 // we end up with a loop whose backedge-taken count is n - 1. Detect this 9213 // case, and see if we can improve the bound. 9214 // 9215 // Explicitly handling this here is necessary because getUnsignedRange 9216 // isn't context-sensitive; it doesn't know that we only care about the 9217 // range inside the loop.
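    // For example, if the original exit test was "i != n" with i starting at
    // 0, the rotated loop has a backedge-taken count of n - 1, and a guard
    // of the form "n != 0" lets us bound it by unsigned_max(n) - 1 instead
    // of the unsigned max of n - 1 itself (which is UINT_MAX when n may be
    // 0, since n - 1 then wraps).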
9218 const SCEV *Zero = getZero(Distance->getType()); 9219 const SCEV *One = getOne(Distance->getType()); 9220 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 9221 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 9222 // If Distance + 1 doesn't overflow, we can compute the maximum distance 9223 // as "unsigned_max(Distance + 1) - 1". 9224 ConstantRange CR = getUnsignedRange(DistancePlusOne); 9225 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 9226 } 9227 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 9228 } 9229 9230 // If the condition controls loop exit (the loop exits only if the expression 9231 // is true) and the addition is no-wrap, we can use unsigned divide to 9232 // compute the backedge count. In this case, the step may not divide the 9233 // distance, but we don't care because if the condition is "missed" the loop 9234 // will have undefined behavior due to wrapping. 9235 if (ControlsExit && AddRec->hasNoSelfWrap() && 9236 loopHasNoAbnormalExits(AddRec->getLoop())) { 9237 const SCEV *Exact = 9238 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 9239 const SCEV *Max = 9240 Exact == getCouldNotCompute() 9241 ? Exact 9242 : getConstant(getUnsignedRangeMax(Exact)); 9243 return ExitLimit(Exact, Max, false, Predicates); 9244 } 9245 9246 // Solve the general equation. 9247 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 9248 getNegativeSCEV(Start), *this); 9249 const SCEV *M = E == getCouldNotCompute() 9250 ? E 9251 : getConstant(getUnsignedRangeMax(E)); 9252 return ExitLimit(E, M, false, Predicates); 9253 } 9254 9255 ScalarEvolution::ExitLimit 9256 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 9257 // Loops that look like: while (X == 0) are very strange indeed. We don't 9258 // handle them yet except for the trivial case. This could be expanded in the 9259 // future as needed. 9260 9261 // If the value is a constant, check to see if it is known to be non-zero 9262 // already. If so, the backedge will execute zero times. 9263 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 9264 if (!C->getValue()->isZero()) 9265 return getZero(C->getType()); 9266 return getCouldNotCompute(); // Otherwise it will loop infinitely. 9267 } 9268 9269 // We could implement others, but I really doubt anyone writes loops like 9270 // this, and if they did, they would already be constant folded. 9271 return getCouldNotCompute(); 9272 } 9273 9274 std::pair<const BasicBlock *, const BasicBlock *> 9275 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) 9276 const { 9277 // If the block has a unique predecessor, then there is no path from the 9278 // predecessor to the block that does not go through the direct edge 9279 // from the predecessor to the block. 9280 if (const BasicBlock *Pred = BB->getSinglePredecessor()) 9281 return {Pred, BB}; 9282 9283 // A loop's header is defined to be a block that dominates the loop. 9284 // If the header has a unique predecessor outside the loop, it must be 9285 // a block that has exactly one successor that can reach the loop.
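  // (When the loop has such a unique outside predecessor, e.g. a preheader,
  // that is what getLoopPredecessor() returns below.)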
9286 if (const Loop *L = LI.getLoopFor(BB)) 9287 return {L->getLoopPredecessor(), L->getHeader()}; 9288 9289 return {nullptr, nullptr}; 9290 } 9291 9292 /// SCEV structural equivalence is usually sufficient for testing whether two 9293 /// expressions are equal; however, for the purposes of looking for a condition 9294 /// guarding a loop, it can be useful to be a little more general, since a 9295 /// front-end may have replicated the controlling expression. 9296 static bool HasSameValue(const SCEV *A, const SCEV *B) { 9297 // Quick check to see if they are the same SCEV. 9298 if (A == B) return true; 9299 9300 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 9301 // Not all instructions that are "identical" compute the same value. For 9302 // instance, two distinct alloca instructions allocating the same type are 9303 // identical and do not read memory, but they compute distinct values. 9304 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 9305 }; 9306 9307 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 9308 // two different instructions with the same value. Check for this case. 9309 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 9310 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 9311 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 9312 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 9313 if (ComputesEqualValues(AI, BI)) 9314 return true; 9315 9316 // Otherwise assume they may have a different value. 9317 return false; 9318 } 9319 9320 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 9321 const SCEV *&LHS, const SCEV *&RHS, 9322 unsigned Depth) { 9323 bool Changed = false; 9324 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 9325 // '0 != 0'. 9326 auto TrivialCase = [&](bool TriviallyTrue) { 9327 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 9328 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 9329 return true; 9330 }; 9331 // If we hit the max recursion limit, bail out. 9332 if (Depth >= 3) 9333 return false; 9334 9335 // Canonicalize a constant to the right side. 9336 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 9337 // Check whether both operands are constant. 9338 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 9339 if (ConstantExpr::getICmp(Pred, 9340 LHSC->getValue(), 9341 RHSC->getValue())->isNullValue()) 9342 return TrivialCase(false); 9343 else 9344 return TrivialCase(true); 9345 } 9346 // Otherwise swap the operands to put the constant on the right. 9347 std::swap(LHS, RHS); 9348 Pred = ICmpInst::getSwappedPredicate(Pred); 9349 Changed = true; 9350 } 9351 9352 // If we're comparing an addrec with a value which is loop-invariant in the 9353 // addrec's loop, put the addrec on the left. Also make a dominance check, 9354 // as both operands could be addrecs loop-invariant in each other's loop. 9355 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 9356 const Loop *L = AR->getLoop(); 9357 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 9358 std::swap(LHS, RHS); 9359 Pred = ICmpInst::getSwappedPredicate(Pred); 9360 Changed = true; 9361 } 9362 } 9363 9364 // If there's a constant operand, canonicalize comparisons with boundary 9365 // cases, and canonicalize *-or-equal comparisons to regular comparisons.
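  // For example, "x u> 0" has the equivalent equality form "x != 0", and
  // "x u>= 5" becomes the regular comparison "x u> 4".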
9366 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 9367 const APInt &RA = RC->getAPInt(); 9368 9369 bool SimplifiedByConstantRange = false; 9370 9371 if (!ICmpInst::isEquality(Pred)) { 9372 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 9373 if (ExactCR.isFullSet()) 9374 return TrivialCase(true); 9375 else if (ExactCR.isEmptySet()) 9376 return TrivialCase(false); 9377 9378 APInt NewRHS; 9379 CmpInst::Predicate NewPred; 9380 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 9381 ICmpInst::isEquality(NewPred)) { 9382 // We were able to convert an inequality to an equality. 9383 Pred = NewPred; 9384 RHS = getConstant(NewRHS); 9385 Changed = SimplifiedByConstantRange = true; 9386 } 9387 } 9388 9389 if (!SimplifiedByConstantRange) { 9390 switch (Pred) { 9391 default: 9392 break; 9393 case ICmpInst::ICMP_EQ: 9394 case ICmpInst::ICMP_NE: 9395 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 9396 if (!RA) 9397 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 9398 if (const SCEVMulExpr *ME = 9399 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 9400 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 9401 ME->getOperand(0)->isAllOnesValue()) { 9402 RHS = AE->getOperand(1); 9403 LHS = ME->getOperand(1); 9404 Changed = true; 9405 } 9406 break; 9407 9408 9409 // The "Should have been caught earlier!" messages refer to the fact 9410 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 9411 // should have fired on the corresponding cases, and canonicalized the 9412 // check to a trivial case. 9413 9414 case ICmpInst::ICMP_UGE: 9415 assert(!RA.isMinValue() && "Should have been caught earlier!"); 9416 Pred = ICmpInst::ICMP_UGT; 9417 RHS = getConstant(RA - 1); 9418 Changed = true; 9419 break; 9420 case ICmpInst::ICMP_ULE: 9421 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 9422 Pred = ICmpInst::ICMP_ULT; 9423 RHS = getConstant(RA + 1); 9424 Changed = true; 9425 break; 9426 case ICmpInst::ICMP_SGE: 9427 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 9428 Pred = ICmpInst::ICMP_SGT; 9429 RHS = getConstant(RA - 1); 9430 Changed = true; 9431 break; 9432 case ICmpInst::ICMP_SLE: 9433 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 9434 Pred = ICmpInst::ICMP_SLT; 9435 RHS = getConstant(RA + 1); 9436 Changed = true; 9437 break; 9438 } 9439 } 9440 } 9441 9442 // Check for obvious equality. 9443 if (HasSameValue(LHS, RHS)) { 9444 if (ICmpInst::isTrueWhenEqual(Pred)) 9445 return TrivialCase(true); 9446 if (ICmpInst::isFalseWhenEqual(Pred)) 9447 return TrivialCase(false); 9448 } 9449 9450 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 9451 // adding or subtracting 1 from one of the operands.
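  // For example, "x s<= y" becomes "x s< y + 1" when y + 1 is known not to
  // overflow in the signed sense (the signed max of y is not INT_MAX);
  // failing that, "x - 1 s< y" is tried instead.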
9452 switch (Pred) { 9453 case ICmpInst::ICMP_SLE: 9454 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 9455 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9456 SCEV::FlagNSW); 9457 Pred = ICmpInst::ICMP_SLT; 9458 Changed = true; 9459 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 9460 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 9461 SCEV::FlagNSW); 9462 Pred = ICmpInst::ICMP_SLT; 9463 Changed = true; 9464 } 9465 break; 9466 case ICmpInst::ICMP_SGE: 9467 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 9468 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 9469 SCEV::FlagNSW); 9470 Pred = ICmpInst::ICMP_SGT; 9471 Changed = true; 9472 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 9473 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9474 SCEV::FlagNSW); 9475 Pred = ICmpInst::ICMP_SGT; 9476 Changed = true; 9477 } 9478 break; 9479 case ICmpInst::ICMP_ULE: 9480 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 9481 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9482 SCEV::FlagNUW); 9483 Pred = ICmpInst::ICMP_ULT; 9484 Changed = true; 9485 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 9486 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 9487 Pred = ICmpInst::ICMP_ULT; 9488 Changed = true; 9489 } 9490 break; 9491 case ICmpInst::ICMP_UGE: 9492 if (!getUnsignedRangeMin(RHS).isMinValue()) { 9493 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 9494 Pred = ICmpInst::ICMP_UGT; 9495 Changed = true; 9496 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 9497 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9498 SCEV::FlagNUW); 9499 Pred = ICmpInst::ICMP_UGT; 9500 Changed = true; 9501 } 9502 break; 9503 default: 9504 break; 9505 } 9506 9507 // TODO: More simplifications are possible here. 9508 9509 // Recursively simplify until we either hit a recursion limit or nothing 9510 // changes. 9511 if (Changed) 9512 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 9513 9514 return Changed; 9515 } 9516 9517 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9518 return getSignedRangeMax(S).isNegative(); 9519 } 9520 9521 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9522 return getSignedRangeMin(S).isStrictlyPositive(); 9523 } 9524 9525 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9526 return !getSignedRangeMin(S).isNegative(); 9527 } 9528 9529 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9530 return !getSignedRangeMax(S).isStrictlyPositive(); 9531 } 9532 9533 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9534 return isKnownNegative(S) || isKnownPositive(S); 9535 } 9536 9537 std::pair<const SCEV *, const SCEV *> 9538 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9539 // Compute SCEV on entry of loop L. 9540 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9541 if (Start == getCouldNotCompute()) 9542 return { Start, Start }; 9543 // Compute post increment SCEV for loop L. 9544 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 9545 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 9546 return { Start, PostInc }; 9547 } 9548 9549 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 9550 const SCEV *LHS, const SCEV *RHS) { 9551 // First collect all loops. 
9552 SmallPtrSet<const Loop *, 8> LoopsUsed; 9553 getUsedLoops(LHS, LoopsUsed); 9554 getUsedLoops(RHS, LoopsUsed); 9555 9556 if (LoopsUsed.empty()) 9557 return false; 9558 9559 // Domination relationship must be a linear order on collected loops. 9560 #ifndef NDEBUG 9561 for (auto *L1 : LoopsUsed) 9562 for (auto *L2 : LoopsUsed) 9563 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 9564 DT.dominates(L2->getHeader(), L1->getHeader())) && 9565 "Domination relationship is not a linear order"); 9566 #endif 9567 9568 const Loop *MDL = 9569 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 9570 [&](const Loop *L1, const Loop *L2) { 9571 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 9572 }); 9573 9574 // Get init and post increment value for LHS. 9575 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 9576 // If LHS contains an unknown non-invariant SCEV, bail out. 9577 if (SplitLHS.first == getCouldNotCompute()) 9578 return false; 9579 assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 9580 // Get init and post increment value for RHS. 9581 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 9582 // If RHS contains an unknown non-invariant SCEV, bail out. 9583 if (SplitRHS.first == getCouldNotCompute()) 9584 return false; 9585 assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 9586 // It is possible that the init SCEV contains an invariant load that does 9587 // not dominate MDL and is not available at MDL loop entry, so we should 9588 // check it here. 9589 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || 9590 !isAvailableAtLoopEntry(SplitRHS.first, MDL)) 9591 return false; 9592 9593 // The backedge guard check appears to be faster than the entry one, so in 9594 // some cases it can speed up the whole estimation by short-circuiting. 9595 return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, 9596 SplitRHS.second) && 9597 isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first); 9598 } 9599 9600 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 9601 const SCEV *LHS, const SCEV *RHS) { 9602 // Canonicalize the inputs first. 9603 (void)SimplifyICmpOperands(Pred, LHS, RHS); 9604 9605 if (isKnownViaInduction(Pred, LHS, RHS)) 9606 return true; 9607 9608 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 9609 return true; 9610 9611 // Otherwise see what can be done with some simple reasoning. 9612 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); 9613 } 9614 9615 Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred, 9616 const SCEV *LHS, 9617 const SCEV *RHS) { 9618 if (isKnownPredicate(Pred, LHS, RHS)) 9619 return true; 9620 else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS)) 9621 return false; 9622 return None; 9623 } 9624 9625 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred, 9626 const SCEV *LHS, const SCEV *RHS, 9627 const Instruction *Context) { 9628 // TODO: Analyze guards and assumes from Context's block.
9629 return isKnownPredicate(Pred, LHS, RHS) || 9630 isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS); 9631 } 9632 9633 Optional<bool> 9634 ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS, 9635 const SCEV *RHS, 9636 const Instruction *Context) { 9637 Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS); 9638 if (KnownWithoutContext) 9639 return KnownWithoutContext; 9640 9641 if (isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS)) 9642 return true; 9643 else if (isBasicBlockEntryGuardedByCond(Context->getParent(), 9644 ICmpInst::getInversePredicate(Pred), 9645 LHS, RHS)) 9646 return false; 9647 return None; 9648 } 9649 9650 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 9651 const SCEVAddRecExpr *LHS, 9652 const SCEV *RHS) { 9653 const Loop *L = LHS->getLoop(); 9654 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 9655 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 9656 } 9657 9658 Optional<ScalarEvolution::MonotonicPredicateType> 9659 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS, 9660 ICmpInst::Predicate Pred) { 9661 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred); 9662 9663 #ifndef NDEBUG 9664 // Verify an invariant: swapping the predicate should turn a monotonically 9665 // increasing change to a monotonically decreasing one, and vice versa. 9666 if (Result) { 9667 auto ResultSwapped = 9668 getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred)); 9669 9670 assert(ResultSwapped.hasValue() && "should be able to analyze both!"); 9671 assert(ResultSwapped.getValue() != Result.getValue() && 9672 "monotonicity should flip as we flip the predicate"); 9673 } 9674 #endif 9675 9676 return Result; 9677 } 9678 9679 Optional<ScalarEvolution::MonotonicPredicateType> 9680 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS, 9681 ICmpInst::Predicate Pred) { 9682 // A zero step value for LHS means the induction variable is essentially a 9683 // loop invariant value. We don't really depend on the predicate actually 9684 // flipping from false to true (for increasing predicates, and the other way 9685 // around for decreasing predicates); all we care about is that *if* the 9686 // predicate changes then it only changes from false to true. 9687 // 9688 // A zero step value in itself is not very useful, but there may be places 9689 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 9690 // as general as possible. 9691 9692 // Only handle LE/LT/GE/GT predicates. 9693 if (!ICmpInst::isRelational(Pred)) 9694 return None; 9695 9696 bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred); 9697 assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) && 9698 "Should be greater or less!"); 9699 9700 // Check that AR does not wrap. 9701 if (ICmpInst::isUnsigned(Pred)) { 9702 if (!LHS->hasNoUnsignedWrap()) 9703 return None; 9704 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 9705 } else { 9706 assert(ICmpInst::isSigned(Pred) && 9707 "Relational predicate is either signed or unsigned!"); 9708 if (!LHS->hasNoSignedWrap()) 9709 return None; 9710 9711 const SCEV *Step = LHS->getStepRecurrence(*this); 9712 9713 if (isKnownNonNegative(Step)) 9714 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 9715 9716 if (isKnownNonPositive(Step)) 9717 return !IsGreater ?
MonotonicallyIncreasing : MonotonicallyDecreasing; 9718 9719 return None; 9720 } 9721 } 9722 9723 Optional<ScalarEvolution::LoopInvariantPredicate> 9724 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred, 9725 const SCEV *LHS, const SCEV *RHS, 9726 const Loop *L) { 9727 9728 // If there is a loop-invariant operand, force it into the RHS; otherwise bail out. 9729 if (!isLoopInvariant(RHS, L)) { 9730 if (!isLoopInvariant(LHS, L)) 9731 return None; 9732 9733 std::swap(LHS, RHS); 9734 Pred = ICmpInst::getSwappedPredicate(Pred); 9735 } 9736 9737 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9738 if (!ArLHS || ArLHS->getLoop() != L) 9739 return None; 9740 9741 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred); 9742 if (!MonotonicType) 9743 return None; 9744 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 9745 // true as the loop iterates, and the backedge is control dependent on 9746 // "ArLHS `Pred` RHS" == true then we can reason as follows: 9747 // 9748 // * if the predicate was false in the first iteration then the predicate 9749 // is never evaluated again, since the loop exits without taking the 9750 // backedge. 9751 // * if the predicate was true in the first iteration then it will 9752 // continue to be true for all future iterations since it is 9753 // monotonically increasing. 9754 // 9755 // For both the above possibilities, we can replace the loop varying 9756 // predicate with its value on the first iteration of the loop (which is 9757 // loop invariant). 9758 // 9759 // A similar reasoning applies for a monotonically decreasing predicate, by 9760 // replacing true with false and false with true in the above two bullets. 9761 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing; 9762 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 9763 9764 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 9765 return None; 9766 9767 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS); 9768 } 9769 9770 Optional<ScalarEvolution::LoopInvariantPredicate> 9771 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations( 9772 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 9773 const Instruction *Context, const SCEV *MaxIter) { 9774 // Try to prove the following set of facts: 9775 // - The predicate is monotonic in the iteration space. 9776 // - If the check does not fail on the 1st iteration: 9777 // - No overflow will happen during first MaxIter iterations; 9778 // - It will not fail on the MaxIter'th iteration. 9779 // If the check does fail on the 1st iteration, we leave the loop and no 9780 // other checks matter. 9781 9782 // If there is a loop-invariant operand, force it into the RHS; otherwise bail out. 9783 if (!isLoopInvariant(RHS, L)) { 9784 if (!isLoopInvariant(LHS, L)) 9785 return None; 9786 9787 std::swap(LHS, RHS); 9788 Pred = ICmpInst::getSwappedPredicate(Pred); 9789 } 9790 9791 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS); 9792 if (!AR || AR->getLoop() != L) 9793 return None; 9794 9795 // The predicate must be relational (i.e. <, <=, >=, >). 9796 if (!ICmpInst::isRelational(Pred)) 9797 return None; 9798 9799 // TODO: Support steps other than +/- 1.
9800 const SCEV *Step = AR->getStepRecurrence(*this); 9801 auto *One = getOne(Step->getType()); 9802 auto *MinusOne = getNegativeSCEV(One); 9803 if (Step != One && Step != MinusOne) 9804 return None; 9805 9806 // Type mismatch here means that MaxIter is potentially larger than the max 9807 // unsigned value of the start type, which means we cannot prove no-wrap for 9808 // the indvar. 9809 if (AR->getType() != MaxIter->getType()) 9810 return None; 9811 9812 // Value of IV on the suggested last iteration. 9813 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this); 9814 // Does it still meet the requirement? 9815 if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS)) 9816 return None; 9817 // Because the step is +/- 1 and MaxIter has the same type as Start (i.e. 9818 // it does not exceed the max unsigned value of this type), this effectively 9819 // proves that there is no wrap during the iteration. To prove that there is 9820 // no signed/unsigned wrap, we need to check that 9821 // Start <= Last for step = 1 or Start >= Last for step = -1. 9822 ICmpInst::Predicate NoOverflowPred = 9823 CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 9824 if (Step == MinusOne) 9825 NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred); 9826 const SCEV *Start = AR->getStart(); 9827 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context)) 9828 return None; 9829 9830 // Everything is fine. 9831 return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS); 9832 } 9833 9834 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 9835 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 9836 if (HasSameValue(LHS, RHS)) 9837 return ICmpInst::isTrueWhenEqual(Pred); 9838 9839 // This code is split out from isKnownPredicate because it is called from 9840 // within isLoopEntryGuardedByCond. 9841 9842 auto CheckRanges = 9843 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 9844 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 9845 .contains(RangeLHS); 9846 }; 9847 9848 // The check at the top of the function catches the case where the values are 9849 // known to be equal. 9850 if (Pred == CmpInst::ICMP_EQ) 9851 return false; 9852 9853 if (Pred == CmpInst::ICMP_NE) 9854 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 9855 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 9856 isKnownNonZero(getMinusSCEV(LHS, RHS)); 9857 9858 if (CmpInst::isSigned(Pred)) 9859 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 9860 9861 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 9862 } 9863 9864 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 9865 const SCEV *LHS, 9866 const SCEV *RHS) { 9867 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 9868 // Return Y via OutY.
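  // For example, matching Result = (%x + 42)<nsw> against X = %x with
  // ExpectedFlags = FlagNSW succeeds and sets OutY to 42.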
9869 auto MatchBinaryAddToConst = 9870 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 9871 SCEV::NoWrapFlags ExpectedFlags) { 9872 const SCEV *NonConstOp, *ConstOp; 9873 SCEV::NoWrapFlags FlagsPresent; 9874 9875 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 9876 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 9877 return false; 9878 9879 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 9880 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 9881 }; 9882 9883 APInt C; 9884 9885 switch (Pred) { 9886 default: 9887 break; 9888 9889 case ICmpInst::ICMP_SGE: 9890 std::swap(LHS, RHS); 9891 LLVM_FALLTHROUGH; 9892 case ICmpInst::ICMP_SLE: 9893 // X s<= (X + C)<nsw> if C >= 0 9894 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 9895 return true; 9896 9897 // (X + C)<nsw> s<= X if C <= 0 9898 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 9899 !C.isStrictlyPositive()) 9900 return true; 9901 break; 9902 9903 case ICmpInst::ICMP_SGT: 9904 std::swap(LHS, RHS); 9905 LLVM_FALLTHROUGH; 9906 case ICmpInst::ICMP_SLT: 9907 // X s< (X + C)<nsw> if C > 0 9908 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 9909 C.isStrictlyPositive()) 9910 return true; 9911 9912 // (X + C)<nsw> s< X if C < 0 9913 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 9914 return true; 9915 break; 9916 9917 case ICmpInst::ICMP_UGE: 9918 std::swap(LHS, RHS); 9919 LLVM_FALLTHROUGH; 9920 case ICmpInst::ICMP_ULE: 9921 // X u<= (X + C)<nuw> for any C 9922 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW)) 9923 return true; 9924 break; 9925 9926 case ICmpInst::ICMP_UGT: 9927 std::swap(LHS, RHS); 9928 LLVM_FALLTHROUGH; 9929 case ICmpInst::ICMP_ULT: 9930 // X u< (X + C)<nuw> if C != 0 9931 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW) && !C.isNullValue()) 9932 return true; 9933 break; 9934 } 9935 9936 return false; 9937 } 9938 9939 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 9940 const SCEV *LHS, 9941 const SCEV *RHS) { 9942 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 9943 return false; 9944 9945 // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting 9946 // on the stack can result in exponential time complexity. 9947 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 9948 9949 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 9950 // 9951 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 9952 // isKnownPredicate. isKnownPredicate is more powerful, but also more 9953 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 9954 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 9955 // use isKnownPredicate later if needed. 9956 return isKnownNonNegative(RHS) && 9957 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) && 9958 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS); 9959 } 9960 9961 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB, 9962 ICmpInst::Predicate Pred, 9963 const SCEV *LHS, const SCEV *RHS) { 9964 // No need to even try if we know the module has no guards.
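  // A guard takes the form
  //   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"(...) ]
  // and any such call in BB lets us treat %cond as a known-true condition
  // for the implication check below.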
9965 if (!HasGuards) 9966 return false; 9967 9968 return any_of(*BB, [&](const Instruction &I) { 9969 using namespace llvm::PatternMatch; 9970 9971 Value *Condition; 9972 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>( 9973 m_Value(Condition))) && 9974 isImpliedCond(Pred, LHS, RHS, Condition, false); 9975 }); 9976 } 9977 9978 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is 9979 /// protected by a conditional between LHS and RHS. This is used 9980 /// to eliminate casts. 9981 bool 9982 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, 9983 ICmpInst::Predicate Pred, 9984 const SCEV *LHS, const SCEV *RHS) { 9985 // Interpret a null as meaning no loop, where there is obviously no guard 9986 // (interprocedural conditions notwithstanding). 9987 if (!L) return true; 9988 9989 if (VerifyIR) 9990 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) && 9991 "This cannot be done on broken IR!"); 9992 9993 9994 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9995 return true; 9996 9997 BasicBlock *Latch = L->getLoopLatch(); 9998 if (!Latch) 9999 return false; 10000 10001 BranchInst *LoopContinuePredicate = 10002 dyn_cast<BranchInst>(Latch->getTerminator()); 10003 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() && 10004 isImpliedCond(Pred, LHS, RHS, 10005 LoopContinuePredicate->getCondition(), 10006 LoopContinuePredicate->getSuccessor(0) != L->getHeader())) 10007 return true; 10008 10009 // We don't want more than one activation of the following loops on the stack 10010 // -- that can lead to O(n!) time complexity. 10011 if (WalkingBEDominatingConds) 10012 return false; 10013 10014 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true); 10015 10016 // See if we can exploit a trip count to prove the predicate. 10017 const auto &BETakenInfo = getBackedgeTakenInfo(L); 10018 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this); 10019 if (LatchBECount != getCouldNotCompute()) { 10020 // We know that Latch branches back to the loop header exactly 10021 // LatchBECount times. This means the backedge condition at Latch is 10022 // equivalent to "{0,+,1} u< LatchBECount". 10023 Type *Ty = LatchBECount->getType(); 10024 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW); 10025 const SCEV *LoopCounter = 10026 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags); 10027 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter, 10028 LatchBECount)) 10029 return true; 10030 } 10031 10032 // Check conditions due to any @llvm.assume intrinsics. 10033 for (auto &AssumeVH : AC.assumptions()) { 10034 if (!AssumeVH) 10035 continue; 10036 auto *CI = cast<CallInst>(AssumeVH); 10037 if (!DT.dominates(CI, Latch->getTerminator())) 10038 continue; 10039 10040 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 10041 return true; 10042 } 10043 10044 // If the loop is not reachable from the entry block, we risk running into an 10045 // infinite loop as we walk up into the dom tree. These loops do not matter 10046 // anyway, so we just return a conservative answer when we see them.
10047 if (!DT.isReachableFromEntry(L->getHeader())) 10048 return false; 10049 10050 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 10051 return true; 10052 10053 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 10054 DTN != HeaderDTN; DTN = DTN->getIDom()) { 10055 assert(DTN && "should reach the loop header before reaching the root!"); 10056 10057 BasicBlock *BB = DTN->getBlock(); 10058 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 10059 return true; 10060 10061 BasicBlock *PBB = BB->getSinglePredecessor(); 10062 if (!PBB) 10063 continue; 10064 10065 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 10066 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 10067 continue; 10068 10069 Value *Condition = ContinuePredicate->getCondition(); 10070 10071 // If we have an edge `E` within the loop body that dominates the only 10072 // latch, the condition guarding `E` also guards the backedge. This 10073 // reasoning works only for loops with a single latch. 10074 10075 BasicBlockEdge DominatingEdge(PBB, BB); 10076 if (DominatingEdge.isSingleEdge()) { 10077 // We're constructively (and conservatively) enumerating edges within the 10078 // loop body that dominate the latch. The dominator tree better agree 10079 // with us on this: 10080 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 10081 10082 if (isImpliedCond(Pred, LHS, RHS, Condition, 10083 BB != ContinuePredicate->getSuccessor(0))) 10084 return true; 10085 } 10086 } 10087 10088 return false; 10089 } 10090 10091 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 10092 ICmpInst::Predicate Pred, 10093 const SCEV *LHS, 10094 const SCEV *RHS) { 10095 if (VerifyIR) 10096 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 10097 "This cannot be done on broken IR!"); 10098 10099 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 10100 // the facts (a >= b && a != b) separately. A typical situation is when the 10101 // non-strict comparison is known from ranges and non-equality is known from 10102 // dominating predicates. If we are proving strict comparison, we always try 10103 // to prove non-equality and non-strict comparison separately. 10104 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 10105 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 10106 bool ProvedNonStrictComparison = false; 10107 bool ProvedNonEquality = false; 10108 10109 auto SplitAndProve = 10110 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool { 10111 if (!ProvedNonStrictComparison) 10112 ProvedNonStrictComparison = Fn(NonStrictPredicate); 10113 if (!ProvedNonEquality) 10114 ProvedNonEquality = Fn(ICmpInst::ICMP_NE); 10115 if (ProvedNonStrictComparison && ProvedNonEquality) 10116 return true; 10117 return false; 10118 }; 10119 10120 if (ProvingStrictComparison) { 10121 auto ProofFn = [&](ICmpInst::Predicate P) { 10122 return isKnownViaNonRecursiveReasoning(P, LHS, RHS); 10123 }; 10124 if (SplitAndProve(ProofFn)) 10125 return true; 10126 } 10127 10128 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 
10129 auto ProveViaGuard = [&](const BasicBlock *Block) { 10130 if (isImpliedViaGuard(Block, Pred, LHS, RHS)) 10131 return true; 10132 if (ProvingStrictComparison) { 10133 auto ProofFn = [&](ICmpInst::Predicate P) { 10134 return isImpliedViaGuard(Block, P, LHS, RHS); 10135 }; 10136 if (SplitAndProve(ProofFn)) 10137 return true; 10138 } 10139 return false; 10140 }; 10141 10142 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 10143 auto ProveViaCond = [&](const Value *Condition, bool Inverse) { 10144 const Instruction *Context = &BB->front(); 10145 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context)) 10146 return true; 10147 if (ProvingStrictComparison) { 10148 auto ProofFn = [&](ICmpInst::Predicate P) { 10149 return isImpliedCond(P, LHS, RHS, Condition, Inverse, Context); 10150 }; 10151 if (SplitAndProve(ProofFn)) 10152 return true; 10153 } 10154 return false; 10155 }; 10156 10157 // Starting at the block's predecessor, climb up the predecessor chain as 10158 // long as we can find predecessors that have a unique successor leading to 10159 // the original block. 10160 const Loop *ContainingLoop = LI.getLoopFor(BB); 10161 const BasicBlock *PredBB; 10162 if (ContainingLoop && ContainingLoop->getHeader() == BB) 10163 PredBB = ContainingLoop->getLoopPredecessor(); 10164 else 10165 PredBB = BB->getSinglePredecessor(); 10166 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB); 10167 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 10168 if (ProveViaGuard(Pair.first)) 10169 return true; 10170 10171 const BranchInst *LoopEntryPredicate = 10172 dyn_cast<BranchInst>(Pair.first->getTerminator()); 10173 if (!LoopEntryPredicate || 10174 LoopEntryPredicate->isUnconditional()) 10175 continue; 10176 10177 if (ProveViaCond(LoopEntryPredicate->getCondition(), 10178 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 10179 return true; 10180 } 10181 10182 // Check conditions due to any @llvm.assume intrinsics. 10183 for (auto &AssumeVH : AC.assumptions()) { 10184 if (!AssumeVH) 10185 continue; 10186 auto *CI = cast<CallInst>(AssumeVH); 10187 if (!DT.dominates(CI, BB)) 10188 continue; 10189 10190 if (ProveViaCond(CI->getArgOperand(0), false)) 10191 return true; 10192 } 10193 10194 return false; 10195 } 10196 10197 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 10198 ICmpInst::Predicate Pred, 10199 const SCEV *LHS, 10200 const SCEV *RHS) { 10201 // Interpret a null as meaning no loop, where there is obviously no guard 10202 // (interprocedural conditions notwithstanding). 10203 if (!L) 10204 return false; 10205 10206 // Both LHS and RHS must be available at loop entry. 10207 assert(isAvailableAtLoopEntry(LHS, L) && 10208 "LHS is not available at Loop Entry"); 10209 assert(isAvailableAtLoopEntry(RHS, L) && 10210 "RHS is not available at Loop Entry"); 10211 10212 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 10213 return true; 10214 10215 return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS); 10216 } 10217 10218 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, 10219 const SCEV *RHS, 10220 const Value *FoundCondValue, bool Inverse, 10221 const Instruction *Context) { 10222 // A false condition implies anything. Do not bother analyzing it further.
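  // Concretely: with Inverse == false we would be assuming that a literal
  // "false" holds, and with Inverse == true that a literal "true" fails;
  // either assumption is vacuous, so the implication holds trivially.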
  if (FoundCondValue ==
      ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
    return true;

  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
  const Value *Op0, *Op1;
  if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
    if (!Inverse)
      return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
             isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
  } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
    if (Inverse)
      return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
             isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
  }

  const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // We have found a conditional branch that dominates the loop or controls
  // the loop latch. Check to see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS, const SCEV *FoundRHS,
                                    const Instruction *Context) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    // For unsigned and equality predicates, try to prove that both found
    // operands fit into a narrow unsigned range. If so, try to prove facts in
    // narrow types.
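    // For example, an i32 guard `%x u< 7` where %x is known to lie in
    // [0, 65536) can be truncated to the i16 fact `trunc(%x) u< 7`, since
    // both guard operands fit into 16 unsigned bits.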
10273 if (!CmpInst::isSigned(FoundPred)) { 10274 auto *NarrowType = LHS->getType(); 10275 auto *WideType = FoundLHS->getType(); 10276 auto BitWidth = getTypeSizeInBits(NarrowType); 10277 const SCEV *MaxValue = getZeroExtendExpr( 10278 getConstant(APInt::getMaxValue(BitWidth)), WideType); 10279 if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) && 10280 isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) { 10281 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType); 10282 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType); 10283 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS, 10284 TruncFoundRHS, Context)) 10285 return true; 10286 } 10287 } 10288 10289 if (CmpInst::isSigned(Pred)) { 10290 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 10291 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 10292 } else { 10293 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 10294 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 10295 } 10296 } else if (getTypeSizeInBits(LHS->getType()) > 10297 getTypeSizeInBits(FoundLHS->getType())) { 10298 if (CmpInst::isSigned(FoundPred)) { 10299 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 10300 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 10301 } else { 10302 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 10303 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 10304 } 10305 } 10306 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, 10307 FoundRHS, Context); 10308 } 10309 10310 bool ScalarEvolution::isImpliedCondBalancedTypes( 10311 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10312 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, 10313 const Instruction *Context) { 10314 assert(getTypeSizeInBits(LHS->getType()) == 10315 getTypeSizeInBits(FoundLHS->getType()) && 10316 "Types should be balanced!"); 10317 // Canonicalize the query to match the way instcombine will have 10318 // canonicalized the comparison. 10319 if (SimplifyICmpOperands(Pred, LHS, RHS)) 10320 if (LHS == RHS) 10321 return CmpInst::isTrueWhenEqual(Pred); 10322 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 10323 if (FoundLHS == FoundRHS) 10324 return CmpInst::isFalseWhenEqual(FoundPred); 10325 10326 // Check to see if we can make the LHS or RHS match. 10327 if (LHS == FoundRHS || RHS == FoundLHS) { 10328 if (isa<SCEVConstant>(RHS)) { 10329 std::swap(FoundLHS, FoundRHS); 10330 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 10331 } else { 10332 std::swap(LHS, RHS); 10333 Pred = ICmpInst::getSwappedPredicate(Pred); 10334 } 10335 } 10336 10337 // Check whether the found predicate is the same as the desired predicate. 10338 if (FoundPred == Pred) 10339 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context); 10340 10341 // Check whether swapping the found predicate makes it the same as the 10342 // desired predicate. 10343 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 10344 if (isa<SCEVConstant>(RHS)) 10345 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context); 10346 else 10347 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), RHS, 10348 LHS, FoundLHS, FoundRHS, Context); 10349 } 10350 10351 // Unsigned comparison is the same as signed comparison when both the operands 10352 // are non-negative. 
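  // For example, when both %a and %b are known non-negative, a dominating
  // guard `%a u< %b` also establishes `%a s< %b`, so an SLT query can reuse
  // the ULT fact directly.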
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t). The
    // range we consider has to correspond to the same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin. If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
                                  Context))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min). We know from the guarding condition that !(V
        // == Min). This gives us
        //
        //   V `Pred` Min || V == Min && !(V == Min)
        //   => V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.

        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min),
                                  Context))
          return true;
        break;

      // `LHS < RHS` and `LHS <= RHS` are handled in the same way as
      // `RHS > LHS` and `RHS >= LHS` respectively.
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_ULE:
        if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
                                  LHS, V, getConstant(SharperMin), Context))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SLT:
      case ICmpInst::ICMP_ULT:
        if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
                                  LHS, V, getConstant(Min), Context))
          return true;
        break;

      default:
        // No change
        break;
      }
    }
  }

  // Check whether the found condition is stronger than the one we need.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS,
                                Context))
        return true;

  // Otherwise assume the worst.
10448 return false; 10449 } 10450 10451 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 10452 const SCEV *&L, const SCEV *&R, 10453 SCEV::NoWrapFlags &Flags) { 10454 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 10455 if (!AE || AE->getNumOperands() != 2) 10456 return false; 10457 10458 L = AE->getOperand(0); 10459 R = AE->getOperand(1); 10460 Flags = AE->getNoWrapFlags(); 10461 return true; 10462 } 10463 10464 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 10465 const SCEV *Less) { 10466 // We avoid subtracting expressions here because this function is usually 10467 // fairly deep in the call stack (i.e. is called many times). 10468 10469 // X - X = 0. 10470 if (More == Less) 10471 return APInt(getTypeSizeInBits(More->getType()), 0); 10472 10473 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 10474 const auto *LAR = cast<SCEVAddRecExpr>(Less); 10475 const auto *MAR = cast<SCEVAddRecExpr>(More); 10476 10477 if (LAR->getLoop() != MAR->getLoop()) 10478 return None; 10479 10480 // We look at affine expressions only; not for correctness but to keep 10481 // getStepRecurrence cheap. 10482 if (!LAR->isAffine() || !MAR->isAffine()) 10483 return None; 10484 10485 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 10486 return None; 10487 10488 Less = LAR->getStart(); 10489 More = MAR->getStart(); 10490 10491 // fall through 10492 } 10493 10494 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 10495 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 10496 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 10497 return M - L; 10498 } 10499 10500 SCEV::NoWrapFlags Flags; 10501 const SCEV *LLess = nullptr, *RLess = nullptr; 10502 const SCEV *LMore = nullptr, *RMore = nullptr; 10503 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 10504 // Compare (X + C1) vs X. 10505 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 10506 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 10507 if (RLess == More) 10508 return -(C1->getAPInt()); 10509 10510 // Compare X vs (X + C2). 10511 if (splitBinaryAdd(More, LMore, RMore, Flags)) 10512 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 10513 if (RMore == Less) 10514 return C2->getAPInt(); 10515 10516 // Compare (X + C1) vs (X + C2). 10517 if (C1 && C2 && RLess == RMore) 10518 return C2->getAPInt() - C1->getAPInt(); 10519 10520 return None; 10521 } 10522 10523 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( 10524 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10525 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) { 10526 // Try to recognize the following pattern: 10527 // 10528 // FoundRHS = ... 10529 // ... 10530 // loop: 10531 // FoundLHS = {Start,+,W} 10532 // context_bb: // Basic block from the same loop 10533 // known(Pred, FoundLHS, FoundRHS) 10534 // 10535 // If some predicate is known in the context of a loop, it is also known on 10536 // each iteration of this loop, including the first iteration. Therefore, in 10537 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to 10538 // prove the original pred using this fact. 10539 if (!Context) 10540 return false; 10541 const BasicBlock *ContextBB = Context->getParent(); 10542 // Make sure AR varies in the context block. 10543 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { 10544 const Loop *L = AR->getLoop(); 10545 // Make sure that context belongs to the loop and executes on 1st iteration 10546 // (if it ever executes at all). 
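    // For example, if {10,+,1} s< %N is known in a block of the loop that
    // dominates the latch, it is known on the first iteration in particular,
    // and therefore implies 10 s< %N for the start value.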
10547 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 10548 return false; 10549 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) 10550 return false; 10551 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); 10552 } 10553 10554 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { 10555 const Loop *L = AR->getLoop(); 10556 // Make sure that context belongs to the loop and executes on 1st iteration 10557 // (if it ever executes at all). 10558 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 10559 return false; 10560 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) 10561 return false; 10562 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); 10563 } 10564 10565 return false; 10566 } 10567 10568 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 10569 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10570 const SCEV *FoundLHS, const SCEV *FoundRHS) { 10571 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 10572 return false; 10573 10574 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 10575 if (!AddRecLHS) 10576 return false; 10577 10578 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 10579 if (!AddRecFoundLHS) 10580 return false; 10581 10582 // We'd like to let SCEV reason about control dependencies, so we constrain 10583 // both the inequalities to be about add recurrences on the same loop. This 10584 // way we can use isLoopEntryGuardedByCond later. 10585 10586 const Loop *L = AddRecFoundLHS->getLoop(); 10587 if (L != AddRecLHS->getLoop()) 10588 return false; 10589 10590 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 10591 // 10592 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 10593 // ... (2) 10594 // 10595 // Informal proof for (2), assuming (1) [*]: 10596 // 10597 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 10598 // 10599 // Then 10600 // 10601 // FoundLHS s< FoundRHS s< INT_MIN - C 10602 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 10603 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 10604 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 10605 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 10606 // <=> FoundLHS + C s< FoundRHS + C 10607 // 10608 // [*]: (1) can be proved by ruling out overflow. 10609 // 10610 // [**]: This can be proved by analyzing all the four possibilities: 10611 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 10612 // (A s>= 0, B s>= 0). 10613 // 10614 // Note: 10615 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 10616 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 10617 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 10618 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 10619 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 10620 // C)". 
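  //
  // For example, in i8 with C = 5: to derive (%x + 5) u< (%y + 5) from
  // %x u< %y via (1), it suffices to know %y u< -5 (i.e. %y u< 251), since
  // then neither side wraps when 5 is added.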

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not already pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return the
      // conservative answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If neither LHS nor RHS is a Phi, there is nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it the left one.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and the predicate holds for
    // the incoming values from each predecessor block, then it also holds for
    // the Phis.
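    // For example, for
    //
    //   %lphi = phi i32 [ 1, %bb1 ], [ 10, %bb2 ]
    //   %rphi = phi i32 [ 2, %bb1 ], [ 20, %bb2 ]
    //
    // proving 1 s< 2 and 10 s< 20 is enough to prove %lphi s< %rphi.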
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is an AddRec whose loop's header is LBB. This means that
    // there is a loop which has both an AddRec and an Unknown PHI; for it we
    // can compare the incoming values of the AddRec from above the loop and
    // from the latch with the respective incoming values of LPhi.
    // TODO: Generalize to handle loops with many inputs in a header.
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over the inputs of LHS and compare each of them
    // to RHS; the predicate is true for (LHS, RHS) if it is true for all such
    // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS,
                                            const Instruction *Context) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
                                          Context))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}

/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
                                 const SCEV *Candidate) {
  const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
  if (!MinMaxExpr)
    return false;

  return is_contained(MinMaxExpr->operands(), Candidate);
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.
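  //
  // For example, {5,+,2}<nsw> s< {7,+,2}<nsw> over the same loop reduces to
  // the start-value check 5 s< 7, because equal non-wrapping steps preserve
  // the gap between the two recurrences on every iteration.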

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
    SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
         getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
         getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;

  // We only want to work with GT comparison so far.
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }

  // For unsigned, try to reduce it to corresponding signed comparison.
  if (Pred == ICmpInst::ICMP_UGT)
    // We can replace unsigned predicate with its signed counterpart if all
    // involved values are non-negative.
    // TODO: We could have better support for unsigned.
    if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
      // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
      // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
      // use this fact to prove that LHS and RHS are non-negative.
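      // If the signed fact proves both LHS >s -1 and RHS >s -1, the UGT
      // query can be strengthened to the SGT query handled below.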
      const SCEV *MinusOne = getMinusOne(LHS->getType());
      if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
                                FoundRHS) &&
          isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
                                FoundRHS))
        Pred = ICmpInst::ICMP_SGT;
    }

  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Check if the SGT predicate can be proved trivially or using the found
  // context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getMinusOne(RHS->getType());

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request a trip count recalculation for the same loop, which
      // would then be cached as SCEVCouldNotCompute to avoid infinite
      // recursion. To avoid this, we only want to create SCEVs that are
      // constants in this section. So we bail if Denominator is not a
      // constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If so, then a
      // SCEV for the numerator already exists and matches with FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the
      // denominator is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, we know that FoundLHS is at
      // least 3. If we divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2.
      // If we divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getMinusOne(WTy);
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and
  // now need to prove something for them, try to prove the predicate for all
  // possible incoming values of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}

static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // zext x u<= sext x, sext x s<= zext x
  switch (Pred) {
  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
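    // For example, for i8 %x = -1 widened to i16, zext gives 255 while sext
    // gives 65535 when compared unsigned, so zext(%x) u<= sext(%x) holds;
    // for non-negative %x the two extensions agree.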
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  }
  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS) {
  return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
         isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
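  // For example, proving %x + 2 u< 10 from %x u< 5: FoundLHS lies in [0, 5),
  // adding the constant difference 2 gives an LHS range of [2, 7), and every
  // value in [2, 7) satisfies u< 10.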
11127 ConstantRange FoundLHSRange = 11128 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 11129 11130 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 11131 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 11132 11133 // We can also compute the range of values for `LHS` that satisfy the 11134 // consequent, "`LHS` `Pred` `RHS`": 11135 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 11136 ConstantRange SatisfyingLHSRange = 11137 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 11138 11139 // The antecedent implies the consequent if every value of `LHS` that 11140 // satisfies the antecedent also satisfies the consequent. 11141 return SatisfyingLHSRange.contains(LHSRange); 11142 } 11143 11144 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 11145 bool IsSigned, bool NoWrap) { 11146 assert(isKnownPositive(Stride) && "Positive stride expected!"); 11147 11148 if (NoWrap) return false; 11149 11150 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 11151 const SCEV *One = getOne(Stride->getType()); 11152 11153 if (IsSigned) { 11154 APInt MaxRHS = getSignedRangeMax(RHS); 11155 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 11156 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 11157 11158 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 11159 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 11160 } 11161 11162 APInt MaxRHS = getUnsignedRangeMax(RHS); 11163 APInt MaxValue = APInt::getMaxValue(BitWidth); 11164 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 11165 11166 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 11167 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 11168 } 11169 11170 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 11171 bool IsSigned, bool NoWrap) { 11172 if (NoWrap) return false; 11173 11174 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 11175 const SCEV *One = getOne(Stride->getType()); 11176 11177 if (IsSigned) { 11178 APInt MinRHS = getSignedRangeMin(RHS); 11179 APInt MinValue = APInt::getSignedMinValue(BitWidth); 11180 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 11181 11182 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 11183 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 11184 } 11185 11186 APInt MinRHS = getUnsignedRangeMin(RHS); 11187 APInt MinValue = APInt::getMinValue(BitWidth); 11188 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 11189 11190 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 11191 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 11192 } 11193 11194 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 11195 bool Equality) { 11196 const SCEV *One = getOne(Step->getType()); 11197 Delta = Equality ? getAddExpr(Delta, Step) 11198 : getAddExpr(Delta, getMinusSCEV(Step, One)); 11199 return getUDivExpr(Delta, Step); 11200 } 11201 11202 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 11203 const SCEV *Stride, 11204 const SCEV *End, 11205 unsigned BitWidth, 11206 bool IsSigned) { 11207 11208 assert(!isKnownNonPositive(Stride) && 11209 "Stride is expected strictly positive!"); 11210 // Calculate the maximum backedge count based on the range of values 11211 // permitted by Start, End, and Stride. 11212 const SCEV *MaxBECount; 11213 APInt MinStart = 11214 IsSigned ? 
getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 11215 11216 APInt StrideForMaxBECount = 11217 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 11218 11219 // We already know that the stride is positive, so we paper over conservatism 11220 // in our range computation by forcing StrideForMaxBECount to be at least one. 11221 // In theory this is unnecessary, but we expect MaxBECount to be a 11222 // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there 11223 // is nothing to constant fold it to). 11224 APInt One(BitWidth, 1, IsSigned); 11225 StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount); 11226 11227 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 11228 : APInt::getMaxValue(BitWidth); 11229 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 11230 11231 // Although End can be a MAX expression we estimate MaxEnd considering only 11232 // the case End = RHS of the loop termination condition. This is safe because 11233 // in the other case (End - Start) is zero, leading to a zero maximum backedge 11234 // taken count. 11235 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) 11236 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 11237 11238 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */, 11239 getConstant(StrideForMaxBECount) /* Step */, 11240 false /* Equality */); 11241 11242 return MaxBECount; 11243 } 11244 11245 ScalarEvolution::ExitLimit 11246 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 11247 const Loop *L, bool IsSigned, 11248 bool ControlsExit, bool AllowPredicates) { 11249 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 11250 11251 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 11252 bool PredicatedIV = false; 11253 11254 if (!IV && AllowPredicates) { 11255 // Try to make this an AddRec using runtime tests, in the first X 11256 // iterations of this loop, where X is the SCEV expression found by the 11257 // algorithm below. 11258 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 11259 PredicatedIV = true; 11260 } 11261 11262 // Avoid weird loops 11263 if (!IV || IV->getLoop() != L || !IV->isAffine()) 11264 return getCouldNotCompute(); 11265 11266 bool NoWrap = ControlsExit && 11267 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 11268 11269 const SCEV *Stride = IV->getStepRecurrence(*this); 11270 11271 bool PositiveStride = isKnownPositive(Stride); 11272 11273 // Avoid negative or zero stride values. 11274 if (!PositiveStride) { 11275 // We can compute the correct backedge taken count for loops with unknown 11276 // strides if we can prove that the loop is not an infinite loop with side 11277 // effects. Here's the loop structure we are trying to handle - 11278 // 11279 // i = start 11280 // do { 11281 // A[i] = i; 11282 // i += s; 11283 // } while (i < end); 11284 // 11285 // The backedge taken count for such loops is evaluated as - 11286 // (max(end, start + stride) - start - 1) /u stride 11287 // 11288 // The additional preconditions that we need to check to prove correctness 11289 // of the above formula is as follows - 11290 // 11291 // a) IV is either nuw or nsw depending upon signedness (indicated by the 11292 // NoWrap flag). 11293 // b) loop is single exit with no side effects. 11294 // 11295 // 11296 // Precondition a) implies that if the stride is negative, this is a single 11297 // trip loop. The backedge taken count formula reduces to zero in this case. 
11298 // 11299 // Precondition b) implies that the unknown stride cannot be zero otherwise 11300 // we have UB. 11301 // 11302 // The positive stride case is the same as isKnownPositive(Stride) returning 11303 // true (original behavior of the function). 11304 // 11305 // We want to make sure that the stride is truly unknown as there are edge 11306 // cases where ScalarEvolution propagates no wrap flags to the 11307 // post-increment/decrement IV even though the increment/decrement operation 11308 // itself is wrapping. The computed backedge taken count may be wrong in 11309 // such cases. This is prevented by checking that the stride is not known to 11310 // be either positive or non-positive. For example, no wrap flags are 11311 // propagated to the post-increment IV of this loop with a trip count of 2 - 11312 // 11313 // unsigned char i; 11314 // for(i=127; i<128; i+=129) 11315 // A[i] = i; 11316 // 11317 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) || 11318 !loopHasNoSideEffects(L)) 11319 return getCouldNotCompute(); 11320 } else if (!Stride->isOne() && 11321 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) 11322 // Avoid proven overflow cases: this will ensure that the backedge taken 11323 // count will not generate any unsigned overflow. Relaxed no-overflow 11324 // conditions exploit NoWrapFlags, allowing to optimize in presence of 11325 // undefined behaviors like the case of C language. 11326 return getCouldNotCompute(); 11327 11328 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 11329 : ICmpInst::ICMP_ULT; 11330 const SCEV *Start = IV->getStart(); 11331 const SCEV *End = RHS; 11332 // When the RHS is not invariant, we do not know the end bound of the loop and 11333 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 11334 // calculate the MaxBECount, given the start, stride and max value for the end 11335 // bound of the loop (RHS), and the fact that IV does not overflow (which is 11336 // checked above). 11337 if (!isLoopInvariant(RHS, L)) { 11338 const SCEV *MaxBECount = computeMaxBECountForLT( 11339 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 11340 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 11341 false /*MaxOrZero*/, Predicates); 11342 } 11343 // If the backedge is taken at least once, then it will be taken 11344 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 11345 // is the LHS value of the less-than comparison the first time it is evaluated 11346 // and End is the RHS. 11347 const SCEV *BECountIfBackedgeTaken = 11348 computeBECount(getMinusSCEV(End, Start), Stride, false); 11349 // If the loop entry is guarded by the result of the backedge test of the 11350 // first loop iteration, then we know the backedge will be taken at least 11351 // once and so the backedge taken count is as above. If not then we use the 11352 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 11353 // as if the backedge is taken at least once max(End,Start) is End and so the 11354 // result is as above, and if not max(End,Start) is Start so we get a backedge 11355 // count of zero. 11356 const SCEV *BECount; 11357 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 11358 BECount = BECountIfBackedgeTaken; 11359 else { 11360 // If we know that RHS >= Start in the context of loop, then we know that 11361 // max(RHS, Start) = RHS at this point. 11362 if (isLoopEntryGuardedByCond( 11363 L, IsSigned ? 
ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, RHS, Start)) 11364 End = RHS; 11365 else 11366 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 11367 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 11368 } 11369 11370 const SCEV *MaxBECount; 11371 bool MaxOrZero = false; 11372 if (isa<SCEVConstant>(BECount)) 11373 MaxBECount = BECount; 11374 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 11375 // If we know exactly how many times the backedge will be taken if it's 11376 // taken at least once, then the backedge count will either be that or 11377 // zero. 11378 MaxBECount = BECountIfBackedgeTaken; 11379 MaxOrZero = true; 11380 } else { 11381 MaxBECount = computeMaxBECountForLT( 11382 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 11383 } 11384 11385 if (isa<SCEVCouldNotCompute>(MaxBECount) && 11386 !isa<SCEVCouldNotCompute>(BECount)) 11387 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 11388 11389 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 11390 } 11391 11392 ScalarEvolution::ExitLimit 11393 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 11394 const Loop *L, bool IsSigned, 11395 bool ControlsExit, bool AllowPredicates) { 11396 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 11397 // We handle only IV > Invariant 11398 if (!isLoopInvariant(RHS, L)) 11399 return getCouldNotCompute(); 11400 11401 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 11402 if (!IV && AllowPredicates) 11403 // Try to make this an AddRec using runtime tests, in the first X 11404 // iterations of this loop, where X is the SCEV expression found by the 11405 // algorithm below. 11406 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 11407 11408 // Avoid weird loops 11409 if (!IV || IV->getLoop() != L || !IV->isAffine()) 11410 return getCouldNotCompute(); 11411 11412 bool NoWrap = ControlsExit && 11413 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 11414 11415 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 11416 11417 // Avoid negative or zero stride values 11418 if (!isKnownPositive(Stride)) 11419 return getCouldNotCompute(); 11420 11421 // Avoid proven overflow cases: this will ensure that the backedge taken count 11422 // will not generate any unsigned overflow. Relaxed no-overflow conditions 11423 // exploit NoWrapFlags, allowing to optimize in presence of undefined 11424 // behaviors like the case of C language. 11425 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 11426 return getCouldNotCompute(); 11427 11428 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 11429 : ICmpInst::ICMP_UGT; 11430 11431 const SCEV *Start = IV->getStart(); 11432 const SCEV *End = RHS; 11433 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { 11434 // If we know that Start >= RHS in the context of loop, then we know that 11435 // min(RHS, Start) = RHS at this point. 11436 if (isLoopEntryGuardedByCond( 11437 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) 11438 End = RHS; 11439 else 11440 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 11441 } 11442 11443 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 11444 11445 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 11446 : getUnsignedRangeMax(Start); 11447 11448 APInt MinStride = IsSigned ? 
getSignedRangeMin(Stride) 11449 : getUnsignedRangeMin(Stride); 11450 11451 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 11452 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 11453 : APInt::getMinValue(BitWidth) + (MinStride - 1); 11454 11455 // Although End can be a MIN expression we estimate MinEnd considering only 11456 // the case End = RHS. This is safe because in the other case (Start - End) 11457 // is zero, leading to a zero maximum backedge taken count. 11458 APInt MinEnd = 11459 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 11460 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 11461 11462 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) 11463 ? BECount 11464 : computeBECount(getConstant(MaxStart - MinEnd), 11465 getConstant(MinStride), false); 11466 11467 if (isa<SCEVCouldNotCompute>(MaxBECount)) 11468 MaxBECount = BECount; 11469 11470 return ExitLimit(BECount, MaxBECount, false, Predicates); 11471 } 11472 11473 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 11474 ScalarEvolution &SE) const { 11475 if (Range.isFullSet()) // Infinite loop. 11476 return SE.getCouldNotCompute(); 11477 11478 // If the start is a non-zero constant, shift the range to simplify things. 11479 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 11480 if (!SC->getValue()->isZero()) { 11481 SmallVector<const SCEV *, 4> Operands(operands()); 11482 Operands[0] = SE.getZero(SC->getType()); 11483 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 11484 getNoWrapFlags(FlagNW)); 11485 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 11486 return ShiftedAddRec->getNumIterationsInRange( 11487 Range.subtract(SC->getAPInt()), SE); 11488 // This is strange and shouldn't happen. 11489 return SE.getCouldNotCompute(); 11490 } 11491 11492 // The only time we can solve this is when we have all constant indices. 11493 // Otherwise, we cannot determine the overflow conditions. 11494 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 11495 return SE.getCouldNotCompute(); 11496 11497 // Okay at this point we know that all elements of the chrec are constants and 11498 // that the start element is zero. 11499 11500 // First check to see if the range contains zero. If not, the first 11501 // iteration exits. 11502 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 11503 if (!Range.contains(APInt(BitWidth, 0))) 11504 return SE.getZero(getType()); 11505 11506 if (isAffine()) { 11507 // If this is an affine expression then we have this situation: 11508 // Solve {0,+,A} in Range === Ax in Range 11509 11510 // We know that zero is in the range. If A is positive then we know that 11511 // the upper value of the range must be the first possible exit value. 11512 // If A is negative then the lower of the range is the last possible loop 11513 // value. Also note that we already checked for a full range. 11514 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 11515 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 11516 11517 // The exit value should be (End+A)/A. 11518 APInt ExitVal = (End + A).udiv(A); 11519 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 11520 11521 // Evaluate at the exit value. If we really did fall out of the valid 11522 // range, then we computed our trip count, otherwise wrap around or other 11523 // things must have happened. 
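    // For example, solving {0,+,3} in [0, 10): A = 3 and End = 9, so
    // ExitVal = (9 + 3) /u 3 = 4; the value at iteration 4 is 12, the first
    // one outside the range, while iteration 3 yields 9, which is still in.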
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range. This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach the arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array
// size parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec = false;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
/// in two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
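/// For example, in 8 * (100 + %p * %q * (%a + {0,+,1}_loop)), the product
/// %p * %q multiplies a subexpression containing an AddRec and is therefore
/// collected as a candidate array-size term.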
11703 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 11704 SmallVectorImpl<const SCEV *> &Terms) { 11705 SmallVector<const SCEV *, 4> Strides; 11706 SCEVCollectStrides StrideCollector(*this, Strides); 11707 visitAll(Expr, StrideCollector); 11708 11709 LLVM_DEBUG({ 11710 dbgs() << "Strides:\n"; 11711 for (const SCEV *S : Strides) 11712 dbgs() << *S << "\n"; 11713 }); 11714 11715 for (const SCEV *S : Strides) { 11716 SCEVCollectTerms TermCollector(Terms); 11717 visitAll(S, TermCollector); 11718 } 11719 11720 LLVM_DEBUG({ 11721 dbgs() << "Terms:\n"; 11722 for (const SCEV *T : Terms) 11723 dbgs() << *T << "\n"; 11724 }); 11725 11726 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 11727 visitAll(Expr, MulCollector); 11728 } 11729 11730 static bool findArrayDimensionsRec(ScalarEvolution &SE, 11731 SmallVectorImpl<const SCEV *> &Terms, 11732 SmallVectorImpl<const SCEV *> &Sizes) { 11733 int Last = Terms.size() - 1; 11734 const SCEV *Step = Terms[Last]; 11735 11736 // End of recursion. 11737 if (Last == 0) { 11738 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 11739 SmallVector<const SCEV *, 2> Qs; 11740 for (const SCEV *Op : M->operands()) 11741 if (!isa<SCEVConstant>(Op)) 11742 Qs.push_back(Op); 11743 11744 Step = SE.getMulExpr(Qs); 11745 } 11746 11747 Sizes.push_back(Step); 11748 return true; 11749 } 11750 11751 for (const SCEV *&Term : Terms) { 11752 // Normalize the terms before the next call to findArrayDimensionsRec. 11753 const SCEV *Q, *R; 11754 SCEVDivision::divide(SE, Term, Step, &Q, &R); 11755 11756 // Bail out when GCD does not evenly divide one of the terms. 11757 if (!R->isZero()) 11758 return false; 11759 11760 Term = Q; 11761 } 11762 11763 // Remove all SCEVConstants. 11764 erase_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }); 11765 11766 if (Terms.size() > 0) 11767 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 11768 return false; 11769 11770 Sizes.push_back(Step); 11771 return true; 11772 } 11773 11774 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 11775 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11776 for (const SCEV *T : Terms) 11777 if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); })) 11778 return true; 11779 11780 return false; 11781 } 11782 11783 // Return the number of product terms in S. 11784 static inline int numberOfTerms(const SCEV *S) { 11785 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11786 return Expr->getNumOperands(); 11787 return 1; 11788 } 11789 11790 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11791 if (isa<SCEVConstant>(T)) 11792 return nullptr; 11793 11794 if (isa<SCEVUnknown>(T)) 11795 return T; 11796 11797 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11798 SmallVector<const SCEV *, 2> Factors; 11799 for (const SCEV *Op : M->operands()) 11800 if (!isa<SCEVConstant>(Op)) 11801 Factors.push_back(Op); 11802 11803 return SE.getMulExpr(Factors); 11804 } 11805 11806 return T; 11807 } 11808 11809 /// Return the size of an element read or written by Inst. 
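/// For example (illustrative): for "store i32 %v, i32* %p" this is the SCEV
/// of sizeof(i32); for a load of a double it is the SCEV of sizeof(double);
/// for an instruction that is neither a load nor a store it is null.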
11810 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
11811   Type *Ty;
11812   if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
11813     Ty = Store->getValueOperand()->getType();
11814   else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
11815     Ty = Load->getType();
11816   else
11817     return nullptr;
11818
11819   Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
11820   return getSizeOfExpr(ETy, Ty);
11821 }
11822
11823 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
11824                                           SmallVectorImpl<const SCEV *> &Sizes,
11825                                           const SCEV *ElementSize) {
11826   if (Terms.empty() || !ElementSize)
11827     return;
11828
11829   // Early return when Terms do not contain parameters: we do not delinearize
11830   // non-parametric SCEVs.
11831   if (!containsParameters(Terms))
11832     return;
11833
11834   LLVM_DEBUG({
11835     dbgs() << "Terms:\n";
11836     for (const SCEV *T : Terms)
11837       dbgs() << *T << "\n";
11838   });
11839
11840   // Remove duplicates.
11841   array_pod_sort(Terms.begin(), Terms.end());
11842   Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
11843
11844   // Put larger terms first.
11845   llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
11846     return numberOfTerms(LHS) > numberOfTerms(RHS);
11847   });
11848
11849   // Try to divide all terms by the element size. If a term is not divisible
11850   // by the element size, proceed with the original term.
11851   for (const SCEV *&Term : Terms) {
11852     const SCEV *Q, *R;
11853     SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
11854     if (!Q->isZero())
11855       Term = Q;
11856   }
11857
11858   SmallVector<const SCEV *, 4> NewTerms;
11859
11860   // Remove constant factors.
11861   for (const SCEV *T : Terms)
11862     if (const SCEV *NewT = removeConstantFactors(*this, T))
11863       NewTerms.push_back(NewT);
11864
11865   LLVM_DEBUG({
11866     dbgs() << "Terms after sorting:\n";
11867     for (const SCEV *T : NewTerms)
11868       dbgs() << *T << "\n";
11869   });
11870
11871   if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
11872     Sizes.clear();
11873     return;
11874   }
11875
11876   // The last element to be pushed into Sizes is the size of an element.
11877   Sizes.push_back(ElementSize);
11878
11879   LLVM_DEBUG({
11880     dbgs() << "Sizes:\n";
11881     for (const SCEV *S : Sizes)
11882       dbgs() << *S << "\n";
11883   });
11884 }
11885
11886 void ScalarEvolution::computeAccessFunctions(
11887     const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
11888     SmallVectorImpl<const SCEV *> &Sizes) {
11889   // Early exit in case this SCEV is not an affine multivariate function.
11890   if (Sizes.empty())
11891     return;
11892
11893   if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
11894     if (!AR->isAffine())
11895       return;
11896
11897   const SCEV *Res = Expr;
11898   int Last = Sizes.size() - 1;
11899   for (int i = Last; i >= 0; i--) {
11900     const SCEV *Q, *R;
11901     SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);
11902
11903     LLVM_DEBUG({
11904       dbgs() << "Res: " << *Res << "\n";
11905       dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
11906       dbgs() << "Res divided by Sizes[i]:\n";
11907       dbgs() << "Quotient: " << *Q << "\n";
11908       dbgs() << "Remainder: " << *R << "\n";
11909     });
11910
11911     Res = Q;
11912
11913     // Do not record the last subscript corresponding to the size of elements
11914     // in the array.
11915     if (i == Last) {
11916
11917       // Bail out if the remainder is too complex.
11918       if (isa<SCEVAddRecExpr>(R)) {
11919         Subscripts.clear();
11920         Sizes.clear();
11921         return;
11922       }
11923
11924       continue;
11925     }
11926
11927     // Record the access function for the current subscript.
11928     Subscripts.push_back(R);
11929   }
11930
11931   // Also push in last position the remainder of the last division: it will be
11932   // the access function of the innermost dimension.
11933   Subscripts.push_back(Res);
11934
11935   std::reverse(Subscripts.begin(), Subscripts.end());
11936
11937   LLVM_DEBUG({
11938     dbgs() << "Subscripts:\n";
11939     for (const SCEV *S : Subscripts)
11940       dbgs() << *S << "\n";
11941   });
11942 }
11943
11944 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and
11945 /// sizes of an array access; the remainder of the delinearization is the
11946 /// offset start of the array. The SCEV->delinearize algorithm computes the
11947 /// multiples of SCEV coefficients: that is, a pattern match of subexpressions
11948 /// in the stride and base of a SCEV, corresponding to the computation of a GCD
11949 /// (greatest common divisor) of base and stride. When SCEV->delinearize fails,
11950 /// it returns the SCEV unchanged.
11951 ///
11952 /// For example: when analyzing the memory access A[i][j][k] in this loop nest
11953 ///
11954 ///   void foo(long n, long m, long o, double A[n][m][o]) {
11955 ///
11956 ///     for (long i = 0; i < n; i++)
11957 ///       for (long j = 0; j < m; j++)
11958 ///         for (long k = 0; k < o; k++)
11959 ///           A[i][j][k] = 1.0;
11960 ///   }
11961 ///
11962 /// the delinearization input is the following AddRec SCEV:
11963 ///
11964 ///   AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
11965 ///
11966 /// From this SCEV, we are able to say that the base offset of the access is %A
11967 /// because it appears as an offset that does not divide any of the strides in
11968 /// the loops:
11969 ///
11970 ///   CHECK: Base offset: %A
11971 ///
11972 /// and then SCEV->delinearize determines the size of some of the dimensions of
11973 /// the array as these are the factors by which the strides are multiplied:
11974 ///
11975 ///   CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
11976 ///
11977 /// Note that the outermost dimension remains of UnknownSize because there are
11978 /// no strides that would help identify the size of the last dimension: when
11979 /// the array has been statically allocated, one could compute the size of that
11980 /// dimension by dividing the overall size of the array by the size of the known
11981 /// dimensions: %m * %o * 8.
11982 ///
11983 /// Finally delinearize provides the access functions for the array reference
11984 /// that corresponds to A[i][j][k] of the above C testcase:
11985 ///
11986 ///   CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
11987 ///
11988 /// The testcases check the output of a function pass, DelinearizationPass,
11989 /// that walks through all loads and stores of a function asking for the SCEV
11990 /// of the memory access with respect to all enclosing loops, calling
11991 /// SCEV->delinearize on that and printing the results.
11992 void ScalarEvolution::delinearize(const SCEV *Expr,
11993                                   SmallVectorImpl<const SCEV *> &Subscripts,
11994                                   SmallVectorImpl<const SCEV *> &Sizes,
11995                                   const SCEV *ElementSize) {
11996   // First step: collect parametric terms.
11997 SmallVector<const SCEV *, 4> Terms; 11998 collectParametricTerms(Expr, Terms); 11999 12000 if (Terms.empty()) 12001 return; 12002 12003 // Second step: find subscript sizes. 12004 findArrayDimensions(Terms, Sizes, ElementSize); 12005 12006 if (Sizes.empty()) 12007 return; 12008 12009 // Third step: compute the access functions for each subscript. 12010 computeAccessFunctions(Expr, Subscripts, Sizes); 12011 12012 if (Subscripts.empty()) 12013 return; 12014 12015 LLVM_DEBUG({ 12016 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 12017 dbgs() << "ArrayDecl[UnknownSize]"; 12018 for (const SCEV *S : Sizes) 12019 dbgs() << "[" << *S << "]"; 12020 12021 dbgs() << "\nArrayRef"; 12022 for (const SCEV *S : Subscripts) 12023 dbgs() << "[" << *S << "]"; 12024 dbgs() << "\n"; 12025 }); 12026 } 12027 12028 bool ScalarEvolution::getIndexExpressionsFromGEP( 12029 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts, 12030 SmallVectorImpl<int> &Sizes) { 12031 assert(Subscripts.empty() && Sizes.empty() && 12032 "Expected output lists to be empty on entry to this function."); 12033 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP"); 12034 Type *Ty = GEP->getPointerOperandType(); 12035 bool DroppedFirstDim = false; 12036 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 12037 const SCEV *Expr = getSCEV(GEP->getOperand(i)); 12038 if (i == 1) { 12039 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) { 12040 Ty = PtrTy->getElementType(); 12041 } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) { 12042 Ty = ArrayTy->getElementType(); 12043 } else { 12044 Subscripts.clear(); 12045 Sizes.clear(); 12046 return false; 12047 } 12048 if (auto *Const = dyn_cast<SCEVConstant>(Expr)) 12049 if (Const->getValue()->isZero()) { 12050 DroppedFirstDim = true; 12051 continue; 12052 } 12053 Subscripts.push_back(Expr); 12054 continue; 12055 } 12056 12057 auto *ArrayTy = dyn_cast<ArrayType>(Ty); 12058 if (!ArrayTy) { 12059 Subscripts.clear(); 12060 Sizes.clear(); 12061 return false; 12062 } 12063 12064 Subscripts.push_back(Expr); 12065 if (!(DroppedFirstDim && i == 2)) 12066 Sizes.push_back(ArrayTy->getNumElements()); 12067 12068 Ty = ArrayTy->getElementType(); 12069 } 12070 return !Subscripts.empty(); 12071 } 12072 12073 //===----------------------------------------------------------------------===// 12074 // SCEVCallbackVH Class Implementation 12075 //===----------------------------------------------------------------------===// 12076 12077 void ScalarEvolution::SCEVCallbackVH::deleted() { 12078 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 12079 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 12080 SE->ConstantEvolutionLoopExitValue.erase(PN); 12081 SE->eraseValueFromMap(getValPtr()); 12082 // this now dangles! 12083 } 12084 12085 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 12086 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 12087 12088 // Forget all the expressions associated with users of the old value, 12089 // so that future queries will recompute the expressions using the new 12090 // value. 12091 Value *Old = getValPtr(); 12092 SmallVector<User *, 16> Worklist(Old->users()); 12093 SmallPtrSet<User *, 8> Visited; 12094 while (!Worklist.empty()) { 12095 User *U = Worklist.pop_back_val(); 12096 // Deleting the Old value will cause this to dangle. Postpone 12097 // that until everything else is done. 
12098 if (U == Old) 12099 continue; 12100 if (!Visited.insert(U).second) 12101 continue; 12102 if (PHINode *PN = dyn_cast<PHINode>(U)) 12103 SE->ConstantEvolutionLoopExitValue.erase(PN); 12104 SE->eraseValueFromMap(U); 12105 llvm::append_range(Worklist, U->users()); 12106 } 12107 // Delete the Old value. 12108 if (PHINode *PN = dyn_cast<PHINode>(Old)) 12109 SE->ConstantEvolutionLoopExitValue.erase(PN); 12110 SE->eraseValueFromMap(Old); 12111 // this now dangles! 12112 } 12113 12114 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 12115 : CallbackVH(V), SE(se) {} 12116 12117 //===----------------------------------------------------------------------===// 12118 // ScalarEvolution Class Implementation 12119 //===----------------------------------------------------------------------===// 12120 12121 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 12122 AssumptionCache &AC, DominatorTree &DT, 12123 LoopInfo &LI) 12124 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 12125 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 12126 LoopDispositions(64), BlockDispositions(64) { 12127 // To use guards for proving predicates, we need to scan every instruction in 12128 // relevant basic blocks, and not just terminators. Doing this is a waste of 12129 // time if the IR does not actually contain any calls to 12130 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 12131 // 12132 // This pessimizes the case where a pass that preserves ScalarEvolution wants 12133 // to _add_ guards to the module when there weren't any before, and wants 12134 // ScalarEvolution to optimize based on those guards. For now we prefer to be 12135 // efficient in lieu of being smart in that rather obscure case. 
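  //
  // For reference, a call to the guard intrinsic has the following shape in
  // the IR (illustrative; see the LangRef for the authoritative syntax):
  //
  //   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]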
12136 12137 auto *GuardDecl = F.getParent()->getFunction( 12138 Intrinsic::getName(Intrinsic::experimental_guard)); 12139 HasGuards = GuardDecl && !GuardDecl->use_empty(); 12140 } 12141 12142 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 12143 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 12144 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 12145 ValueExprMap(std::move(Arg.ValueExprMap)), 12146 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 12147 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 12148 PendingMerges(std::move(Arg.PendingMerges)), 12149 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 12150 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 12151 PredicatedBackedgeTakenCounts( 12152 std::move(Arg.PredicatedBackedgeTakenCounts)), 12153 ConstantEvolutionLoopExitValue( 12154 std::move(Arg.ConstantEvolutionLoopExitValue)), 12155 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 12156 LoopDispositions(std::move(Arg.LoopDispositions)), 12157 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 12158 BlockDispositions(std::move(Arg.BlockDispositions)), 12159 UnsignedRanges(std::move(Arg.UnsignedRanges)), 12160 SignedRanges(std::move(Arg.SignedRanges)), 12161 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 12162 UniquePreds(std::move(Arg.UniquePreds)), 12163 SCEVAllocator(std::move(Arg.SCEVAllocator)), 12164 LoopUsers(std::move(Arg.LoopUsers)), 12165 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 12166 FirstUnknown(Arg.FirstUnknown) { 12167 Arg.FirstUnknown = nullptr; 12168 } 12169 12170 ScalarEvolution::~ScalarEvolution() { 12171 // Iterate through all the SCEVUnknown instances and call their 12172 // destructors, so that they release their references to their values. 12173 for (SCEVUnknown *U = FirstUnknown; U;) { 12174 SCEVUnknown *Tmp = U; 12175 U = U->Next; 12176 Tmp->~SCEVUnknown(); 12177 } 12178 FirstUnknown = nullptr; 12179 12180 ExprValueMap.clear(); 12181 ValueExprMap.clear(); 12182 HasRecMap.clear(); 12183 12184 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 12185 // that a loop had multiple computable exits. 
12186 for (auto &BTCI : BackedgeTakenCounts) 12187 BTCI.second.clear(); 12188 for (auto &BTCI : PredicatedBackedgeTakenCounts) 12189 BTCI.second.clear(); 12190 12191 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 12192 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 12193 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 12194 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 12195 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 12196 } 12197 12198 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 12199 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 12200 } 12201 12202 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 12203 const Loop *L) { 12204 // Print all inner loops first 12205 for (Loop *I : *L) 12206 PrintLoopInfo(OS, SE, I); 12207 12208 OS << "Loop "; 12209 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12210 OS << ": "; 12211 12212 SmallVector<BasicBlock *, 8> ExitingBlocks; 12213 L->getExitingBlocks(ExitingBlocks); 12214 if (ExitingBlocks.size() != 1) 12215 OS << "<multiple exits> "; 12216 12217 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 12218 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 12219 else 12220 OS << "Unpredictable backedge-taken count.\n"; 12221 12222 if (ExitingBlocks.size() > 1) 12223 for (BasicBlock *ExitingBlock : ExitingBlocks) { 12224 OS << " exit count for " << ExitingBlock->getName() << ": " 12225 << *SE->getExitCount(L, ExitingBlock) << "\n"; 12226 } 12227 12228 OS << "Loop "; 12229 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12230 OS << ": "; 12231 12232 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 12233 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 12234 if (SE->isBackedgeTakenCountMaxOrZero(L)) 12235 OS << ", actual taken count either this or zero."; 12236 } else { 12237 OS << "Unpredictable max backedge-taken count. "; 12238 } 12239 12240 OS << "\n" 12241 "Loop "; 12242 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12243 OS << ": "; 12244 12245 SCEVUnionPredicate Pred; 12246 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 12247 if (!isa<SCEVCouldNotCompute>(PBT)) { 12248 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 12249 OS << " Predicates:\n"; 12250 Pred.print(OS, 4); 12251 } else { 12252 OS << "Unpredictable predicated backedge-taken count. "; 12253 } 12254 OS << "\n"; 12255 12256 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 12257 OS << "Loop "; 12258 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12259 OS << ": "; 12260 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 12261 } 12262 } 12263 12264 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 12265 switch (LD) { 12266 case ScalarEvolution::LoopVariant: 12267 return "Variant"; 12268 case ScalarEvolution::LoopInvariant: 12269 return "Invariant"; 12270 case ScalarEvolution::LoopComputable: 12271 return "Computable"; 12272 } 12273 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 12274 } 12275 12276 void ScalarEvolution::print(raw_ostream &OS) const { 12277 // ScalarEvolution's implementation of the print method is to print 12278 // out SCEV values of all instructions that are interesting. Doing 12279 // this potentially causes it to create new SCEV objects though, 12280 // which technically conflicts with the const qualifier. 
This isn't 12281 // observable from outside the class though, so casting away the 12282 // const isn't dangerous. 12283 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12284 12285 if (ClassifyExpressions) { 12286 OS << "Classifying expressions for: "; 12287 F.printAsOperand(OS, /*PrintType=*/false); 12288 OS << "\n"; 12289 for (Instruction &I : instructions(F)) 12290 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 12291 OS << I << '\n'; 12292 OS << " --> "; 12293 const SCEV *SV = SE.getSCEV(&I); 12294 SV->print(OS); 12295 if (!isa<SCEVCouldNotCompute>(SV)) { 12296 OS << " U: "; 12297 SE.getUnsignedRange(SV).print(OS); 12298 OS << " S: "; 12299 SE.getSignedRange(SV).print(OS); 12300 } 12301 12302 const Loop *L = LI.getLoopFor(I.getParent()); 12303 12304 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 12305 if (AtUse != SV) { 12306 OS << " --> "; 12307 AtUse->print(OS); 12308 if (!isa<SCEVCouldNotCompute>(AtUse)) { 12309 OS << " U: "; 12310 SE.getUnsignedRange(AtUse).print(OS); 12311 OS << " S: "; 12312 SE.getSignedRange(AtUse).print(OS); 12313 } 12314 } 12315 12316 if (L) { 12317 OS << "\t\t" "Exits: "; 12318 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 12319 if (!SE.isLoopInvariant(ExitValue, L)) { 12320 OS << "<<Unknown>>"; 12321 } else { 12322 OS << *ExitValue; 12323 } 12324 12325 bool First = true; 12326 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 12327 if (First) { 12328 OS << "\t\t" "LoopDispositions: { "; 12329 First = false; 12330 } else { 12331 OS << ", "; 12332 } 12333 12334 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12335 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 12336 } 12337 12338 for (auto *InnerL : depth_first(L)) { 12339 if (InnerL == L) 12340 continue; 12341 if (First) { 12342 OS << "\t\t" "LoopDispositions: { "; 12343 First = false; 12344 } else { 12345 OS << ", "; 12346 } 12347 12348 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12349 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 12350 } 12351 12352 OS << " }"; 12353 } 12354 12355 OS << "\n"; 12356 } 12357 } 12358 12359 OS << "Determining loop execution counts for: "; 12360 F.printAsOperand(OS, /*PrintType=*/false); 12361 OS << "\n"; 12362 for (Loop *I : LI) 12363 PrintLoopInfo(OS, &SE, I); 12364 } 12365 12366 ScalarEvolution::LoopDisposition 12367 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 12368 auto &Values = LoopDispositions[S]; 12369 for (auto &V : Values) { 12370 if (V.getPointer() == L) 12371 return V.getInt(); 12372 } 12373 Values.emplace_back(L, LoopVariant); 12374 LoopDisposition D = computeLoopDisposition(S, L); 12375 auto &Values2 = LoopDispositions[S]; 12376 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12377 if (V.getPointer() == L) { 12378 V.setInt(D); 12379 break; 12380 } 12381 } 12382 return D; 12383 } 12384 12385 ScalarEvolution::LoopDisposition 12386 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 12387 switch (S->getSCEVType()) { 12388 case scConstant: 12389 return LoopInvariant; 12390 case scPtrToInt: 12391 case scTruncate: 12392 case scZeroExtend: 12393 case scSignExtend: 12394 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 12395 case scAddRecExpr: { 12396 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12397 12398 // If L is the addrec's loop, it's computable. 
12399 if (AR->getLoop() == L) 12400 return LoopComputable; 12401 12402 // Add recurrences are never invariant in the function-body (null loop). 12403 if (!L) 12404 return LoopVariant; 12405 12406 // Everything that is not defined at loop entry is variant. 12407 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 12408 return LoopVariant; 12409 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 12410 " dominate the contained loop's header?"); 12411 12412 // This recurrence is invariant w.r.t. L if AR's loop contains L. 12413 if (AR->getLoop()->contains(L)) 12414 return LoopInvariant; 12415 12416 // This recurrence is variant w.r.t. L if any of its operands 12417 // are variant. 12418 for (auto *Op : AR->operands()) 12419 if (!isLoopInvariant(Op, L)) 12420 return LoopVariant; 12421 12422 // Otherwise it's loop-invariant. 12423 return LoopInvariant; 12424 } 12425 case scAddExpr: 12426 case scMulExpr: 12427 case scUMaxExpr: 12428 case scSMaxExpr: 12429 case scUMinExpr: 12430 case scSMinExpr: { 12431 bool HasVarying = false; 12432 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 12433 LoopDisposition D = getLoopDisposition(Op, L); 12434 if (D == LoopVariant) 12435 return LoopVariant; 12436 if (D == LoopComputable) 12437 HasVarying = true; 12438 } 12439 return HasVarying ? LoopComputable : LoopInvariant; 12440 } 12441 case scUDivExpr: { 12442 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12443 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 12444 if (LD == LoopVariant) 12445 return LoopVariant; 12446 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 12447 if (RD == LoopVariant) 12448 return LoopVariant; 12449 return (LD == LoopInvariant && RD == LoopInvariant) ? 12450 LoopInvariant : LoopComputable; 12451 } 12452 case scUnknown: 12453 // All non-instruction values are loop invariant. All instructions are loop 12454 // invariant if they are not contained in the specified loop. 12455 // Instructions are never considered invariant in the function body 12456 // (null loop) because they are defined within the "loop". 12457 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 12458 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 12459 return LoopInvariant; 12460 case scCouldNotCompute: 12461 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12462 } 12463 llvm_unreachable("Unknown SCEV kind!"); 12464 } 12465 12466 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 12467 return getLoopDisposition(S, L) == LoopInvariant; 12468 } 12469 12470 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 12471 return getLoopDisposition(S, L) == LoopComputable; 12472 } 12473 12474 ScalarEvolution::BlockDisposition 12475 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12476 auto &Values = BlockDispositions[S]; 12477 for (auto &V : Values) { 12478 if (V.getPointer() == BB) 12479 return V.getInt(); 12480 } 12481 Values.emplace_back(BB, DoesNotDominateBlock); 12482 BlockDisposition D = computeBlockDisposition(S, BB); 12483 auto &Values2 = BlockDispositions[S]; 12484 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12485 if (V.getPointer() == BB) { 12486 V.setInt(D); 12487 break; 12488 } 12489 } 12490 return D; 12491 } 12492 12493 ScalarEvolution::BlockDisposition 12494 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12495 switch (S->getSCEVType()) { 12496 case scConstant: 12497 return ProperlyDominatesBlock; 12498 case scPtrToInt: 12499 case scTruncate: 12500 case scZeroExtend: 12501 case scSignExtend: 12502 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 12503 case scAddRecExpr: { 12504 // This uses a "dominates" query instead of "properly dominates" query 12505 // to test for proper dominance too, because the instruction which 12506 // produces the addrec's value is a PHI, and a PHI effectively properly 12507 // dominates its entire containing block. 12508 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12509 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 12510 return DoesNotDominateBlock; 12511 12512 // Fall through into SCEVNAryExpr handling. 12513 LLVM_FALLTHROUGH; 12514 } 12515 case scAddExpr: 12516 case scMulExpr: 12517 case scUMaxExpr: 12518 case scSMaxExpr: 12519 case scUMinExpr: 12520 case scSMinExpr: { 12521 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 12522 bool Proper = true; 12523 for (const SCEV *NAryOp : NAry->operands()) { 12524 BlockDisposition D = getBlockDisposition(NAryOp, BB); 12525 if (D == DoesNotDominateBlock) 12526 return DoesNotDominateBlock; 12527 if (D == DominatesBlock) 12528 Proper = false; 12529 } 12530 return Proper ? ProperlyDominatesBlock : DominatesBlock; 12531 } 12532 case scUDivExpr: { 12533 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12534 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 12535 BlockDisposition LD = getBlockDisposition(LHS, BB); 12536 if (LD == DoesNotDominateBlock) 12537 return DoesNotDominateBlock; 12538 BlockDisposition RD = getBlockDisposition(RHS, BB); 12539 if (RD == DoesNotDominateBlock) 12540 return DoesNotDominateBlock; 12541 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
12542       ProperlyDominatesBlock : DominatesBlock;
12543   }
12544   case scUnknown:
12545     if (Instruction *I =
12546           dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
12547       if (I->getParent() == BB)
12548         return DominatesBlock;
12549       if (DT.properlyDominates(I->getParent(), BB))
12550         return ProperlyDominatesBlock;
12551       return DoesNotDominateBlock;
12552     }
12553     return ProperlyDominatesBlock;
12554   case scCouldNotCompute:
12555     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
12556   }
12557   llvm_unreachable("Unknown SCEV kind!");
12558 }
12559
12560 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
12561   return getBlockDisposition(S, BB) >= DominatesBlock;
12562 }
12563
12564 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
12565   return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
12566 }
12567
12568 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
12569   return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
12570 }
12571
12572 bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
12573   auto IsS = [&](const SCEV *X) { return S == X; };
12574   auto ContainsS = [&](const SCEV *X) {
12575     return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
12576   };
12577   return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
12578 }
12579
12580 void
12581 ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
12582   ValuesAtScopes.erase(S);
12583   LoopDispositions.erase(S);
12584   BlockDispositions.erase(S);
12585   UnsignedRanges.erase(S);
12586   SignedRanges.erase(S);
12587   ExprValueMap.erase(S);
12588   HasRecMap.erase(S);
12589   MinTrailingZerosCache.erase(S);
12590
12591   for (auto I = PredicatedSCEVRewrites.begin();
12592        I != PredicatedSCEVRewrites.end();) {
12593     std::pair<const SCEV *, const Loop *> Entry = I->first;
12594     if (Entry.first == S)
12595       PredicatedSCEVRewrites.erase(I++);
12596     else
12597       ++I;
12598   }
12599
12600   auto RemoveSCEVFromBackedgeMap =
12601       [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
12602         for (auto I = Map.begin(), E = Map.end(); I != E;) {
12603           BackedgeTakenInfo &BEInfo = I->second;
12604           if (BEInfo.hasOperand(S, this)) {
12605             BEInfo.clear();
12606             Map.erase(I++);
12607           } else
12608             ++I;
12609         }
12610       };
12611
12612   RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
12613   RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
12614 }
12615
12616 void
12617 ScalarEvolution::getUsedLoops(const SCEV *S,
12618                               SmallPtrSetImpl<const Loop *> &LoopsUsed) {
12619   struct FindUsedLoops {
12620     FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
12621         : LoopsUsed(LoopsUsed) {}
12622     SmallPtrSetImpl<const Loop *> &LoopsUsed;
12623     bool follow(const SCEV *S) {
12624       if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
12625         LoopsUsed.insert(AR->getLoop());
12626       return true;
12627     }
12628
12629     bool isDone() const { return false; }
12630   };
12631
12632   FindUsedLoops F(LoopsUsed);
12633   SCEVTraversal<FindUsedLoops>(F).visitAll(S);
12634 }
12635
12636 void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
12637   SmallPtrSet<const Loop *, 8> LoopsUsed;
12638   getUsedLoops(S, LoopsUsed);
12639   for (auto *L : LoopsUsed)
12640     LoopUsers[L].push_back(S);
12641 }
12642
12643 void ScalarEvolution::verify() const {
12644   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
12645   ScalarEvolution SE2(F, TLI, AC, DT, LI);
12646
12647   SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());
12648
12649   // Maps SCEV expressions from one ScalarEvolution "universe" to another.
12650   struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
12651     SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
12652
12653     const SCEV *visitConstant(const SCEVConstant *Constant) {
12654       return SE.getConstant(Constant->getAPInt());
12655     }
12656
12657     const SCEV *visitUnknown(const SCEVUnknown *Expr) {
12658       return SE.getUnknown(Expr->getValue());
12659     }
12660
12661     const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
12662       return SE.getCouldNotCompute();
12663     }
12664   };
12665
12666   SCEVMapper SCM(SE2);
12667
12668   while (!LoopStack.empty()) {
12669     auto *L = LoopStack.pop_back_val();
12670     llvm::append_range(LoopStack, *L);
12671
12672     auto *CurBECount = SCM.visit(
12673         const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
12674     auto *NewBECount = SE2.getBackedgeTakenCount(L);
12675
12676     if (CurBECount == SE2.getCouldNotCompute() ||
12677         NewBECount == SE2.getCouldNotCompute()) {
12678       // NB! This situation is legal, but is very suspicious -- whatever pass
12679       // changed the loop to make a trip count go from could not compute to
12680       // computable or vice-versa *should have* invalidated SCEV. However, we
12681       // choose not to assert here (for now) since we don't want false
12682       // positives.
12683       continue;
12684     }
12685
12686     if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
12687       // SCEV treats "undef" as an unknown but consistent value (i.e. it does
12688       // not propagate undef aggressively). This means we can (and do) fail
12689       // verification in cases where a transform makes the trip count of a loop
12690       // go from "undef" to "undef+1" (say). The transform is fine, since in
12691       // both cases the loop iterates "undef" times, but SCEV thinks we
12692       // incorrectly increased the trip count of the loop by 1.
12693       continue;
12694     }
12695
12696     if (SE.getTypeSizeInBits(CurBECount->getType()) >
12697         SE.getTypeSizeInBits(NewBECount->getType()))
12698       NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
12699     else if (SE.getTypeSizeInBits(CurBECount->getType()) <
12700              SE.getTypeSizeInBits(NewBECount->getType()))
12701       CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());
12702
12703     const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);
12704
12705     // Unless VerifySCEVStrict is set, we only compare constant deltas.
12706     if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
12707       dbgs() << "Trip Count for " << *L << " Changed!\n";
12708       dbgs() << "Old: " << *CurBECount << "\n";
12709       dbgs() << "New: " << *NewBECount << "\n";
12710       dbgs() << "Delta: " << *Delta << "\n";
12711       std::abort();
12712     }
12713   }
12714
12715   // Collect all valid loops currently in LoopInfo.
12716   SmallPtrSet<Loop *, 32> ValidLoops;
12717   SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
12718   while (!Worklist.empty()) {
12719     Loop *L = Worklist.pop_back_val();
12720     if (ValidLoops.contains(L))
12721       continue;
12722     ValidLoops.insert(L);
12723     Worklist.append(L->begin(), L->end());
12724   }
12725   // Check for SCEV expressions referencing invalid/deleted loops.
12726 for (auto &KV : ValueExprMap) { 12727 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second); 12728 if (!AR) 12729 continue; 12730 assert(ValidLoops.contains(AR->getLoop()) && 12731 "AddRec references invalid loop"); 12732 } 12733 } 12734 12735 bool ScalarEvolution::invalidate( 12736 Function &F, const PreservedAnalyses &PA, 12737 FunctionAnalysisManager::Invalidator &Inv) { 12738 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 12739 // of its dependencies is invalidated. 12740 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 12741 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 12742 Inv.invalidate<AssumptionAnalysis>(F, PA) || 12743 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 12744 Inv.invalidate<LoopAnalysis>(F, PA); 12745 } 12746 12747 AnalysisKey ScalarEvolutionAnalysis::Key; 12748 12749 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 12750 FunctionAnalysisManager &AM) { 12751 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 12752 AM.getResult<AssumptionAnalysis>(F), 12753 AM.getResult<DominatorTreeAnalysis>(F), 12754 AM.getResult<LoopAnalysis>(F)); 12755 } 12756 12757 PreservedAnalyses 12758 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 12759 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 12760 return PreservedAnalyses::all(); 12761 } 12762 12763 PreservedAnalyses 12764 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 12765 // For compatibility with opt's -analyze feature under legacy pass manager 12766 // which was not ported to NPM. This keeps tests using 12767 // update_analyze_test_checks.py working. 12768 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 12769 << F.getName() << "':\n"; 12770 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 12771 return PreservedAnalyses::all(); 12772 } 12773 12774 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 12775 "Scalar Evolution Analysis", false, true) 12776 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 12777 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 12778 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 12779 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 12780 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 12781 "Scalar Evolution Analysis", false, true) 12782 12783 char ScalarEvolutionWrapperPass::ID = 0; 12784 12785 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 12786 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 12787 } 12788 12789 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 12790 SE.reset(new ScalarEvolution( 12791 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 12792 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 12793 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 12794 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 12795 return false; 12796 } 12797 12798 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 12799 12800 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 12801 SE->print(OS); 12802 } 12803 12804 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 12805 if (!VerifySCEV) 12806 return; 12807 12808 SE->verify(); 12809 } 12810 12811 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 12812 AU.setPreservesAll(); 12813 AU.addRequiredTransitive<AssumptionCacheTracker>(); 12814 
AU.addRequiredTransitive<LoopInfoWrapperPass>(); 12815 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 12816 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 12817 } 12818 12819 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 12820 const SCEV *RHS) { 12821 FoldingSetNodeID ID; 12822 assert(LHS->getType() == RHS->getType() && 12823 "Type mismatch between LHS and RHS"); 12824 // Unique this node based on the arguments 12825 ID.AddInteger(SCEVPredicate::P_Equal); 12826 ID.AddPointer(LHS); 12827 ID.AddPointer(RHS); 12828 void *IP = nullptr; 12829 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12830 return S; 12831 SCEVEqualPredicate *Eq = new (SCEVAllocator) 12832 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 12833 UniquePreds.InsertNode(Eq, IP); 12834 return Eq; 12835 } 12836 12837 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 12838 const SCEVAddRecExpr *AR, 12839 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12840 FoldingSetNodeID ID; 12841 // Unique this node based on the arguments 12842 ID.AddInteger(SCEVPredicate::P_Wrap); 12843 ID.AddPointer(AR); 12844 ID.AddInteger(AddedFlags); 12845 void *IP = nullptr; 12846 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12847 return S; 12848 auto *OF = new (SCEVAllocator) 12849 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 12850 UniquePreds.InsertNode(OF, IP); 12851 return OF; 12852 } 12853 12854 namespace { 12855 12856 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 12857 public: 12858 12859 /// Rewrites \p S in the context of a loop L and the SCEV predication 12860 /// infrastructure. 12861 /// 12862 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 12863 /// equivalences present in \p Pred. 12864 /// 12865 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 12866 /// \p NewPreds such that the result will be an AddRecExpr. 12867 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 12868 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12869 SCEVUnionPredicate *Pred) { 12870 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 12871 return Rewriter.visit(S); 12872 } 12873 12874 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12875 if (Pred) { 12876 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 12877 for (auto *Pred : ExprPreds) 12878 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 12879 if (IPred->getLHS() == Expr) 12880 return IPred->getRHS(); 12881 } 12882 return convertToAddRecWithPreds(Expr); 12883 } 12884 12885 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 12886 const SCEV *Operand = visit(Expr->getOperand()); 12887 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12888 if (AR && AR->getLoop() == L && AR->isAffine()) { 12889 // This couldn't be folded because the operand didn't have the nuw 12890 // flag. Add the nusw flag as an assumption that we could make. 
12891 const SCEV *Step = AR->getStepRecurrence(SE); 12892 Type *Ty = Expr->getType(); 12893 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 12894 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 12895 SE.getSignExtendExpr(Step, Ty), L, 12896 AR->getNoWrapFlags()); 12897 } 12898 return SE.getZeroExtendExpr(Operand, Expr->getType()); 12899 } 12900 12901 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 12902 const SCEV *Operand = visit(Expr->getOperand()); 12903 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12904 if (AR && AR->getLoop() == L && AR->isAffine()) { 12905 // This couldn't be folded because the operand didn't have the nsw 12906 // flag. Add the nssw flag as an assumption that we could make. 12907 const SCEV *Step = AR->getStepRecurrence(SE); 12908 Type *Ty = Expr->getType(); 12909 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 12910 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 12911 SE.getSignExtendExpr(Step, Ty), L, 12912 AR->getNoWrapFlags()); 12913 } 12914 return SE.getSignExtendExpr(Operand, Expr->getType()); 12915 } 12916 12917 private: 12918 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 12919 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12920 SCEVUnionPredicate *Pred) 12921 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 12922 12923 bool addOverflowAssumption(const SCEVPredicate *P) { 12924 if (!NewPreds) { 12925 // Check if we've already made this assumption. 12926 return Pred && Pred->implies(P); 12927 } 12928 NewPreds->insert(P); 12929 return true; 12930 } 12931 12932 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 12933 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12934 auto *A = SE.getWrapPredicate(AR, AddedFlags); 12935 return addOverflowAssumption(A); 12936 } 12937 12938 // If \p Expr represents a PHINode, we try to see if it can be represented 12939 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 12940 // to add this predicate as a runtime overflow check, we return the AddRec. 12941 // If \p Expr does not meet these conditions (is not a PHI node, or we 12942 // couldn't create an AddRec for it, or couldn't add the predicate), we just 12943 // return \p Expr. 12944 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 12945 if (!isa<PHINode>(Expr->getValue())) 12946 return Expr; 12947 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 12948 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 12949 if (!PredicatedRewrite) 12950 return Expr; 12951 for (auto *P : PredicatedRewrite->second){ 12952 // Wrap predicates from outer loops are not supported. 
12953 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 12954 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 12955 if (L != AR->getLoop()) 12956 return Expr; 12957 } 12958 if (!addOverflowAssumption(P)) 12959 return Expr; 12960 } 12961 return PredicatedRewrite->first; 12962 } 12963 12964 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 12965 SCEVUnionPredicate *Pred; 12966 const Loop *L; 12967 }; 12968 12969 } // end anonymous namespace 12970 12971 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 12972 SCEVUnionPredicate &Preds) { 12973 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 12974 } 12975 12976 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 12977 const SCEV *S, const Loop *L, 12978 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 12979 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 12980 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 12981 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 12982 12983 if (!AddRec) 12984 return nullptr; 12985 12986 // Since the transformation was successful, we can now transfer the SCEV 12987 // predicates. 12988 for (auto *P : TransformPreds) 12989 Preds.insert(P); 12990 12991 return AddRec; 12992 } 12993 12994 /// SCEV predicates 12995 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 12996 SCEVPredicateKind Kind) 12997 : FastID(ID), Kind(Kind) {} 12998 12999 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 13000 const SCEV *LHS, const SCEV *RHS) 13001 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 13002 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 13003 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 13004 } 13005 13006 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 13007 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 13008 13009 if (!Op) 13010 return false; 13011 13012 return Op->LHS == LHS && Op->RHS == RHS; 13013 } 13014 13015 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 13016 13017 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 13018 13019 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 13020 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 13021 } 13022 13023 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 13024 const SCEVAddRecExpr *AR, 13025 IncrementWrapFlags Flags) 13026 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 13027 13028 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 13029 13030 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 13031 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 13032 13033 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 13034 } 13035 13036 bool SCEVWrapPredicate::isAlwaysTrue() const { 13037 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 13038 IncrementWrapFlags IFlags = Flags; 13039 13040 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 13041 IFlags = clearFlags(IFlags, IncrementNSSW); 13042 13043 return IFlags == IncrementAnyWrap; 13044 } 13045 13046 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 13047 OS.indent(Depth) << *getExpr() << " Added Flags: "; 13048 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 13049 OS << "<nusw>"; 13050 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 13051 OS << "<nssw>"; 13052 OS << "\n"; 13053 } 13054 13055 SCEVWrapPredicate::IncrementWrapFlags 13056 
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 13057 ScalarEvolution &SE) { 13058 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 13059 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 13060 13061 // We can safely transfer the NSW flag as NSSW. 13062 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 13063 ImpliedFlags = IncrementNSSW; 13064 13065 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 13066 // If the increment is positive, the SCEV NUW flag will also imply the 13067 // WrapPredicate NUSW flag. 13068 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 13069 if (Step->getValue()->getValue().isNonNegative()) 13070 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 13071 } 13072 13073 return ImpliedFlags; 13074 } 13075 13076 /// Union predicates don't get cached so create a dummy set ID for it. 13077 SCEVUnionPredicate::SCEVUnionPredicate() 13078 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 13079 13080 bool SCEVUnionPredicate::isAlwaysTrue() const { 13081 return all_of(Preds, 13082 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 13083 } 13084 13085 ArrayRef<const SCEVPredicate *> 13086 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 13087 auto I = SCEVToPreds.find(Expr); 13088 if (I == SCEVToPreds.end()) 13089 return ArrayRef<const SCEVPredicate *>(); 13090 return I->second; 13091 } 13092 13093 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 13094 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 13095 return all_of(Set->Preds, 13096 [this](const SCEVPredicate *I) { return this->implies(I); }); 13097 13098 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 13099 if (ScevPredsIt == SCEVToPreds.end()) 13100 return false; 13101 auto &SCEVPreds = ScevPredsIt->second; 13102 13103 return any_of(SCEVPreds, 13104 [N](const SCEVPredicate *I) { return I->implies(N); }); 13105 } 13106 13107 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 13108 13109 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 13110 for (auto Pred : Preds) 13111 Pred->print(OS, Depth); 13112 } 13113 13114 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 13115 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 13116 for (auto Pred : Set->Preds) 13117 add(Pred); 13118 return; 13119 } 13120 13121 if (implies(N)) 13122 return; 13123 13124 const SCEV *Key = N->getExpr(); 13125 assert(Key && "Only SCEVUnionPredicate doesn't have an " 13126 " associated expression!"); 13127 13128 SCEVToPreds[Key].push_back(N); 13129 Preds.push_back(N); 13130 } 13131 13132 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 13133 Loop &L) 13134 : SE(SE), L(L) {} 13135 13136 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 13137 const SCEV *Expr = SE.getSCEV(V); 13138 RewriteEntry &Entry = RewriteMap[Expr]; 13139 13140 // If we already have an entry and the version matches, return it. 13141 if (Entry.second && Generation == Entry.first) 13142 return Entry.second; 13143 13144 // We found an entry but it's stale. Rewrite the stale entry 13145 // according to the current predicate. 
13146 if (Entry.second) 13147 Expr = Entry.second; 13148 13149 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 13150 Entry = {Generation, NewSCEV}; 13151 13152 return NewSCEV; 13153 } 13154 13155 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 13156 if (!BackedgeCount) { 13157 SCEVUnionPredicate BackedgePred; 13158 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 13159 addPredicate(BackedgePred); 13160 } 13161 return BackedgeCount; 13162 } 13163 13164 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 13165 if (Preds.implies(&Pred)) 13166 return; 13167 Preds.add(&Pred); 13168 updateGeneration(); 13169 } 13170 13171 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 13172 return Preds; 13173 } 13174 13175 void PredicatedScalarEvolution::updateGeneration() { 13176 // If the generation number wrapped recompute everything. 13177 if (++Generation == 0) { 13178 for (auto &II : RewriteMap) { 13179 const SCEV *Rewritten = II.second.second; 13180 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 13181 } 13182 } 13183 } 13184 13185 void PredicatedScalarEvolution::setNoOverflow( 13186 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 13187 const SCEV *Expr = getSCEV(V); 13188 const auto *AR = cast<SCEVAddRecExpr>(Expr); 13189 13190 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 13191 13192 // Clear the statically implied flags. 13193 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 13194 addPredicate(*SE.getWrapPredicate(AR, Flags)); 13195 13196 auto II = FlagsMap.insert({V, Flags}); 13197 if (!II.second) 13198 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 13199 } 13200 13201 bool PredicatedScalarEvolution::hasNoOverflow( 13202 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 13203 const SCEV *Expr = getSCEV(V); 13204 const auto *AR = cast<SCEVAddRecExpr>(Expr); 13205 13206 Flags = SCEVWrapPredicate::clearFlags( 13207 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 13208 13209 auto II = FlagsMap.find(V); 13210 13211 if (II != FlagsMap.end()) 13212 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 13213 13214 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 13215 } 13216 13217 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 13218 const SCEV *Expr = this->getSCEV(V); 13219 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 13220 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 13221 13222 if (!New) 13223 return nullptr; 13224 13225 for (auto *P : NewPreds) 13226 Preds.add(P); 13227 13228 updateGeneration(); 13229 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 13230 return New; 13231 } 13232 13233 PredicatedScalarEvolution::PredicatedScalarEvolution( 13234 const PredicatedScalarEvolution &Init) 13235 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 13236 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 13237 for (auto I : Init.FlagsMap) 13238 FlagsMap.insert(I); 13239 } 13240 13241 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 13242 // For each block. 13243 for (auto *BB : L.getBlocks()) 13244 for (auto &I : *BB) { 13245 if (!SE.isSCEVable(I.getType())) 13246 continue; 13247 13248 auto *Expr = SE.getSCEV(&I); 13249 auto II = RewriteMap.find(Expr); 13250 13251 if (II == RewriteMap.end()) 13252 continue; 13253 13254 // Don't print things that are not interesting. 
13255       if (II->second.second == Expr)
13256         continue;
13257
13258       OS.indent(Depth) << "[PSE]" << I << ":\n";
13259       OS.indent(Depth + 2) << *Expr << "\n";
13260       OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
13261     }
13262 }
13263
13264 // Match the mathematical pattern A - (A / B) * B, where A and B can be
13265 // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
13266 // for URem with constant power-of-2 second operands.
13267 // It's not always easy, as A and B can be folded (imagine A is X / 2 and B is
13268 // 4; then A / B becomes X / 8).
13269 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
13270                                 const SCEV *&RHS) {
13271   // Try to match 'zext (trunc A to iB) to iY', which is used
13272   // for URem with constant power-of-2 second operands. Make sure the size of
13273   // the operand A matches the size of the whole expression.
13274   if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
13275     if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
13276       LHS = Trunc->getOperand();
13277       // Bail out if the type of the LHS is larger than the type of the
13278       // expression for now.
13279       if (getTypeSizeInBits(LHS->getType()) >
13280           getTypeSizeInBits(Expr->getType()))
13281         return false;
13282       if (LHS->getType() != Expr->getType())
13283         LHS = getZeroExtendExpr(LHS, Expr->getType());
13284       RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
13285                         << getTypeSizeInBits(Trunc->getType()));
13286       return true;
13287     }
13288   const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
13289   if (Add == nullptr || Add->getNumOperands() != 2)
13290     return false;
13291
13292   const SCEV *A = Add->getOperand(1);
13293   const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));
13294
13295   if (Mul == nullptr)
13296     return false;
13297
13298   const auto MatchURemWithDivisor = [&](const SCEV *B) {
13299     // (SomeExpr + (-(SomeExpr / B) * B)).
13300     if (Expr == getURemExpr(A, B)) {
13301       LHS = A;
13302       RHS = B;
13303       return true;
13304     }
13305     return false;
13306   };
13307
13308   // (SomeExpr + (-1 * (SomeExpr / B) * B)).
13309   if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
13310     return MatchURemWithDivisor(Mul->getOperand(1)) ||
13311            MatchURemWithDivisor(Mul->getOperand(2));
13312
13313   // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
13314   if (Mul->getNumOperands() == 2)
13315     return MatchURemWithDivisor(Mul->getOperand(1)) ||
13316            MatchURemWithDivisor(Mul->getOperand(0)) ||
13317            MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
13318            MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
13319   return false;
13320 }
13321
13322 const SCEV *
13323 ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
13324   SmallVector<BasicBlock*, 16> ExitingBlocks;
13325   L->getExitingBlocks(ExitingBlocks);
13326
13327   // Form an expression for the maximum exit count possible for this loop. We
13328   // merge the max and exact information to approximate a version of
13329   // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
  SmallVector<const SCEV*, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      ExitCount = getExitCount(L, ExitingBB,
                               ScalarEvolution::ConstantMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts);
}

/// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown
/// components following the Map (Value -> SCEV)), but it skips AddRecExprs,
/// because we cannot guarantee that the replacement is loop invariant in the
/// AddRec's loop.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  ValueToSCEVMapTy &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr->getValue());
    if (I == Map.end())
      return Expr;
    return I->second;
  }
};

const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
    // If we have LHS == 0, check whether LHS computes a property of some
    // unknown SCEV %v that lets us rewrite %v to express that property
    // explicitly.
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
        RHSC->getValue()->isNullValue()) {
      // If LHS is A % B, i.e. the condition is A % B == 0, rewrite A to
      // (A /u B) * B to express that explicitly.
      const SCEV *URemLHS = nullptr;
      const SCEV *URemRHS = nullptr;
      if (matchURem(LHS, URemLHS, URemRHS)) {
        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
          Value *V = LHSUnknown->getValue();
          auto Multiple =
              getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS,
                         (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
          RewriteMap[V] = Multiple;
          return;
        }
      }
    }

    if (!isa<SCEVUnknown>(LHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // For now, limit to conditions that provide information about unknown
    // expressions.
    auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
    if (!LHSUnknown)
      return;

    // TODO: use information from more predicates.
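    // Each predicate handled below narrows what we know about the unknown:
    // ULT/ULE guards cap it with a umin against the (adjusted) bound, an EQ
    // guard against a constant pins it to that constant, and a NE-zero guard
    // raises its lower bound to 1 via a umax.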
    switch (Predicate) {
    case CmpInst::ICMP_ULT: {
      if (!containsAddRecurrence(RHS)) {
        const SCEV *Base = LHS;
        auto I = RewriteMap.find(LHSUnknown->getValue());
        if (I != RewriteMap.end())
          Base = I->second;

        RewriteMap[LHSUnknown->getValue()] =
            getUMinExpr(Base, getMinusSCEV(RHS, getOne(RHS->getType())));
      }
      break;
    }
    case CmpInst::ICMP_ULE: {
      if (!containsAddRecurrence(RHS)) {
        const SCEV *Base = LHS;
        auto I = RewriteMap.find(LHSUnknown->getValue());
        if (I != RewriteMap.end())
          Base = I->second;
        RewriteMap[LHSUnknown->getValue()] = getUMinExpr(Base, RHS);
      }
      break;
    }
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewriteMap[LHSUnknown->getValue()] = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewriteMap[LHSUnknown->getValue()] =
            getUMaxExpr(LHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }
  };
  // Starting at the loop predecessor, climb up the predecessor chain as long
  // as we keep finding predecessors with a unique successor leading to the
  // original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  ValueToSCEVMapTy RewriteMap;
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), L->getHeader());
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    // TODO: use information from more complex conditions, e.g. AND
    // expressions.
    auto *Cmp = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition());
    if (!Cmp)
      continue;

    auto Predicate = Cmp->getPredicate();
    if (LoopEntryPredicate->getSuccessor(1) == Pair.second)
      Predicate = CmpInst::getInversePredicate(Predicate);
    CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  // Also collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
    if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
      continue;
    CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  if (RewriteMap.empty())
    return Expr;
  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}