//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
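//
// For example, the canonical induction variable of a loop that starts at 0
// and advances by 1 each iteration is represented as the add recurrence
// {0,+,1}, whose value at iteration k is simply k. (This is the notation
// produced by SCEV::print below.)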
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
    ClassifyExpressions("scalar-evolution-classify-expressions",
                        cl::Hidden, cl::init(true),
                        cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr:  OpStr = " + ";    break;
    case scMulExpr:  OpStr = " * ";    break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr: OpStr = " umin "; break;
    case scSMinExpr: OpStr = " smin "; break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
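
// For instance, an affine recurrence with start 4 and step 8 that is known
// not to wrap unsigned, in a loop whose header block is %loop, prints as
//   {4,+,8}<nuw><%loop>
// and an unsigned division of %n by 2 prints as (%n /u 2).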

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
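
// These recognizers match the classic constant-expression encodings of
// sizeof/alignof/offsetof: sizeof(Ty), for instance, is recognized as
//   ptrtoint(getelementptr Ty, Ty* null, i32 1)
// i.e., the offset of element 1 from a null base pointer.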

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to
// be more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recurrences that
    // are used by one SCEV, so we can safely sort them by loop header
    // dominance. We require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
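///
/// For example, given operands (%x, 2, %y, %x), sorting by complexity yields
/// (2, %x, %x, %y): the duplicate %x's become adjacent, which lets a folder
/// such as getAddExpr combine %x + %x into 2 * %x in a single linear scan.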
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
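  //
  // A worked example at K = 3, W = 32: K! = 6 = 2 * 3, so T = 1 and the odd
  // part K! / 2^T is 3. The product It * (It - 1) * (It - 2) is computed at
  // W + T = 33 bits, divided by 2^T = 2, truncated back to 32 bits, and
  // finally multiplied by the multiplicative inverse of 3 modulo 2^32
  // (0xAAAAAAAB), which performs the exact division by 3.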

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
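///
/// For instance, the affine-plus-quadratic chain {A,+,B,+,C} evaluates at
/// iteration It to A + B*It + C*It*(It-1)/2.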
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");
  assert(Depth <= 1 && "getPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return getTruncateOrZeroExtend(Op, Ty);

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return getTruncateOrZeroExtend(S, Ty);

  // If not, is this expression something we can't reduce any further?
  if (isa<SCEVUnknown>(Op)) {
    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());
    assert(getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(
               Op->getType())) == getDataLayout().getTypeSizeInBits(IntPtrTy) &&
           "We can only model ptrtoint if SCEV's effective (integer) type is "
           "sufficiently wide to represent all possible pointer values.");
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return getTruncateOrZeroExtend(S, Ty);
  }

  assert(Depth == 0 &&
         "getPtrToIntExpr() should not self-recurse for non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      Type *ExprPtrTy = Expr->getType();
      assert(ExprPtrTy->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      Type *ExprIntPtrTy = SE.getDataLayout().getIntPtrType(ExprPtrTy);
      return SE.getPtrToIntExpr(Expr, ExprIntPtrTy, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return getTruncateOrZeroExtend(IntOp, Ty);
}
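
// For example, sinking rewrites the ptrtoint of a pointer-typed SCEV like
// (4 + %ptr) into (4 + (ptrtoint %ptr)): all the arithmetic is now performed
// on integers, and the only remaining pointer-typed node is the SCEVUnknown
// %ptr.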

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that the ID was inserted into the cache by the recursive
    // modifications above. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}
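
// For example, for an i8 recurrence whose step is known to lie in [1, 4], the
// limit is -128 - 4, which wraps to 124: as long as the recurrence is
// ICMP_SLT 124 before the increment, adding at most 4 cannot push it past the
// signed maximum of 127.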

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace
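
// With these traits, a single implementation can serve both extension kinds:
// for instance, getExtendAddRecStart<SCEVSignExtendExpr> below resolves to
// getSignExtendExpr with SCEV::FlagNSW, while the SCEVZeroExtendExpr
// instantiation resolves to getZeroExtendExpr with SCEV::FlagNUW.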

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}
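
// For example, if C = 37 (0b100101) and every non-constant operand has at
// least three trailing zero bits, then D = C mod 8 = 5 (0b101): the low bits
// of D and of (C - D + x + y + ...) do not overlap, so adding D back on top
// cannot carry and therefore cannot wrap.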
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty, Depth);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.
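      // (Illustrative case: a loop body containing
      // llvm.assume(icmp ult %iv, 100) may let the induction-based proof
      // below establish <nuw> even though no constant max backedge-taken
      // count was computed from the exit condition.)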
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {

        auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
        if (AR->hasNoUnsignedWrap()) {
          // Same as nuw case above - duplicated here to avoid a compile time
          // issue.  It's not clear that the order of checks does matter, but
          // it's one of two possible causes of a change which was
          // reverted.  Be conservative for the moment.
          return getAddRecExpr(
              getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                       Depth + 1),
              getZeroExtendExpr(Step, Ty, Depth + 1), L,
              AR->getNoWrapFlags());
        }

        // For a negative step, we can extend the operands iff doing so only
        // traverses values in the range zext([0,UINT_MAX]).
        if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec.  Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsign overflow then we can, by definition,
      // commute the zero extension with the addition operation.
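      // For instance (an illustrative i8-to-i32 case):
      //   zext((%a + %b)<nuw>) --> (zext(%a) + zext(%b))<nuw>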
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Often address arithmetic contains expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SZExtD, SZExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
    if (SM->hasNoUnsignedWrap()) {
      // If the multiply does not unsign overflow then we can, by definition,
      // commute the zero extension with the multiply operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SM->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(2^K * (trunc X to iN)) to iM ->
    // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
    //
    //  Proof:
    //
    //     zext(2^K * (trunc X to iN)) to iM
    //   = zext((trunc X to iN) << K) to iM
    //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
    //     (because shl removes the top K bits)
    //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
    //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
    //
    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
                               MulLHS->getAPInt().logBase2();
            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
            return getMulExpr(
                getZeroExtendExpr(MulLHS, Ty),
                getZeroExtendExpr(
                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
                SCEV::FlagNUW, Depth + 1);
          }
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty, Depth);
  }

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not sign overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }

    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not signed wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // For instance, this will bring two seemingly different expressions:
    //     1 + sext(5 + 20 * %x + 24 * %y)  and
    //         sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //     2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
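          // That is, working in a type twice as wide, test whether
          //   sext(Start + Step * MaxBECount) ==
          //   sext(Start) + sext(Step) * zext(MaxBECount)
          // (a second variant below zero-extends the step instead).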
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      auto NewFlags = proveNoSignedWrapViaInduction(AR);
      setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      if (AR->hasNoSignedWrap()) {
        // Same as nsw case above - duplicated here to avoid a compile time
        // issue.  It's not clear that the order of checks does matter, but
        // it's one of two possible causes of a change which was
        // reverted.  Be conservative for the moment.
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }

      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably positive and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
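  // For example: anyext({0,+,%step}<L>) --> {anyext(0),+,anyext(%step)}<L>.
  // Only the no-self-wrap flag is kept, since <nuw>/<nsw> need not transfer
  // to the unspecified extension.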
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, and update the given map. This is a helper function for
/// getAddRecExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddRecExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(), Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `Flags' as can't-wrap behavior. Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const ArrayRef<const SCEV *> Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
      isa<SCEVConstant>(Ops[0])) {

    auto Opcode = [&] {
      switch (Type) {
      case scAddExpr:
        return Instruction::Add;
      case scMulExpr:
        return Instruction::Mul;
      default:
        llvm_unreachable("Unexpected SCEV op.");
      }
    }();

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();

    // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }

    // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
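    // (Worked instance, for illustration: for an i8 add with C = 100, the
    // guaranteed no-unsigned-wrap region is [0, 156), i.e. any A in [0, 155]
    // proves <nuw> because 155 + 100 = 255 fits in i8.)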
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}

bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}

/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
  };

  // Limit recursion depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateAddExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
    // Don't strengthen flags if we have no new information.
    SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
    if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
      Add->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  // Okay, check to see if the same value occurs in the operand list more than
  // once.  If so, merge them together into a multiply expression.  Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      // X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
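      // (e.g. X + Y + Y + Y becomes X + Y*3, with Count == 3 here.)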
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, OrigFlags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an add of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the add then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_begin()+MulOp);
              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
              InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_begin()+OMulOp);
              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
              InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
            const SCEV *InnerMulSum =
                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                              SCEV::FlagAnyWrap, Depth + 1);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
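    // (Illustration: %n + {0,+,1}<%loop> with loop-invariant %n folds to
    // {%n,+,1}<%loop> below.)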
    if (!LIOps.empty()) {
      // Compute nowrap flags for the addition of the loop-invariant ops and
      // the addrec. Temporarily push it as an operand for that purpose.
      LIOps.push_back(AddRec);
      SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
      LIOps.pop_back();

      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
      // This follows from the fact that the no-wrap flags on the outer add
      // expression are applicable on the 0th iteration, when the add recurrence
      // will be equal to its start value.
      AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExpr's to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
                 AddRec->getLoop()->getHeader()) &&
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  setNoWrapFlags(S, Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient.  If an
/// intermediate computation overflows, Overflow will be set and the return will
/// be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At each iteration, we take the n-th term of the numerator and divide by the
  // (k-n)th term of the denominator.  This division will always produce an
  // integral result, and helps reduce the chance of overflow in the
  // intermediate computations. However, we can still overflow even when the
  // final result would fit.
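  //
  // A small worked instance (for illustration): Choose(5, 2) computes
  // r = 1*5/1 = 5, then r = 5*4/2 = 10, matching 5!/(2!*3!) = 10.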

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}

/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we have a multiply of zero, it will always be zero.
    if (LHSC->getValue()->isZero())
      return LHSC;

    // If we are left with a constant one being multiplied, strip it off.
    if (LHSC->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
  };

  // Limit recursion depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateMulExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) {
    // Don't strengthen flags if we have no new information.
    SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
    if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
      Mul->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    if (Ops.size() == 2) {
      // C1*(C2+V) -> C1*C2 + C1*V
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant, apply this
        // transformation as well.
        //
        // TODO: There are some cases where this transformation is not
        // profitable; for example, Add = (C0 + X) * Y + Z.  Maybe the scope of
        // this transformation should be narrowed down.
        if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            getMulExpr(LHSC, Add->getOperand(1),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            SCEV::FlagAnyWrap, Depth + 1);

      if (Ops[0]->isAllOnesValue()) {
        // If we have a mul by -1 of an add, try distributing the -1 among the
        // add operands.
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
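    // (Illustration: %n * {1,+,1}<%loop> with loop-invariant %n becomes
    // {%n,+,%n}<%loop> below.)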
2939 SmallVector<const SCEV *, 8> LIOps; 2940 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2941 const Loop *AddRecLoop = AddRec->getLoop(); 2942 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2943 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2944 LIOps.push_back(Ops[i]); 2945 Ops.erase(Ops.begin()+i); 2946 --i; --e; 2947 } 2948 2949 // If we found some loop invariants, fold them into the recurrence. 2950 if (!LIOps.empty()) { 2951 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 2952 SmallVector<const SCEV *, 4> NewOps; 2953 NewOps.reserve(AddRec->getNumOperands()); 2954 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 2955 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 2956 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 2957 SCEV::FlagAnyWrap, Depth + 1)); 2958 2959 // Build the new addrec. Propagate the NUW and NSW flags if both the 2960 // outer mul and the inner addrec are guaranteed to have no overflow. 2961 // 2962 // No self-wrap cannot be guaranteed after changing the step size, but 2963 // will be inferred if either NUW or NSW is true. 2964 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); 2965 const SCEV *NewRec = getAddRecExpr( 2966 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); 2967 2968 // If all of the other operands were loop invariant, we are done. 2969 if (Ops.size() == 1) return NewRec; 2970 2971 // Otherwise, multiply the folded AddRec by the non-invariant parts. 2972 for (unsigned i = 0;; ++i) 2973 if (Ops[i] == AddRec) { 2974 Ops[i] = NewRec; 2975 break; 2976 } 2977 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2978 } 2979 2980 // Okay, if there weren't any loop invariants to be folded, check to see 2981 // if there are multiple AddRec's with the same loop induction variable 2982 // being multiplied together. If so, we can fold them. 2983 2984 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 2985 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 2986 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 2987 // ]]],+,...up to x=2n}. 2988 // Note that the arguments to choose() are always integers with values 2989 // known at compile time, never SCEV objects. 2990 // 2991 // The implementation avoids pointless extra computations when the two 2992 // addrec's are of different length (mathematically, it's equivalent to 2993 // an infinite stream of zeros on the right). 2994 bool OpsModified = false; 2995 for (unsigned OtherIdx = Idx+1; 2996 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2997 ++OtherIdx) { 2998 const SCEVAddRecExpr *OtherAddRec = 2999 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 3000 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 3001 continue; 3002 3003 // Limit max number of arguments to avoid creation of unreasonably big 3004 // SCEVAddRecs with very complex operands. 
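      // Worked example of the product formula above (illustration only): over
      // a single loop, {1,+,1} evaluates to n+1 at iteration n, so
      //   {1,+,1}<L> * {1,+,1}<L> = (n+1)^2 = {1,+,3,+,2}<L>,
      // since the first difference of (n+1)^2 is 2n+3 and the second is 2.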
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
              MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV *, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        SmallVector<const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
}

/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to the identity: %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y).
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS; // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of the
      // LHS expression.
      // TODO: Generalize this to non-constants by using known-bits
      // information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence:
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0) {
              const SCEV *NewLHS =
                  getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                AR->getLoop(), SCEV::FlagNW);
              if (LHS != NewLHS) {
                LHS = NewLHS;

                // Reset the ID to include the new LHS, and check if it is
                // already cached.
                ID.clear();
                ID.AddInteger(scUDivExpr);
                ID.AddPointer(LHS);
                ID.AddPointer(RHS);
                IP = nullptr;
                if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
                  return S;
              }
            }
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3173 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3174 SmallVector<const SCEV *, 4> Operands; 3175 for (const SCEV *Op : M->operands()) 3176 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3177 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3178 // Find an operand that's safely divisible. 3179 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3180 const SCEV *Op = M->getOperand(i); 3181 const SCEV *Div = getUDivExpr(Op, RHSC); 3182 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3183 Operands = SmallVector<const SCEV *, 4>(M->operands()); 3184 Operands[i] = Div; 3185 return getMulExpr(Operands); 3186 } 3187 } 3188 } 3189 3190 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3191 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3192 if (auto *DivisorConstant = 3193 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3194 bool Overflow = false; 3195 APInt NewRHS = 3196 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3197 if (Overflow) { 3198 return getConstant(RHSC->getType(), 0, false); 3199 } 3200 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3201 } 3202 } 3203 3204 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3205 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3206 SmallVector<const SCEV *, 4> Operands; 3207 for (const SCEV *Op : A->operands()) 3208 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3209 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3210 Operands.clear(); 3211 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3212 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3213 if (isa<SCEVUDivExpr>(Op) || 3214 getMulExpr(Op, RHS) != A->getOperand(i)) 3215 break; 3216 Operands.push_back(Op); 3217 } 3218 if (Operands.size() == A->getNumOperands()) 3219 return getAddExpr(Operands); 3220 } 3221 } 3222 3223 // Fold if both operands are constant. 3224 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3225 Constant *LHSCV = LHSC->getValue(); 3226 Constant *RHSCV = RHSC->getValue(); 3227 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3228 RHSCV))); 3229 } 3230 } 3231 } 3232 3233 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3234 // changes). Make sure we get a new one. 3235 IP = nullptr; 3236 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3237 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3238 LHS, RHS); 3239 UniqueSCEVs.InsertNode(S, IP); 3240 addToLoopUseLists(S); 3241 return S; 3242 } 3243 3244 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3245 APInt A = C1->getAPInt().abs(); 3246 APInt B = C2->getAPInt().abs(); 3247 uint32_t ABW = A.getBitWidth(); 3248 uint32_t BBW = B.getBitWidth(); 3249 3250 if (ABW > BBW) 3251 B = B.zext(ABW); 3252 else if (ABW < BBW) 3253 A = A.zext(BBW); 3254 3255 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3256 } 3257 3258 /// Get a canonical unsigned division expression, or something simpler if 3259 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3260 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3261 /// it's not exact because the udiv may be clearing bits. 3262 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3263 const SCEV *RHS) { 3264 // TODO: we could try to find factors in all sorts of things, but for now we 3265 // just deal with u/exact (multiply, constant). 
See SCEVDivision towards the
  // end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands;
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly; it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to call getConstantMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags.
  // However, computing a BE count requires calling getAddRecExpr, so we may
  // not yet have a meaningful BE count at this point (and if we don't, we'd
  // be stuck with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands to be loop-invariant with respect to
      // their loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddRecExpr(Operands, L, Flags);
}

const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  // instruction to its SCEV, because the Instruction may be guarded by control
  // flow and the no-overflow bits may not be valid for the expression in any
  // context. This can be fixed similarly to how these flags are handled for
  // adds.
  SCEV::NoWrapFlags OffsetWrap =
      GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  Type *CurTy = GEP->getType();
  bool FirstIter = true;
  SmallVector<const SCEV *, 4> Offsets;
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
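      // Hypothetical example (types, names, and a typical 64-bit DataLayout
      // assumed): for
      //   getelementptr inbounds {i32, [10 x i64]}, ptr %p, i64 0, i32 1, i64 %i
      // this loop collects the offsets {0, 8, (8 * %i)} -- zero for the
      // leading index over the struct, the byte offset of field 1, and the
      // scaled array index -- giving ((8 * %i) + 8 + %p) overall.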
3431 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3432 unsigned FieldNo = Index->getZExtValue(); 3433 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3434 Offsets.push_back(FieldOffset); 3435 3436 // Update CurTy to the type of the field at Index. 3437 CurTy = STy->getTypeAtIndex(Index); 3438 } else { 3439 // Update CurTy to its element type. 3440 if (FirstIter) { 3441 assert(isa<PointerType>(CurTy) && 3442 "The first index of a GEP indexes a pointer"); 3443 CurTy = GEP->getSourceElementType(); 3444 FirstIter = false; 3445 } else { 3446 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3447 } 3448 // For an array, add the element offset, explicitly scaled. 3449 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3450 // Getelementptr indices are signed. 3451 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3452 3453 // Multiply the index by the element size to compute the element offset. 3454 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); 3455 Offsets.push_back(LocalOffset); 3456 } 3457 } 3458 3459 // Handle degenerate case of GEP without offsets. 3460 if (Offsets.empty()) 3461 return BaseExpr; 3462 3463 // Add the offsets together, assuming nsw if inbounds. 3464 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); 3465 // Add the base address and the offset. We cannot use the nsw flag, as the 3466 // base address is unsigned. However, if we know that the offset is 3467 // non-negative, we can use nuw. 3468 SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset) 3469 ? SCEV::FlagNUW : SCEV::FlagAnyWrap; 3470 return getAddExpr(BaseExpr, Offset, BaseWrap); 3471 } 3472 3473 std::tuple<SCEV *, FoldingSetNodeID, void *> 3474 ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, 3475 ArrayRef<const SCEV *> Ops) { 3476 FoldingSetNodeID ID; 3477 void *IP = nullptr; 3478 ID.AddInteger(SCEVType); 3479 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3480 ID.AddPointer(Ops[i]); 3481 return std::tuple<SCEV *, FoldingSetNodeID, void *>( 3482 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); 3483 } 3484 3485 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { 3486 SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 3487 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); 3488 } 3489 3490 const SCEV *ScalarEvolution::getSignumExpr(const SCEV *Op) { 3491 Type *Ty = Op->getType(); 3492 return getSMinExpr(getSMaxExpr(Op, getMinusOne(Ty)), getOne(Ty)); 3493 } 3494 3495 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, 3496 SmallVectorImpl<const SCEV *> &Ops) { 3497 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3498 if (Ops.size() == 1) return Ops[0]; 3499 #ifndef NDEBUG 3500 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3501 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3502 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3503 "Operand types don't match!"); 3504 #endif 3505 3506 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3507 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3508 3509 // Sort by complexity, this groups all similar expression types together. 3510 GroupByComplexity(Ops, &LI, DT); 3511 3512 // Check if we have created the same expression before. 3513 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) { 3514 return S; 3515 } 3516 3517 // If there are any constants, fold them together. 
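  // For example (illustration only, %x assumed): getSMaxExpr({%x, 3, 5})
  // folds the two constants to smax(5, %x) here; and a constant equal to the
  // signed maximum would short-circuit the whole expression just below, since
  // smax(SINT_MAX, ...) is always SINT_MAX.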
3518 unsigned Idx = 0; 3519 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3520 ++Idx; 3521 assert(Idx < Ops.size()); 3522 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3523 if (Kind == scSMaxExpr) 3524 return APIntOps::smax(LHS, RHS); 3525 else if (Kind == scSMinExpr) 3526 return APIntOps::smin(LHS, RHS); 3527 else if (Kind == scUMaxExpr) 3528 return APIntOps::umax(LHS, RHS); 3529 else if (Kind == scUMinExpr) 3530 return APIntOps::umin(LHS, RHS); 3531 llvm_unreachable("Unknown SCEV min/max opcode"); 3532 }; 3533 3534 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3535 // We found two constants, fold them together! 3536 ConstantInt *Fold = ConstantInt::get( 3537 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3538 Ops[0] = getConstant(Fold); 3539 Ops.erase(Ops.begin()+1); // Erase the folded element 3540 if (Ops.size() == 1) return Ops[0]; 3541 LHSC = cast<SCEVConstant>(Ops[0]); 3542 } 3543 3544 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3545 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3546 3547 if (IsMax ? IsMinV : IsMaxV) { 3548 // If we are left with a constant minimum(/maximum)-int, strip it off. 3549 Ops.erase(Ops.begin()); 3550 --Idx; 3551 } else if (IsMax ? IsMaxV : IsMinV) { 3552 // If we have a max(/min) with a constant maximum(/minimum)-int, 3553 // it will always be the extremum. 3554 return LHSC; 3555 } 3556 3557 if (Ops.size() == 1) return Ops[0]; 3558 } 3559 3560 // Find the first operation of the same kind 3561 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3562 ++Idx; 3563 3564 // Check to see if one of the operands is of the same kind. If so, expand its 3565 // operands onto our operand list, and recurse to simplify. 3566 if (Idx < Ops.size()) { 3567 bool DeletedAny = false; 3568 while (Ops[Idx]->getSCEVType() == Kind) { 3569 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3570 Ops.erase(Ops.begin()+Idx); 3571 Ops.append(SMME->op_begin(), SMME->op_end()); 3572 DeletedAny = true; 3573 } 3574 3575 if (DeletedAny) 3576 return getMinMaxExpr(Kind, Ops); 3577 } 3578 3579 // Okay, check to see if the same value occurs in the operand list twice. If 3580 // so, delete one. Since we sorted the list, these values are required to 3581 // be adjacent. 3582 llvm::CmpInst::Predicate GEPred = 3583 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3584 llvm::CmpInst::Predicate LEPred = 3585 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3586 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3587 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3588 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3589 if (Ops[i] == Ops[i + 1] || 3590 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3591 // X op Y op Y --> X op Y 3592 // X op Y --> X, if we know X, Y are ordered appropriately 3593 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3594 --i; 3595 --e; 3596 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3597 Ops[i + 1])) { 3598 // X op Y --> Y, if we know X, Y are ordered appropriately 3599 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3600 --i; 3601 --e; 3602 } 3603 } 3604 3605 if (Ops.size() == 1) return Ops[0]; 3606 3607 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3608 3609 // Okay, it looks like we really DO need an expr. Check to see if we 3610 // already have one, otherwise create a new one. 
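  // The code below re-probes the cache (the operand list may have changed
  // since the lookup at the top) and otherwise interns a new node whose
  // operand array is copied into the SCEVAllocator arena so it owns stable
  // storage.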
  const SCEV *ExistingSCEV;
  FoldingSetNodeID ID;
  void *IP;
  std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
  if (ExistingSCEV)
    return ExistingSCEV;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
      SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());

  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMinExpr, Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMinExpr(Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMinExpr, Ops);
}

const SCEV *
ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
                                             ScalableVectorType *ScalableTy) {
  Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
  Constant *One = ConstantInt::get(IntTy, 1);
  Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
  // Note that the expression we created is the final expression; we don't
  // want to simplify it any further. Also, if we call a normal getSCEV(),
  // we'll end up in an endless recursion, so just create an SCEVUnknown.
  return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
    return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
  // We can bypass creating a target-independent constant expression and then
  // folding it back into a ConstantInt. This is just a compile-time
  // optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
  if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
    return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
  // We can bypass creating a target-independent constant expression and then
  // folding it back into a ConstantInt. This is just a compile-time
  // optimization.
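  // For example (assuming a typical 64-bit DataLayout): an i36 has a 5-byte
  // store size here, while getSizeOfExpr above would report its rounded-up
  // 8-byte alloc size.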
  return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent constant expression and then
  // folding it back into a ConstantInt. This is just a compile-time
  // optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer index sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIndexType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ?
T1 : T2; 3765 } 3766 3767 const SCEV *ScalarEvolution::getCouldNotCompute() { 3768 return CouldNotCompute.get(); 3769 } 3770 3771 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3772 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3773 auto *SU = dyn_cast<SCEVUnknown>(S); 3774 return SU && SU->getValue() == nullptr; 3775 }); 3776 3777 return !ContainsNulls; 3778 } 3779 3780 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3781 HasRecMapType::iterator I = HasRecMap.find(S); 3782 if (I != HasRecMap.end()) 3783 return I->second; 3784 3785 bool FoundAddRec = 3786 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); 3787 HasRecMap.insert({S, FoundAddRec}); 3788 return FoundAddRec; 3789 } 3790 3791 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3792 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3793 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3794 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3795 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3796 if (!Add) 3797 return {S, nullptr}; 3798 3799 if (Add->getNumOperands() != 2) 3800 return {S, nullptr}; 3801 3802 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3803 if (!ConstOp) 3804 return {S, nullptr}; 3805 3806 return {Add->getOperand(1), ConstOp->getValue()}; 3807 } 3808 3809 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3810 /// by the value and offset from any ValueOffsetPair in the set. 3811 SetVector<ScalarEvolution::ValueOffsetPair> * 3812 ScalarEvolution::getSCEVValues(const SCEV *S) { 3813 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3814 if (SI == ExprValueMap.end()) 3815 return nullptr; 3816 #ifndef NDEBUG 3817 if (VerifySCEVMap) { 3818 // Check there is no dangling Value in the set returned. 3819 for (const auto &VE : SI->second) 3820 assert(ValueExprMap.count(VE.first)); 3821 } 3822 #endif 3823 return &SI->second; 3824 } 3825 3826 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3827 /// cannot be used separately. eraseValueFromMap should be used to remove 3828 /// V from ValueExprMap and ExprValueMap at the same time. 3829 void ScalarEvolution::eraseValueFromMap(Value *V) { 3830 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3831 if (I != ValueExprMap.end()) { 3832 const SCEV *S = I->second; 3833 // Remove {V, 0} from the set of ExprValueMap[S] 3834 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S)) 3835 SV->remove({V, nullptr}); 3836 3837 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3838 const SCEV *Stripped; 3839 ConstantInt *Offset; 3840 std::tie(Stripped, Offset) = splitAddExpr(S); 3841 if (Offset != nullptr) { 3842 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped)) 3843 SV->remove({V, Offset}); 3844 } 3845 ValueExprMap.erase(V); 3846 } 3847 } 3848 3849 /// Check whether value has nuw/nsw/exact set but SCEV does not. 3850 /// TODO: In reality it is better to check the poison recursively 3851 /// but this is better than nothing. 
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S has been inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
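      // Assumed illustration: if V computes S = (4 + %n) where %n is not
      // itself an add, splitAddExpr returns {%n, 4}, and the map then records
      // that V can be rematerialized from any value equivalent to %n plus the
      // offset 4.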
3893 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3894 !isa<GetElementPtrInst>(V)) 3895 ExprValueMap[Stripped].insert({V, Offset}); 3896 } 3897 } 3898 return S; 3899 } 3900 3901 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3902 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3903 3904 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3905 if (I != ValueExprMap.end()) { 3906 const SCEV *S = I->second; 3907 if (checkValidity(S)) 3908 return S; 3909 eraseValueFromMap(V); 3910 forgetMemoizedResults(S); 3911 } 3912 return nullptr; 3913 } 3914 3915 /// Return a SCEV corresponding to -V = -1*V 3916 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3917 SCEV::NoWrapFlags Flags) { 3918 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3919 return getConstant( 3920 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3921 3922 Type *Ty = V->getType(); 3923 Ty = getEffectiveSCEVType(Ty); 3924 return getMulExpr(V, getMinusOne(Ty), Flags); 3925 } 3926 3927 /// If Expr computes ~A, return A else return nullptr 3928 static const SCEV *MatchNotExpr(const SCEV *Expr) { 3929 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 3930 if (!Add || Add->getNumOperands() != 2 || 3931 !Add->getOperand(0)->isAllOnesValue()) 3932 return nullptr; 3933 3934 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 3935 if (!AddRHS || AddRHS->getNumOperands() != 2 || 3936 !AddRHS->getOperand(0)->isAllOnesValue()) 3937 return nullptr; 3938 3939 return AddRHS->getOperand(1); 3940 } 3941 3942 /// Return a SCEV corresponding to ~V = -1-V 3943 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3944 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3945 return getConstant( 3946 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3947 3948 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 3949 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 3950 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 3951 SmallVector<const SCEV *, 2> MatchedOperands; 3952 for (const SCEV *Operand : MME->operands()) { 3953 const SCEV *Matched = MatchNotExpr(Operand); 3954 if (!Matched) 3955 return (const SCEV *)nullptr; 3956 MatchedOperands.push_back(Matched); 3957 } 3958 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), 3959 MatchedOperands); 3960 }; 3961 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 3962 return Replaced; 3963 } 3964 3965 Type *Ty = V->getType(); 3966 Ty = getEffectiveSCEVType(Ty); 3967 return getMinusSCEV(getMinusOne(Ty), V); 3968 } 3969 3970 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3971 SCEV::NoWrapFlags Flags, 3972 unsigned Depth) { 3973 // Fast path: X - X --> 0. 3974 if (LHS == RHS) 3975 return getZero(LHS->getType()); 3976 3977 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3978 // makes it so that we cannot make much use of NUW. 3979 auto AddFlags = SCEV::FlagAnyWrap; 3980 const bool RHSIsNotMinSigned = 3981 !getSignedRangeMin(RHS).isMinSignedValue(); 3982 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3983 // Let M be the minimum representable signed value. Then (-1)*RHS 3984 // signed-wraps if and only if RHS is M. That can happen even for 3985 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3986 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3987 // (-1)*RHS, we need to prove that RHS != M. 
  //
  // If LHS is non-negative and we know that LHS - RHS does not
  // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
  // either by proving that RHS > M or that LHS >= 0.
  if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
    AddFlags = SCEV::FlagNSW;
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot
truncate or noop with non-integer arguments!"); 4074 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 4075 "getTruncateOrNoop cannot extend!"); 4076 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4077 return V; // No conversion 4078 return getTruncateExpr(V, Ty); 4079 } 4080 4081 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 4082 const SCEV *RHS) { 4083 const SCEV *PromotedLHS = LHS; 4084 const SCEV *PromotedRHS = RHS; 4085 4086 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 4087 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 4088 else 4089 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 4090 4091 return getUMaxExpr(PromotedLHS, PromotedRHS); 4092 } 4093 4094 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4095 const SCEV *RHS) { 4096 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4097 return getUMinFromMismatchedTypes(Ops); 4098 } 4099 4100 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 4101 SmallVectorImpl<const SCEV *> &Ops) { 4102 assert(!Ops.empty() && "At least one operand must be!"); 4103 // Trivial case. 4104 if (Ops.size() == 1) 4105 return Ops[0]; 4106 4107 // Find the max type first. 4108 Type *MaxType = nullptr; 4109 for (auto *S : Ops) 4110 if (MaxType) 4111 MaxType = getWiderType(MaxType, S->getType()); 4112 else 4113 MaxType = S->getType(); 4114 assert(MaxType && "Failed to find maximum type!"); 4115 4116 // Extend all ops to max type. 4117 SmallVector<const SCEV *, 2> PromotedOps; 4118 for (auto *S : Ops) 4119 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4120 4121 // Generate umin. 4122 return getUMinExpr(PromotedOps); 4123 } 4124 4125 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4126 // A pointer operand may evaluate to a nonpointer expression, such as null. 4127 if (!V->getType()->isPointerTy()) 4128 return V; 4129 4130 while (true) { 4131 if (const SCEVIntegralCastExpr *Cast = dyn_cast<SCEVIntegralCastExpr>(V)) { 4132 V = Cast->getOperand(); 4133 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 4134 const SCEV *PtrOp = nullptr; 4135 for (const SCEV *NAryOp : NAry->operands()) { 4136 if (NAryOp->getType()->isPointerTy()) { 4137 // Cannot find the base of an expression with multiple pointer ops. 4138 if (PtrOp) 4139 return V; 4140 PtrOp = NAryOp; 4141 } 4142 } 4143 if (!PtrOp) // All operands were non-pointer. 4144 return V; 4145 V = PtrOp; 4146 } else // Not something we can look further into. 4147 return V; 4148 } 4149 } 4150 4151 /// Push users of the given Instruction onto the given Worklist. 4152 static void 4153 PushDefUseChildren(Instruction *I, 4154 SmallVectorImpl<Instruction *> &Worklist) { 4155 // Push the def-use children onto the Worklist stack. 4156 for (User *U : I->users()) 4157 Worklist.push_back(cast<Instruction>(U)); 4158 } 4159 4160 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 4161 SmallVector<Instruction *, 16> Worklist; 4162 PushDefUseChildren(PN, Worklist); 4163 4164 SmallPtrSet<Instruction *, 8> Visited; 4165 Visited.insert(PN); 4166 while (!Worklist.empty()) { 4167 Instruction *I = Worklist.pop_back_val(); 4168 if (!Visited.insert(I).second) 4169 continue; 4170 4171 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 4172 if (It != ValueExprMap.end()) { 4173 const SCEV *Old = It->second; 4174 4175 // Short-circuit the def-use traversal if the symbolic name 4176 // ceases to appear in expressions. 
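      // e.g. (illustration only): if I was mapped to (%a + %b) and the
      // symbolic PHI name no longer occurs in that expression, nothing
      // computed from I can mention it either, so I's def-use subtree is
      // skipped.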
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
/// use its start expression. If the loop is not L and IgnoreOtherLoops is
/// true, use the AddRec itself; otherwise the rewrite cannot be done. The
/// rewrite also cannot be done if the SCEV contains a non-invariant
/// SCEVUnknown.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
/// use its post-increment expression; for other loops, use the AddRec itself.
/// The rewrite cannot be done if the SCEV contains a non-invariant
/// SCEVUnknown.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
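    // e.g. (illustration only): {0,+,1}<L> is rewritten to its post-increment
    // form {1,+,1}<L>, i.e. the value the recurrence takes after traversing
    // the backedge once more.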
4268 if (Expr->getLoop() == L) 4269 return Expr->getPostIncExpr(SE); 4270 SeenOtherLoops = true; 4271 return Expr; 4272 } 4273 4274 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4275 4276 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4277 4278 private: 4279 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4280 : SCEVRewriteVisitor(SE), L(L) {} 4281 4282 const Loop *L; 4283 bool SeenLoopVariantSCEVUnknown = false; 4284 bool SeenOtherLoops = false; 4285 }; 4286 4287 /// This class evaluates the compare condition by matching it against the 4288 /// condition of loop latch. If there is a match we assume a true value 4289 /// for the condition while building SCEV nodes. 4290 class SCEVBackedgeConditionFolder 4291 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4292 public: 4293 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4294 ScalarEvolution &SE) { 4295 bool IsPosBECond = false; 4296 Value *BECond = nullptr; 4297 if (BasicBlock *Latch = L->getLoopLatch()) { 4298 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4299 if (BI && BI->isConditional()) { 4300 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4301 "Both outgoing branches should not target same header!"); 4302 BECond = BI->getCondition(); 4303 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4304 } else { 4305 return S; 4306 } 4307 } 4308 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4309 return Rewriter.visit(S); 4310 } 4311 4312 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4313 const SCEV *Result = Expr; 4314 bool InvariantF = SE.isLoopInvariant(Expr, L); 4315 4316 if (!InvariantF) { 4317 Instruction *I = cast<Instruction>(Expr->getValue()); 4318 switch (I->getOpcode()) { 4319 case Instruction::Select: { 4320 SelectInst *SI = cast<SelectInst>(I); 4321 Optional<const SCEV *> Res = 4322 compareWithBackedgeCondition(SI->getCondition()); 4323 if (Res.hasValue()) { 4324 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4325 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4326 } 4327 break; 4328 } 4329 default: { 4330 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4331 if (Res.hasValue()) 4332 Result = Res.getValue(); 4333 break; 4334 } 4335 } 4336 } 4337 return Result; 4338 } 4339 4340 private: 4341 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4342 bool IsPosBECond, ScalarEvolution &SE) 4343 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4344 IsPositiveBECond(IsPosBECond) {} 4345 4346 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4347 4348 const Loop *L; 4349 /// Loop back condition. 4350 Value *BackedgeCond = nullptr; 4351 /// Set to true if loop back is on positive branch condition. 4352 bool IsPositiveBECond; 4353 }; 4354 4355 Optional<const SCEV *> 4356 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4357 4358 // If value matches the backedge condition for loop latch, 4359 // then return a constant evolution node based on loopback 4360 // branch taken. 4361 if (BackedgeCond == IC) 4362 return IsPositiveBECond ? 
SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

SCEV::NoWrapFlags
ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoSignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop.
The exceptions are assumptions and 4460 // guards present in the loop -- SCEV is not great at exploiting 4461 // these to compute max backedge taken counts, but can still use 4462 // these to prove lack of overflow. Use this fact to avoid 4463 // doing extra work that may not pay off. 4464 4465 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && 4466 AC.assumptions().empty()) 4467 return Result; 4468 4469 // If the backedge is guarded by a comparison with the pre-inc value the 4470 // addrec is safe. Also, if the entry is guarded by a comparison with the 4471 // start value and the backedge is guarded by a comparison with the post-inc 4472 // value, the addrec is safe. 4473 ICmpInst::Predicate Pred; 4474 const SCEV *OverflowLimit = 4475 getSignedOverflowLimitForStep(Step, &Pred, this); 4476 if (OverflowLimit && 4477 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 4478 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 4479 Result = setFlags(Result, SCEV::FlagNSW); 4480 } 4481 return Result; 4482 } 4483 SCEV::NoWrapFlags 4484 ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) { 4485 SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); 4486 4487 if (AR->hasNoUnsignedWrap()) 4488 return Result; 4489 4490 if (!AR->isAffine()) 4491 return Result; 4492 4493 const SCEV *Step = AR->getStepRecurrence(*this); 4494 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 4495 const Loop *L = AR->getLoop(); 4496 4497 // Check whether the backedge-taken count is SCEVCouldNotCompute. 4498 // Note that this serves two purposes: It filters out loops that are 4499 // simply not analyzable, and it covers the case where this code is 4500 // being called from within backedge-taken count analysis, such that 4501 // attempting to ask for the backedge-taken count would likely result 4502 // in infinite recursion. In the later case, the analysis code will 4503 // cope with a conservative value, and it will take care to purge 4504 // that value once it has finished. 4505 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 4506 4507 // Normally, in the cases we can prove no-overflow via a 4508 // backedge guarding condition, we can also compute a backedge 4509 // taken count for the loop. The exceptions are assumptions and 4510 // guards present in the loop -- SCEV is not great at exploiting 4511 // these to compute max backedge taken counts, but can still use 4512 // these to prove lack of overflow. Use this fact to avoid 4513 // doing extra work that may not pay off. 4514 4515 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && 4516 AC.assumptions().empty()) 4517 return Result; 4518 4519 // If the backedge is guarded by a comparison with the pre-inc value the 4520 // addrec is safe. Also, if the entry is guarded by a comparison with the 4521 // start value and the backedge is guarded by a comparison with the post-inc 4522 // value, the addrec is safe. 4523 if (isKnownPositive(Step)) { 4524 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 4525 getUnsignedRangeMax(Step)); 4526 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 4527 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 4528 Result = setFlags(Result, SCEV::FlagNUW); 4529 } 4530 } 4531 4532 return Result; 4533 } 4534 4535 namespace { 4536 4537 /// Represents an abstract binary operation. This may exist as a 4538 /// normal instruction or constant expression, or may have been 4539 /// derived from an expression tree. 
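///
/// As an illustration (not part of the original comment): for an IR
/// instruction such as
///   %r = add nsw i32 %a, %b
/// the resulting BinaryOp would be, roughly,
///   { Opcode = Instruction::Add, LHS = %a, RHS = %b,
///     IsNSW = true, IsNUW = false, IsExact = false, Op = %r }
/// whereas a BinaryOp synthesized from an expression tree (e.g. for a
/// xor-of-signmask rewritten as an add below) has Op == nullptr.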
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;
  bool IsExact = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
    if (auto *PEO = dyn_cast<PossiblyExactOperator>(Op))
      IsExact = PEO->isExact();
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false, bool IsExact = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW),
        IsExact(IsExact) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
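    // Illustrative example (an assumption for exposition, not from the
    // original source): for
    //   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
    //   %v = extractvalue { i32, i1 } %s, 0
    // we reach this point with BinOp == Instruction::Add and Signed == true;
    // if every use of %v is guarded by the overflow bit, the add built below
    // is tagged nsw.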
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // CI are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  // Recognise the intrinsic loop.decrement.reg; as it has exactly the same
  // semantics as a Sub, return a binary sub expression.
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
      return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));

  return None;
}

/// Helper function for createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}

static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}

// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which corresponds to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
//
// TODO's:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4761 // 4762 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4763 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4764 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4765 SmallVector<const SCEVPredicate *, 3> Predicates; 4766 4767 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4768 // return an AddRec expression under some predicate. 4769 4770 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4771 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4772 assert(L && "Expecting an integer loop header phi"); 4773 4774 // The loop may have multiple entrances or multiple exits; we can analyze 4775 // this phi as an addrec if it has a unique entry value and a unique 4776 // backedge value. 4777 Value *BEValueV = nullptr, *StartValueV = nullptr; 4778 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4779 Value *V = PN->getIncomingValue(i); 4780 if (L->contains(PN->getIncomingBlock(i))) { 4781 if (!BEValueV) { 4782 BEValueV = V; 4783 } else if (BEValueV != V) { 4784 BEValueV = nullptr; 4785 break; 4786 } 4787 } else if (!StartValueV) { 4788 StartValueV = V; 4789 } else if (StartValueV != V) { 4790 StartValueV = nullptr; 4791 break; 4792 } 4793 } 4794 if (!BEValueV || !StartValueV) 4795 return None; 4796 4797 const SCEV *BEValue = getSCEV(BEValueV); 4798 4799 // If the value coming around the backedge is an add with the symbolic 4800 // value we just inserted, possibly with casts that we can ignore under 4801 // an appropriate runtime guard, then we found a simple induction variable! 4802 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4803 if (!Add) 4804 return None; 4805 4806 // If there is a single occurrence of the symbolic value, possibly 4807 // casted, replace it with a recurrence. 4808 unsigned FoundIndex = Add->getNumOperands(); 4809 Type *TruncTy = nullptr; 4810 bool Signed; 4811 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4812 if ((TruncTy = 4813 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4814 if (FoundIndex == e) { 4815 FoundIndex = i; 4816 break; 4817 } 4818 4819 if (FoundIndex == Add->getNumOperands()) 4820 return None; 4821 4822 // Create an add with everything but the specified operand. 4823 SmallVector<const SCEV *, 8> Ops; 4824 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4825 if (i != FoundIndex) 4826 Ops.push_back(Add->getOperand(i)); 4827 const SCEV *Accum = getAddExpr(Ops); 4828 4829 // The runtime checks will not be valid if the step amount is 4830 // varying inside the loop. 4831 if (!isLoopInvariant(Accum, L)) 4832 return None; 4833 4834 // *** Part2: Create the predicates 4835 4836 // Analysis was successful: we have a phi-with-cast pattern for which we 4837 // can return an AddRec expression under the following predicates: 4838 // 4839 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4840 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4841 // P2: An Equal predicate that guarantees that 4842 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4843 // P3: An Equal predicate that guarantees that 4844 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4845 // 4846 // As we next prove, the above predicates guarantee that: 4847 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4848 // 4849 // 4850 // More formally, we want to prove that: 4851 // Expr(i+1) = Start + (i+1) * Accum 4852 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4853 // 4854 // Given that: 4855 // 1) Expr(0) = Start 4856 // 2) Expr(1) = Start + Accum 4857 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4858 // 3) Induction hypothesis (step i): 4859 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4860 // 4861 // Proof: 4862 // Expr(i+1) = 4863 // = Start + (i+1)*Accum 4864 // = (Start + i*Accum) + Accum 4865 // = Expr(i) + Accum 4866 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4867 // :: from step i 4868 // 4869 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4870 // 4871 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4872 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4873 // + Accum :: from P3 4874 // 4875 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4876 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4877 // 4878 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4879 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4880 // 4881 // By induction, the same applies to all iterations 1<=i<n: 4882 // 4883 4884 // Create a truncated addrec for which we will add a no overflow check (P1). 4885 const SCEV *StartVal = getSCEV(StartValueV); 4886 const SCEV *PHISCEV = 4887 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4888 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4889 4890 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4891 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4892 // will be constant. 4893 // 4894 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4895 // add P1. 4896 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4897 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4898 Signed ? SCEVWrapPredicate::IncrementNSSW 4899 : SCEVWrapPredicate::IncrementNUSW; 4900 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4901 Predicates.push_back(AddRecPred); 4902 } 4903 4904 // Create the Equal Predicates P2,P3: 4905 4906 // It is possible that the predicates P2 and/or P3 are computable at 4907 // compile time due to StartVal and/or Accum being constants. 4908 // If either one is, then we can check that now and escape if either P2 4909 // or P3 is false. 4910 4911 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4912 // for each of StartVal and Accum 4913 auto getExtendedExpr = [&](const SCEV *Expr, 4914 bool CreateSignExtend) -> const SCEV * { 4915 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4916 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4917 const SCEV *ExtendedExpr = 4918 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time.
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW).
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
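  // For illustration (hypothetical values, not from the original source):
  // the cache maps {PHI, Loop} keys to the rewrite computed by the Impl
  // function above, e.g.
  //   PredicatedSCEVRewrites[{%X, L}] == {{%Start,+,%Step}, {P1, P2, P3}}
  // and a failed analysis is remembered as {%X, {}} -- the PHI mapped to
  // itself with no predicates -- which is what the checks below distinguish.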
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
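  // For example (illustrative): if BEValueV is "%iv.next = add nsw i32 %iv, 1"
  // and %iv.next is guaranteed to execute (so that poison would trigger UB)
  // on every iteration, then {Start+Accum,+,Accum} -- the post-inc
  // recurrence -- can carry the nsw flag as well; that is what the
  // isAddRecNeverPoison check below establishes.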
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
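      // Illustrative examples (not from the original source): an Accum of
      // {0,+,1}<L> is acceptable -- the PHI then becomes, roughly, a
      // polynomial recurrence in L -- while an Accum that is an addrec of an
      // *inner* loop has no single well-defined per-iteration value in L and
      // is rejected by the check below.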
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
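  // (For instance, a later query might still simplify this PHI through
  // SimplifyInstruction or the select-like PHI logic below; keeping the
  // symbolic unknown cached would block that.)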
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr; // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant:
      case scPtrToInt:
      case scTruncate:
      case scZeroExtend:
      case scSignExtend:
      case scAddExpr:
      case scMulExpr:
      case scUMaxExpr:
      case scSMaxExpr:
      case scUMinExpr:
      case scSMinExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop BB is in, or some
        // outer loop. This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable. We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("Unknown SCEV kind!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
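//
// For example (illustrative CFG):
//   entry:  br i1 %c, label %left, label %right
//   left:   br label %merge
//   right:  br label %merge
//   merge:  %m = phi [ %x, %left ], [ %y, %right ]
// the match yields C == %c, LHS == %x, RHS == %y; the dominance checks below
// pair each incoming use with the branch edge that reaches it, also handling
// the case where the phi lists the values in the opposite order.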
5325 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5326 Value *&C, Value *&LHS, Value *&RHS) { 5327 C = BI->getCondition(); 5328 5329 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5330 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5331 5332 if (!LeftEdge.isSingleEdge()) 5333 return false; 5334 5335 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5336 5337 Use &LeftUse = Merge->getOperandUse(0); 5338 Use &RightUse = Merge->getOperandUse(1); 5339 5340 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5341 LHS = LeftUse; 5342 RHS = RightUse; 5343 return true; 5344 } 5345 5346 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5347 LHS = RightUse; 5348 RHS = LeftUse; 5349 return true; 5350 } 5351 5352 return false; 5353 } 5354 5355 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5356 auto IsReachable = 5357 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5358 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5359 const Loop *L = LI.getLoopFor(PN->getParent()); 5360 5361 // We don't want to break LCSSA, even in a SCEV expression tree. 5362 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5363 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5364 return nullptr; 5365 5366 // Try to match 5367 // 5368 // br %cond, label %left, label %right 5369 // left: 5370 // br label %merge 5371 // right: 5372 // br label %merge 5373 // merge: 5374 // V = phi [ %x, %left ], [ %y, %right ] 5375 // 5376 // as "select %cond, %x, %y" 5377 5378 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5379 assert(IDom && "At least the entry block should dominate PN"); 5380 5381 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5382 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5383 5384 if (BI && BI->isConditional() && 5385 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5386 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5387 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5388 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5389 } 5390 5391 return nullptr; 5392 } 5393 5394 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5395 if (const SCEV *S = createAddRecFromPHI(PN)) 5396 return S; 5397 5398 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5399 return S; 5400 5401 // If the PHI has a single incoming value, follow that value, unless the 5402 // PHI's incoming blocks are in a different loop, in which case doing so 5403 // risks breaking LCSSA form. Instcombine would normally zap these, but 5404 // it doesn't have DominatorTree information, so it may miss cases. 5405 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5406 if (LI.replacementPreservesLCSSAForm(PN, V)) 5407 return getSCEV(V); 5408 5409 // If it's not a loop phi, we can't handle it yet. 5410 return getUnknown(PN); 5411 } 5412 5413 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5414 Value *Cond, 5415 Value *TrueVal, 5416 Value *FalseVal) { 5417 // Handle "constant" branch or select. This can occur for instance when a 5418 // loop pass transforms an inner loop and moves on to process the outer loop. 5419 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5420 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5421 5422 // Try to match some simple smax or umax patterns. 
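  // e.g. (illustrative): %s = select (icmp sgt i32 %a, %b), i32 %a, i32 %b
  // becomes smax(%a, %b), and the "+x" variants handled below cover selects
  // whose arms differ from the compared values by a common offset.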
5423 auto *ICI = dyn_cast<ICmpInst>(Cond); 5424 if (!ICI) 5425 return getUnknown(I); 5426 5427 Value *LHS = ICI->getOperand(0); 5428 Value *RHS = ICI->getOperand(1); 5429 5430 switch (ICI->getPredicate()) { 5431 case ICmpInst::ICMP_SLT: 5432 case ICmpInst::ICMP_SLE: 5433 std::swap(LHS, RHS); 5434 LLVM_FALLTHROUGH; 5435 case ICmpInst::ICMP_SGT: 5436 case ICmpInst::ICMP_SGE: 5437 // a >s b ? a+x : b+x -> smax(a, b)+x 5438 // a >s b ? b+x : a+x -> smin(a, b)+x 5439 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5440 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5441 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5442 const SCEV *LA = getSCEV(TrueVal); 5443 const SCEV *RA = getSCEV(FalseVal); 5444 const SCEV *LDiff = getMinusSCEV(LA, LS); 5445 const SCEV *RDiff = getMinusSCEV(RA, RS); 5446 if (LDiff == RDiff) 5447 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5448 LDiff = getMinusSCEV(LA, RS); 5449 RDiff = getMinusSCEV(RA, LS); 5450 if (LDiff == RDiff) 5451 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5452 } 5453 break; 5454 case ICmpInst::ICMP_ULT: 5455 case ICmpInst::ICMP_ULE: 5456 std::swap(LHS, RHS); 5457 LLVM_FALLTHROUGH; 5458 case ICmpInst::ICMP_UGT: 5459 case ICmpInst::ICMP_UGE: 5460 // a >u b ? a+x : b+x -> umax(a, b)+x 5461 // a >u b ? b+x : a+x -> umin(a, b)+x 5462 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5463 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5464 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5465 const SCEV *LA = getSCEV(TrueVal); 5466 const SCEV *RA = getSCEV(FalseVal); 5467 const SCEV *LDiff = getMinusSCEV(LA, LS); 5468 const SCEV *RDiff = getMinusSCEV(RA, RS); 5469 if (LDiff == RDiff) 5470 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5471 LDiff = getMinusSCEV(LA, RS); 5472 RDiff = getMinusSCEV(RA, LS); 5473 if (LDiff == RDiff) 5474 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5475 } 5476 break; 5477 case ICmpInst::ICMP_NE: 5478 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5479 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5480 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5481 const SCEV *One = getOne(I->getType()); 5482 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5483 const SCEV *LA = getSCEV(TrueVal); 5484 const SCEV *RA = getSCEV(FalseVal); 5485 const SCEV *LDiff = getMinusSCEV(LA, LS); 5486 const SCEV *RDiff = getMinusSCEV(RA, One); 5487 if (LDiff == RDiff) 5488 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5489 } 5490 break; 5491 case ICmpInst::ICMP_EQ: 5492 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5493 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5494 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5495 const SCEV *One = getOne(I->getType()); 5496 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5497 const SCEV *LA = getSCEV(TrueVal); 5498 const SCEV *RA = getSCEV(FalseVal); 5499 const SCEV *LDiff = getMinusSCEV(LA, One); 5500 const SCEV *RDiff = getMinusSCEV(RA, LS); 5501 if (LDiff == RDiff) 5502 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5503 } 5504 break; 5505 default: 5506 break; 5507 } 5508 5509 return getUnknown(I); 5510 } 5511 5512 /// Expand GEP instructions into add and multiply operations. This allows them 5513 /// to be analyzed by regular SCEV code. 5514 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5515 // Don't attempt to analyze GEPs over unsized objects. 
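  // As an illustration (hypothetical IR): for
  //   %a = getelementptr i32, i32* %base, i64 %i
  // the resulting SCEV is roughly (%base + 4 * %i): the element size becomes
  // a multiply and each index contributes an add.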
5516 if (!GEP->getSourceElementType()->isSized()) 5517 return getUnknown(GEP); 5518 5519 SmallVector<const SCEV *, 4> IndexExprs; 5520 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5521 IndexExprs.push_back(getSCEV(*Index)); 5522 return getGEPExpr(GEP, IndexExprs); 5523 } 5524 5525 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5526 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5527 return C->getAPInt().countTrailingZeros(); 5528 5529 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S)) 5530 return GetMinTrailingZeros(I->getOperand()); 5531 5532 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5533 return std::min(GetMinTrailingZeros(T->getOperand()), 5534 (uint32_t)getTypeSizeInBits(T->getType())); 5535 5536 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5537 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5538 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5539 ? getTypeSizeInBits(E->getType()) 5540 : OpRes; 5541 } 5542 5543 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5544 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5545 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5546 ? getTypeSizeInBits(E->getType()) 5547 : OpRes; 5548 } 5549 5550 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5551 // The result is the min of all operands results. 5552 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5553 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5554 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5555 return MinOpRes; 5556 } 5557 5558 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5559 // The result is the sum of all operands results. 5560 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5561 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5562 for (unsigned i = 1, e = M->getNumOperands(); 5563 SumOpRes != BitWidth && i != e; ++i) 5564 SumOpRes = 5565 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5566 return SumOpRes; 5567 } 5568 5569 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5570 // The result is the min of all operands results. 5571 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5572 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5573 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5574 return MinOpRes; 5575 } 5576 5577 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5578 // The result is the min of all operands results. 5579 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5580 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5581 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5582 return MinOpRes; 5583 } 5584 5585 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5586 // The result is the min of all operands results. 5587 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5588 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5589 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5590 return MinOpRes; 5591 } 5592 5593 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5594 // For a SCEVUnknown, ask ValueTracking. 
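    // e.g. (illustrative): for %x = shl i32 %y, 3, ValueTracking knows the
    // low 3 bits of %x are zero, so we report 3 trailing zero bits here.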
5595 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5596 return Known.countMinTrailingZeros(); 5597 } 5598 5599 // SCEVUDivExpr 5600 return 0; 5601 } 5602 5603 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5604 auto I = MinTrailingZerosCache.find(S); 5605 if (I != MinTrailingZerosCache.end()) 5606 return I->second; 5607 5608 uint32_t Result = GetMinTrailingZerosImpl(S); 5609 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5610 assert(InsertPair.second && "Should insert a new key"); 5611 return InsertPair.first->second; 5612 } 5613 5614 /// Helper method to assign a range to V from metadata present in the IR. 5615 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5616 if (Instruction *I = dyn_cast<Instruction>(V)) 5617 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5618 return getConstantRangeFromMetadata(*MD); 5619 5620 return None; 5621 } 5622 5623 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec, 5624 SCEV::NoWrapFlags Flags) { 5625 if (AddRec->getNoWrapFlags(Flags) != Flags) { 5626 AddRec->setNoWrapFlags(Flags); 5627 UnsignedRanges.erase(AddRec); 5628 SignedRanges.erase(AddRec); 5629 } 5630 } 5631 5632 /// Determine the range for a particular SCEV. If SignHint is 5633 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5634 /// with a "cleaner" unsigned (resp. signed) representation. 5635 const ConstantRange & 5636 ScalarEvolution::getRangeRef(const SCEV *S, 5637 ScalarEvolution::RangeSignHint SignHint) { 5638 DenseMap<const SCEV *, ConstantRange> &Cache = 5639 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5640 : SignedRanges; 5641 ConstantRange::PreferredRangeType RangeType = 5642 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 5643 ? ConstantRange::Unsigned : ConstantRange::Signed; 5644 5645 // See if we've computed this range already. 5646 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5647 if (I != Cache.end()) 5648 return I->second; 5649 5650 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5651 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5652 5653 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5654 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5655 using OBO = OverflowingBinaryOperator; 5656 5657 // If the value has known zeros, the maximum value will have those known zeros 5658 // as well. 
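  // Worked example (illustrative): with BitWidth == 8 and TZ == 2 (S is a
  // multiple of 4), the unsigned clamp below becomes [0, 0xFC + 1), i.e. the
  // largest attainable value is 252.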
5659 uint32_t TZ = GetMinTrailingZeros(S); 5660 if (TZ != 0) { 5661 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5662 ConservativeResult = 5663 ConstantRange(APInt::getMinValue(BitWidth), 5664 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5665 else 5666 ConservativeResult = ConstantRange( 5667 APInt::getSignedMinValue(BitWidth), 5668 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5669 } 5670 5671 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5672 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5673 unsigned WrapType = OBO::AnyWrap; 5674 if (Add->hasNoSignedWrap()) 5675 WrapType |= OBO::NoSignedWrap; 5676 if (Add->hasNoUnsignedWrap()) 5677 WrapType |= OBO::NoUnsignedWrap; 5678 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5679 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 5680 WrapType, RangeType); 5681 return setRange(Add, SignHint, 5682 ConservativeResult.intersectWith(X, RangeType)); 5683 } 5684 5685 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5686 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5687 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5688 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5689 return setRange(Mul, SignHint, 5690 ConservativeResult.intersectWith(X, RangeType)); 5691 } 5692 5693 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5694 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5695 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5696 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5697 return setRange(SMax, SignHint, 5698 ConservativeResult.intersectWith(X, RangeType)); 5699 } 5700 5701 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5702 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5703 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5704 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5705 return setRange(UMax, SignHint, 5706 ConservativeResult.intersectWith(X, RangeType)); 5707 } 5708 5709 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) { 5710 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint); 5711 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i) 5712 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint)); 5713 return setRange(SMin, SignHint, 5714 ConservativeResult.intersectWith(X, RangeType)); 5715 } 5716 5717 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) { 5718 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint); 5719 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i) 5720 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint)); 5721 return setRange(UMin, SignHint, 5722 ConservativeResult.intersectWith(X, RangeType)); 5723 } 5724 5725 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5726 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5727 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5728 return setRange(UDiv, SignHint, 5729 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 5730 } 5731 5732 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5733 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5734 return setRange(ZExt, SignHint, 5735 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), 5736 RangeType)); 5737 } 5738 5739 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5740 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5741 return setRange(SExt, 
SignHint,
                  ConservativeResult.intersectWith(X.signExtend(BitWidth),
                                                   RangeType));
  }

  if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) {
    ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
    return setRange(PtrToInt, SignHint, X);
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth),
                                                     RangeType));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isNullValue())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands other than the initial
    // value have the same sign or are zero, the value won't ever be:
    // 1: smaller than the initial value if the operands are non-negative,
    // 2: bigger than the initial value if the operands are non-positive.
    // In both cases, the value cannot cross the signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
                                       APInt::getSignedMinValue(BitWidth)),
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(
                APInt::getSignedMinValue(BitWidth),
                getSignedRangeMax(AddRec->getStart()) + 1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromAffine, RangeType);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
      }

      // Now try symbolic BE count and more powerful methods.
5814 if (UseExpensiveRangeSharpening) { 5815 const SCEV *SymbolicMaxBECount = 5816 getSymbolicMaxBackedgeTakenCount(AddRec->getLoop()); 5817 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) && 5818 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5819 AddRec->hasNoSelfWrap()) { 5820 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR( 5821 AddRec, SymbolicMaxBECount, BitWidth, SignHint); 5822 ConservativeResult = 5823 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType); 5824 } 5825 } 5826 } 5827 5828 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5829 } 5830 5831 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5832 // Check if the IR explicitly contains !range metadata. 5833 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5834 if (MDRange.hasValue()) 5835 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(), 5836 RangeType); 5837 5838 // Split here to avoid paying the compile-time cost of calling both 5839 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5840 // if needed. 5841 const DataLayout &DL = getDataLayout(); 5842 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5843 // For a SCEVUnknown, ask ValueTracking. 5844 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5845 if (Known.getBitWidth() != BitWidth) 5846 Known = Known.zextOrTrunc(BitWidth); 5847 // If Known does not result in full-set, intersect with it. 5848 if (Known.getMinValue() != Known.getMaxValue() + 1) 5849 ConservativeResult = ConservativeResult.intersectWith( 5850 ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1), 5851 RangeType); 5852 } else { 5853 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 5854 "generalize as needed!"); 5855 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5856 // If the pointer size is larger than the index size type, this can cause 5857 // NS to be larger than BitWidth. So compensate for this. 5858 if (U->getType()->isPointerTy()) { 5859 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType()); 5860 int ptrIdxDiff = ptrSize - BitWidth; 5861 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff) 5862 NS -= ptrIdxDiff; 5863 } 5864 5865 if (NS > 1) 5866 ConservativeResult = ConservativeResult.intersectWith( 5867 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 5868 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1), 5869 RangeType); 5870 } 5871 5872 // A range of Phi is a subset of union of all ranges of its input. 5873 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) { 5874 // Make sure that we do not run over cycled Phis. 5875 if (PendingPhiRanges.insert(Phi).second) { 5876 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false); 5877 for (auto &Op : Phi->operands()) { 5878 auto OpRange = getRangeRef(getSCEV(Op), SignHint); 5879 RangeFromOps = RangeFromOps.unionWith(OpRange); 5880 // No point to continue if we already have a full set. 
5881 if (RangeFromOps.isFullSet()) 5882 break; 5883 } 5884 ConservativeResult = 5885 ConservativeResult.intersectWith(RangeFromOps, RangeType); 5886 bool Erased = PendingPhiRanges.erase(Phi); 5887 assert(Erased && "Failed to erase Phi properly?"); 5888 (void) Erased; 5889 } 5890 } 5891 5892 return setRange(U, SignHint, std::move(ConservativeResult)); 5893 } 5894 5895 return setRange(S, SignHint, std::move(ConservativeResult)); 5896 } 5897 5898 // Given a StartRange, Step and MaxBECount for an expression, compute a range 5899 // of values that the expression can take. Initially, the expression has a 5900 // value from StartRange and then is changed by Step up to MaxBECount times. 5901 // The Signed argument defines whether we treat Step as signed or unsigned. 5902 static ConstantRange getRangeForAffineARHelper(APInt Step, 5903 const ConstantRange &StartRange, 5904 const APInt &MaxBECount, 5905 unsigned BitWidth, bool Signed) { 5906 // If either Step or MaxBECount is 0, then the expression won't change, and we 5907 // just need to return the initial range. 5908 if (Step == 0 || MaxBECount == 0) 5909 return StartRange; 5910 5911 // If we don't know anything about the initial value (i.e. StartRange is 5912 // FullRange), then we don't know anything about the final range either. 5913 // Return FullRange. 5914 if (StartRange.isFullSet()) 5915 return ConstantRange::getFull(BitWidth); 5916 5917 // If Step is signed and negative, then we use its absolute value, but we also 5918 // note that we're moving in the opposite direction. 5919 bool Descending = Signed && Step.isNegative(); 5920 5921 if (Signed) 5922 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this: 5923 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128. 5924 // These equations hold due to the well-defined wrap-around behavior of 5925 // APInt. 5926 Step = Step.abs(); 5927 5928 // Check whether the total change, Step * MaxBECount, covers more than the 5929 // full span of BitWidth. If it does, the expression is guaranteed to overflow. 5930 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount)) 5931 return ConstantRange::getFull(BitWidth); 5932 5933 // Offset is by how much the expression can change. The check above guarantees 5934 // no overflow here. 5935 APInt Offset = Step * MaxBECount; 5936 5937 // The minimum value of the final range will match the minimum of StartRange 5938 // if the expression is increasing, and will be decreased by Offset otherwise. 5939 // The maximum value of the final range will match the maximum of StartRange 5940 // if the expression is decreasing, and will be increased by Offset otherwise. 5941 APInt StartLower = StartRange.getLower(); 5942 APInt StartUpper = StartRange.getUpper() - 1; 5943 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset)) 5944 : (StartUpper + std::move(Offset)); 5945 5946 // It's possible that the new minimum/maximum value will fall into the initial 5947 // range (due to wrap-around). This means that the expression can take any 5948 // value in this bitwidth, and we have to return the full range. 5949 if (StartRange.contains(MovedBoundary)) 5950 return ConstantRange::getFull(BitWidth); 5951 5952 APInt NewLower = 5953 Descending ? std::move(MovedBoundary) : std::move(StartLower); 5954 APInt NewUpper = 5955 Descending ? std::move(StartUpper) : std::move(MovedBoundary); 5956 NewUpper += 1; 5957 5958 // No overflow detected; return the range [NewLower, NewUpper).
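  // For example, for i8 with StartRange = [10, 20), Step = 3 and
  // MaxBECount = 5: Offset = 15 and MovedBoundary = 19 + 15 = 34, which is not
  // contained in [10, 20), so we return [10, 35).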
5959 return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper)); 5960 } 5961 5962 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 5963 const SCEV *Step, 5964 const SCEV *MaxBECount, 5965 unsigned BitWidth) { 5966 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 5967 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5968 "Precondition!"); 5969 5970 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 5971 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount); 5972 5973 // First, consider step signed. 5974 ConstantRange StartSRange = getSignedRange(Start); 5975 ConstantRange StepSRange = getSignedRange(Step); 5976 5977 // If Step can be both positive and negative, we need to find ranges for the 5978 // maximum absolute step values in both directions and union them. 5979 ConstantRange SR = 5980 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5981 MaxBECountValue, BitWidth, /* Signed = */ true); 5982 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5983 StartSRange, MaxBECountValue, 5984 BitWidth, /* Signed = */ true)); 5985 5986 // Next, consider step unsigned. 5987 ConstantRange UR = getRangeForAffineARHelper( 5988 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5989 MaxBECountValue, BitWidth, /* Signed = */ false); 5990 5991 // Finally, intersect signed and unsigned ranges. 5992 return SR.intersectWith(UR, ConstantRange::Smallest); 5993 } 5994 5995 ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR( 5996 const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth, 5997 ScalarEvolution::RangeSignHint SignHint) { 5998 assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!"); 5999 assert(AddRec->hasNoSelfWrap() && 6000 "This only works for non-self-wrapping AddRecs!"); 6001 const bool IsSigned = SignHint == HINT_RANGE_SIGNED; 6002 const SCEV *Step = AddRec->getStepRecurrence(*this); 6003 // Only deal with constant step to save compile time. 6004 if (!isa<SCEVConstant>(Step)) 6005 return ConstantRange::getFull(BitWidth); 6006 // Let's make sure that we can prove that we do not self-wrap during 6007 // MaxBECount iterations. We need this because MaxBECount is a maximum 6008 // iteration count estimate, and we might infer the no-self-wrap flag (nw) 6009 // from some exit for which we do not know the max exit count (or any other side reasoning). 6010 // TODO: Turn into assert at some point. 6011 if (getTypeSizeInBits(MaxBECount->getType()) > 6012 getTypeSizeInBits(AddRec->getType())) 6013 return ConstantRange::getFull(BitWidth); 6014 MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType()); 6015 const SCEV *RangeWidth = getMinusOne(AddRec->getType()); 6016 const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step)); 6017 const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs); 6018 if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount, 6019 MaxItersWithoutWrap)) 6020 return ConstantRange::getFull(BitWidth); 6021 6022 ICmpInst::Predicate LEPred = 6023 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 6024 ICmpInst::Predicate GEPred = 6025 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 6026 const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this); 6027 6028 // We know that there is no self-wrap. Let's take Start and End values and 6029 // look at all intermediate values V1, V2, ..., Vn that IndVar takes during 6030 // the iteration.
They either lie inside the range [Min(Start, End), 6031 // Max(Start, End)] or outside it: 6032 // 6033 // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax; 6034 // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax; 6035 // 6036 // The no-self-wrap flag guarantees that the intermediate values cannot be 6037 // BOTH outside and inside the range [Min(Start, End), Max(Start, End)]. 6038 // Using that knowledge, let's try to prove that we are dealing with Case 1. 6039 // It is so if Start <= End and the step is positive, or Start >= End and the step is negative. 6040 const SCEV *Start = AddRec->getStart(); 6041 ConstantRange StartRange = getRangeRef(Start, SignHint); 6042 ConstantRange EndRange = getRangeRef(End, SignHint); 6043 ConstantRange RangeBetween = StartRange.unionWith(EndRange); 6044 // If they already cover the full iteration space, we will learn nothing 6045 // useful even if we prove what we want to prove. 6046 if (RangeBetween.isFullSet()) 6047 return RangeBetween; 6048 // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax). 6049 bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet() 6050 : RangeBetween.isWrappedSet(); 6051 if (IsWrappedSet) 6052 return ConstantRange::getFull(BitWidth); 6053 6054 if (isKnownPositive(Step) && 6055 isKnownPredicateViaConstantRanges(LEPred, Start, End)) 6056 return RangeBetween; 6057 else if (isKnownNegative(Step) && 6058 isKnownPredicateViaConstantRanges(GEPred, Start, End)) 6059 return RangeBetween; 6060 return ConstantRange::getFull(BitWidth); 6061 } 6062 6063 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 6064 const SCEV *Step, 6065 const SCEV *MaxBECount, 6066 unsigned BitWidth) { 6067 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 6068 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 6069 6070 struct SelectPattern { 6071 Value *Condition = nullptr; 6072 APInt TrueValue; 6073 APInt FalseValue; 6074 6075 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 6076 const SCEV *S) { 6077 Optional<unsigned> CastOp; 6078 APInt Offset(BitWidth, 0); 6079 6080 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 6081 "Should be!"); 6082 6083 // Peel off a constant offset: 6084 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 6085 // In the future we could consider being smarter here and handle 6086 // {Start+Step,+,Step} too.
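      // What we do handle, for example: given
      // S = (2 + (zext i8 (select %c, i8 1, i8 4) to i32)), we peel
      // Offset = 2 and CastOp = scZeroExtend, match the select against
      // TrueValue = 1 and FalseValue = 4, and after re-applying the cast and
      // the offset end up with TrueValue = 3 and FalseValue = 6.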
6087 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 6088 return; 6089 6090 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 6091 S = SA->getOperand(1); 6092 } 6093 6094 // Peel off a cast operation 6095 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 6096 CastOp = SCast->getSCEVType(); 6097 S = SCast->getOperand(); 6098 } 6099 6100 using namespace llvm::PatternMatch; 6101 6102 auto *SU = dyn_cast<SCEVUnknown>(S); 6103 const APInt *TrueVal, *FalseVal; 6104 if (!SU || 6105 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 6106 m_APInt(FalseVal)))) { 6107 Condition = nullptr; 6108 return; 6109 } 6110 6111 TrueValue = *TrueVal; 6112 FalseValue = *FalseVal; 6113 6114 // Re-apply the cast we peeled off earlier 6115 if (CastOp.hasValue()) 6116 switch (*CastOp) { 6117 default: 6118 llvm_unreachable("Unknown SCEV cast type!"); 6119 6120 case scTruncate: 6121 TrueValue = TrueValue.trunc(BitWidth); 6122 FalseValue = FalseValue.trunc(BitWidth); 6123 break; 6124 case scZeroExtend: 6125 TrueValue = TrueValue.zext(BitWidth); 6126 FalseValue = FalseValue.zext(BitWidth); 6127 break; 6128 case scSignExtend: 6129 TrueValue = TrueValue.sext(BitWidth); 6130 FalseValue = FalseValue.sext(BitWidth); 6131 break; 6132 } 6133 6134 // Re-apply the constant offset we peeled off earlier 6135 TrueValue += Offset; 6136 FalseValue += Offset; 6137 } 6138 6139 bool isRecognized() { return Condition != nullptr; } 6140 }; 6141 6142 SelectPattern StartPattern(*this, BitWidth, Start); 6143 if (!StartPattern.isRecognized()) 6144 return ConstantRange::getFull(BitWidth); 6145 6146 SelectPattern StepPattern(*this, BitWidth, Step); 6147 if (!StepPattern.isRecognized()) 6148 return ConstantRange::getFull(BitWidth); 6149 6150 if (StartPattern.Condition != StepPattern.Condition) { 6151 // We don't handle this case today; but we could, by considering four 6152 // possibilities below instead of two. I'm not sure if there are cases where 6153 // that will help over what getRange already does, though. 6154 return ConstantRange::getFull(BitWidth); 6155 } 6156 6157 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 6158 // construct arbitrary general SCEV expressions here. This function is called 6159 // from deep in the call stack, and calling getSCEV (on a sext instruction, 6160 // say) can end up caching a suboptimal value. 6161 6162 // FIXME: without the explicit `this` receiver below, MSVC errors out with 6163 // C2352 and C2512 (otherwise it isn't needed). 6164 6165 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 6166 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 6167 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 6168 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 6169 6170 ConstantRange TrueRange = 6171 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 6172 ConstantRange FalseRange = 6173 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 6174 6175 return TrueRange.unionWith(FalseRange); 6176 } 6177 6178 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 6179 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 6180 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 6181 6182 // Return early if there are no flags to propagate to the SCEV. 
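  // For example, an `add nuw nsw` instruction contributes both FlagNUW and
  // FlagNSW here, but only if isSCEVExprNeverPoison below confirms that the
  // flags can be transferred to the instruction-independent SCEV expression.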
6183 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6184 if (BinOp->hasNoUnsignedWrap()) 6185 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 6186 if (BinOp->hasNoSignedWrap()) 6187 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 6188 if (Flags == SCEV::FlagAnyWrap) 6189 return SCEV::FlagAnyWrap; 6190 6191 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 6192 } 6193 6194 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 6195 // Here we check that I is in the header of the innermost loop containing I, 6196 // since we only deal with instructions in the loop header. The actual loop we 6197 // need to check later will come from an add recurrence, but getting that 6198 // requires computing the SCEV of the operands, which can be expensive. This 6199 // check can be done cheaply to rule out some cases early. 6200 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 6201 if (InnermostContainingLoop == nullptr || 6202 InnermostContainingLoop->getHeader() != I->getParent()) 6203 return false; 6204 6205 // Only proceed if we can prove that I does not yield poison. 6206 if (!programUndefinedIfPoison(I)) 6207 return false; 6208 6209 // At this point we know that if I is executed, then it does not wrap 6210 // according to at least one of NSW or NUW. If I is not executed, then we do 6211 // not know if the calculation that I represents would wrap. Multiple 6212 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 6213 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 6214 // derived from other instructions that map to the same SCEV. We cannot make 6215 // that guarantee for cases where I is not executed. So we need to find the 6216 // loop that I is considered in relation to and prove that I is executed for 6217 // every iteration of that loop. That implies that the value that I 6218 // calculates does not wrap anywhere in the loop, so then we can apply the 6219 // flags to the SCEV. 6220 // 6221 // We check isLoopInvariant to disambiguate in case we are adding recurrences 6222 // from different loops, so that we know which loop to prove that I is 6223 // executed in. 6224 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 6225 // I could be an extractvalue from a call to an overflow intrinsic. 6226 // TODO: We can do better here in some cases. 6227 if (!isSCEVable(I->getOperand(OpIndex)->getType())) 6228 return false; 6229 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 6230 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 6231 bool AllOtherOpsLoopInvariant = true; 6232 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 6233 ++OtherOpIndex) { 6234 if (OtherOpIndex != OpIndex) { 6235 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 6236 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 6237 AllOtherOpsLoopInvariant = false; 6238 break; 6239 } 6240 } 6241 } 6242 if (AllOtherOpsLoopInvariant && 6243 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 6244 return true; 6245 } 6246 } 6247 return false; 6248 } 6249 6250 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 6251 // If we know that \c I can never be poison, period, then that's enough.
6252 if (isSCEVExprNeverPoison(I)) 6253 return true; 6254 6255 // For an add recurrence specifically, we assume that infinite loops without 6256 // side effects are undefined behavior, and then reason as follows: 6257 // 6258 // If the add recurrence is poison in any iteration, it is poison on all 6259 // future iterations (since incrementing poison yields poison). If the result 6260 // of the add recurrence is fed into the loop latch condition and the loop 6261 // does not contain any throws or exiting blocks other than the latch, we now 6262 // have the ability to "choose" whether the backedge is taken or not (by 6263 // choosing a sufficiently evil value for the poison feeding into the branch) 6264 // for every iteration including and after the one in which \p I first became 6265 // poison. There are two possibilities (let's call K the iteration in which 6266 // \p I first becomes poison): 6267 // 6268 // 1. In the set of iterations including and after K, the loop body executes 6269 // no side effects. In this case executing the backedge an infinite number 6270 // of times will yield undefined behavior. 6271 // 6272 // 2. In the set of iterations including and after K, the loop body executes 6273 // at least one side effect. In this case, that specific instance of side 6274 // effect is control dependent on poison, which also yields undefined 6275 // behavior. 6276 6277 auto *ExitingBB = L->getExitingBlock(); 6278 auto *LatchBB = L->getLoopLatch(); 6279 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB) 6280 return false; 6281 6282 SmallPtrSet<const Instruction *, 16> Pushed; 6283 SmallVector<const Instruction *, 8> PoisonStack; 6284 6285 // We start by assuming \c I, the post-inc add recurrence, is poison. Only 6286 // things that are known to be poison under that assumption go on the 6287 // PoisonStack. 6288 Pushed.insert(I); 6289 PoisonStack.push_back(I); 6290 6291 bool LatchControlDependentOnPoison = false; 6292 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 6293 const Instruction *Poison = PoisonStack.pop_back_val(); 6294 6295 for (auto *PoisonUser : Poison->users()) { 6296 if (propagatesPoison(cast<Operator>(PoisonUser))) { 6297 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 6298 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 6299 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 6300 assert(BI->isConditional() && "Only possibility!"); 6301 if (BI->getParent() == LatchBB) { 6302 LatchControlDependentOnPoison = true; 6303 break; 6304 } 6305 } 6306 } 6307 } 6308 6309 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 6310 } 6311 6312 ScalarEvolution::LoopProperties 6313 ScalarEvolution::getLoopProperties(const Loop *L) { 6314 using LoopProperties = ScalarEvolution::LoopProperties; 6315 6316 auto Itr = LoopPropertiesCache.find(L); 6317 if (Itr == LoopPropertiesCache.end()) { 6318 auto HasSideEffects = [](Instruction *I) { 6319 if (auto *SI = dyn_cast<StoreInst>(I)) 6320 return !SI->isSimple(); 6321 6322 return I->mayHaveSideEffects(); 6323 }; 6324 6325 LoopProperties LP = {/* HasNoAbnormalExits */ true, 6326 /*HasNoSideEffects*/ true}; 6327 6328 for (auto *BB : L->getBlocks()) 6329 for (auto &I : *BB) { 6330 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 6331 LP.HasNoAbnormalExits = false; 6332 if (HasSideEffects(&I)) 6333 LP.HasNoSideEffects = false; 6334 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 6335 break; // We're already as pessimistic as we can get.
6336 } 6337 6338 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6339 assert(InsertPair.second && "We just checked!"); 6340 Itr = InsertPair.first; 6341 } 6342 6343 return Itr->second; 6344 } 6345 6346 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6347 if (!isSCEVable(V->getType())) 6348 return getUnknown(V); 6349 6350 if (Instruction *I = dyn_cast<Instruction>(V)) { 6351 // Don't attempt to analyze instructions in blocks that aren't 6352 // reachable. Such instructions don't matter, and they aren't required 6353 // to obey basic rules for definitions dominating uses which this 6354 // analysis depends on. 6355 if (!DT.isReachableFromEntry(I->getParent())) 6356 return getUnknown(UndefValue::get(V->getType())); 6357 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6358 return getConstant(CI); 6359 else if (isa<ConstantPointerNull>(V)) 6360 // FIXME: we shouldn't special-case null pointer constant. 6361 return getZero(V->getType()); 6362 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6363 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6364 else if (!isa<ConstantExpr>(V)) 6365 return getUnknown(V); 6366 6367 Operator *U = cast<Operator>(V); 6368 if (auto BO = MatchBinaryOp(U, DT)) { 6369 switch (BO->Opcode) { 6370 case Instruction::Add: { 6371 // The simple thing to do would be to just call getSCEV on both operands 6372 // and call getAddExpr with the result. However if we're looking at a 6373 // bunch of things all added together, this can be quite inefficient, 6374 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6375 // Instead, gather up all the operands and make a single getAddExpr call. 6376 // LLVM IR canonical form means we need only traverse the left operands. 6377 SmallVector<const SCEV *, 4> AddOps; 6378 do { 6379 if (BO->Op) { 6380 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6381 AddOps.push_back(OpSCEV); 6382 break; 6383 } 6384 6385 // If a NUW or NSW flag can be applied to the SCEV for this 6386 // addition, then compute the SCEV for this addition by itself 6387 // with a separate call to getAddExpr. We need to do that 6388 // instead of pushing the operands of the addition onto AddOps, 6389 // since the flags are only known to apply to this particular 6390 // addition - they may not apply to other additions that can be 6391 // formed with operands from AddOps. 
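          // For example, in ((a +nsw b) + c) the nsw only covers a + b;
          // attaching nsw to the combined SCEV (a + b + c) would be an
          // unjustified strengthening.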
6392 const SCEV *RHS = getSCEV(BO->RHS); 6393 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6394 if (Flags != SCEV::FlagAnyWrap) { 6395 const SCEV *LHS = getSCEV(BO->LHS); 6396 if (BO->Opcode == Instruction::Sub) 6397 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6398 else 6399 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6400 break; 6401 } 6402 } 6403 6404 if (BO->Opcode == Instruction::Sub) 6405 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6406 else 6407 AddOps.push_back(getSCEV(BO->RHS)); 6408 6409 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6410 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6411 NewBO->Opcode != Instruction::Sub)) { 6412 AddOps.push_back(getSCEV(BO->LHS)); 6413 break; 6414 } 6415 BO = NewBO; 6416 } while (true); 6417 6418 return getAddExpr(AddOps); 6419 } 6420 6421 case Instruction::Mul: { 6422 SmallVector<const SCEV *, 4> MulOps; 6423 do { 6424 if (BO->Op) { 6425 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6426 MulOps.push_back(OpSCEV); 6427 break; 6428 } 6429 6430 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6431 if (Flags != SCEV::FlagAnyWrap) { 6432 MulOps.push_back( 6433 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6434 break; 6435 } 6436 } 6437 6438 MulOps.push_back(getSCEV(BO->RHS)); 6439 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6440 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6441 MulOps.push_back(getSCEV(BO->LHS)); 6442 break; 6443 } 6444 BO = NewBO; 6445 } while (true); 6446 6447 return getMulExpr(MulOps); 6448 } 6449 case Instruction::UDiv: 6450 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6451 case Instruction::URem: 6452 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6453 case Instruction::Sub: { 6454 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6455 if (BO->Op) 6456 Flags = getNoWrapFlagsFromUB(BO->Op); 6457 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6458 } 6459 case Instruction::And: 6460 // For an expression like x&255 that merely masks off the high bits, 6461 // use zext(trunc(x)) as the SCEV expression. 6462 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6463 if (CI->isZero()) 6464 return getSCEV(BO->RHS); 6465 if (CI->isMinusOne()) 6466 return getSCEV(BO->LHS); 6467 const APInt &A = CI->getValue(); 6468 6469 // Instcombine's ShrinkDemandedConstant may strip bits out of 6470 // constants, obscuring what would otherwise be a low-bits mask. 6471 // Use computeKnownBits to compute what ShrinkDemandedConstant 6472 // knew about to reconstruct a low-bits mask value. 6473 unsigned LZ = A.countLeadingZeros(); 6474 unsigned TZ = A.countTrailingZeros(); 6475 unsigned BitWidth = A.getBitWidth(); 6476 KnownBits Known(BitWidth); 6477 computeKnownBits(BO->LHS, Known, getDataLayout(), 6478 0, &AC, nullptr, &DT); 6479 6480 APInt EffectiveMask = 6481 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6482 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6483 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6484 const SCEV *LHS = getSCEV(BO->LHS); 6485 const SCEV *ShiftedLHS = nullptr; 6486 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6487 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6488 // For an expression like (x * 8) & 8, simplify the multiply. 
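            // Rather than dividing the whole product, divide the multiplier
            // by the common power of two. E.g. for i32, ((x * 8) & 8) becomes
            // (zext (trunc x to i1) to i32) * 8, i.e. (x & 1) * 8.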
6489 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6490 unsigned GCD = std::min(MulZeros, TZ); 6491 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6492 SmallVector<const SCEV*, 4> MulOps; 6493 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6494 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6495 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6496 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6497 } 6498 } 6499 if (!ShiftedLHS) 6500 ShiftedLHS = getUDivExpr(LHS, MulCount); 6501 return getMulExpr( 6502 getZeroExtendExpr( 6503 getTruncateExpr(ShiftedLHS, 6504 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6505 BO->LHS->getType()), 6506 MulCount); 6507 } 6508 } 6509 break; 6510 6511 case Instruction::Or: 6512 // If the RHS of the Or is a constant, we may have something like: 6513 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6514 // optimizations will transparently handle this case. 6515 // 6516 // In order for this transformation to be safe, the LHS must be of the 6517 // form X*(2^n) and the Or constant must be less than 2^n. 6518 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6519 const SCEV *LHS = getSCEV(BO->LHS); 6520 const APInt &CIVal = CI->getValue(); 6521 if (GetMinTrailingZeros(LHS) >= 6522 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6523 // Build a plain add SCEV. 6524 return getAddExpr(LHS, getSCEV(CI), 6525 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 6526 } 6527 } 6528 break; 6529 6530 case Instruction::Xor: 6531 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6532 // If the RHS of xor is -1, then this is a not operation. 6533 if (CI->isMinusOne()) 6534 return getNotSCEV(getSCEV(BO->LHS)); 6535 6536 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6537 // This is a variant of the check for xor with -1, and it handles 6538 // the case where instcombine has trimmed non-demanded bits out 6539 // of an xor with -1. 6540 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6541 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6542 if (LBO->getOpcode() == Instruction::And && 6543 LCI->getValue() == CI->getValue()) 6544 if (const SCEVZeroExtendExpr *Z = 6545 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6546 Type *UTy = BO->LHS->getType(); 6547 const SCEV *Z0 = Z->getOperand(); 6548 Type *Z0Ty = Z0->getType(); 6549 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6550 6551 // If C is a low-bits mask, the zero extend is serving to 6552 // mask off the high bits. Complement the operand and 6553 // re-apply the zext. 6554 if (CI->getValue().isMask(Z0TySize)) 6555 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6556 6557 // If C is a single bit, it may be in the sign-bit position 6558 // before the zero-extend. In this case, represent the xor 6559 // using an add, which is equivalent, and re-apply the zext. 6560 APInt Trunc = CI->getValue().trunc(Z0TySize); 6561 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 6562 Trunc.isSignMask()) 6563 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 6564 UTy); 6565 } 6566 } 6567 break; 6568 6569 case Instruction::Shl: 6570 // Turn shift left of a constant amount into a multiply. 6571 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 6572 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 6573 6574 // If the shift count is not less than the bitwidth, the result of 6575 // the shift is undefined. 
Don't try to analyze it, because the 6576 // resolution chosen here may differ from the resolution chosen in 6577 // other parts of the compiler. 6578 if (SA->getValue().uge(BitWidth)) 6579 break; 6580 6581 // We can safely preserve the nuw flag in all cases. It's also safe to 6582 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation 6583 // requires special handling. It can be preserved as long as we're not 6584 // left shifting by bitwidth - 1. 6585 auto Flags = SCEV::FlagAnyWrap; 6586 if (BO->Op) { 6587 auto MulFlags = getNoWrapFlagsFromUB(BO->Op); 6588 if ((MulFlags & SCEV::FlagNSW) && 6589 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1))) 6590 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW); 6591 if (MulFlags & SCEV::FlagNUW) 6592 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW); 6593 } 6594 6595 Constant *X = ConstantInt::get( 6596 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 6597 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 6598 } 6599 break; 6600 6601 case Instruction::AShr: { 6602 // AShr X, C, where C is a constant. 6603 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 6604 if (!CI) 6605 break; 6606 6607 Type *OuterTy = BO->LHS->getType(); 6608 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 6609 // If the shift count is not less than the bitwidth, the result of 6610 // the shift is undefined. Don't try to analyze it, because the 6611 // resolution chosen here may differ from the resolution chosen in 6612 // other parts of the compiler. 6613 if (CI->getValue().uge(BitWidth)) 6614 break; 6615 6616 if (CI->isZero()) 6617 return getSCEV(BO->LHS); // shift by zero --> noop 6618 6619 uint64_t AShrAmt = CI->getZExtValue(); 6620 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 6621 6622 Operator *L = dyn_cast<Operator>(BO->LHS); 6623 if (L && L->getOpcode() == Instruction::Shl) { 6624 // X = Shl A, n 6625 // Y = AShr X, m 6626 // Both n and m are constant. 6627 6628 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 6629 if (L->getOperand(1) == BO->RHS) 6630 // For a two-shift sext-inreg, i.e. n = m, 6631 // use sext(trunc(x)) as the SCEV expression. 6632 return getSignExtendExpr( 6633 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy); 6634 6635 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1)); 6636 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) { 6637 uint64_t ShlAmt = ShlAmtCI->getZExtValue(); 6638 if (ShlAmt > AShrAmt) { 6639 // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV 6640 // expression. We already checked that ShlAmt < BitWidth, so 6641 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as 6642 // ShlAmt - AShrAmt < BitWidth - AShrAmt.
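            // For example, for i32 with n = 5 and m = 3, ((x << 5) ashr 3)
            // becomes sext((trunc x to i29) * 4) back to i32.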
6643 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt, 6644 ShlAmt - AShrAmt); 6645 return getSignExtendExpr( 6646 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy), 6647 getConstant(Mul)), OuterTy); 6648 } 6649 } 6650 } 6651 if (BO->IsExact) { 6652 // Given exact arithmetic in-bounds right-shift by a constant, 6653 // we can lower it into: (abs(x) EXACT/u (1<<C)) * signum(x) 6654 const SCEV *X = getSCEV(BO->LHS); 6655 const SCEV *AbsX = getAbsExpr(X, /*IsNSW=*/false); 6656 APInt Mult = APInt::getOneBitSet(BitWidth, AShrAmt); 6657 const SCEV *Div = getUDivExactExpr(AbsX, getConstant(Mult)); 6658 return getMulExpr(Div, getSignumExpr(X), SCEV::FlagNSW); 6659 } 6660 break; 6661 } 6662 } 6663 } 6664 6665 switch (U->getOpcode()) { 6666 case Instruction::Trunc: 6667 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 6668 6669 case Instruction::ZExt: 6670 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6671 6672 case Instruction::SExt: 6673 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) { 6674 // The NSW flag of a subtract does not always survive the conversion to 6675 // A + (-1)*B. By pushing sign extension onto its operands we are much 6676 // more likely to preserve NSW and allow later AddRec optimisations. 6677 // 6678 // NOTE: This is effectively duplicating this logic from getSignExtend: 6679 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 6680 // but by that point the NSW information has potentially been lost. 6681 if (BO->Opcode == Instruction::Sub && BO->IsNSW) { 6682 Type *Ty = U->getType(); 6683 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); 6684 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); 6685 return getMinusSCEV(V1, V2, SCEV::FlagNSW); 6686 } 6687 } 6688 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6689 6690 case Instruction::BitCast: 6691 // BitCasts are no-op casts so we just eliminate the cast. 6692 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 6693 return getSCEV(U->getOperand(0)); 6694 break; 6695 6696 case Instruction::PtrToInt: { 6697 // A pointer-to-integer cast is straightforward, so we do model it. 6698 Value *Ptr = U->getOperand(0); 6699 const SCEV *Op = getSCEV(Ptr); 6700 Type *DstIntTy = U->getType(); 6701 // SCEV doesn't have a constant pointer expression type, but it supports the 6702 // nullptr constant (and only that one), which is modelled in SCEV as a 6703 // zero integer constant. So just skip the ptrtoint cast for constants. 6704 if (isa<SCEVConstant>(Op)) 6705 return getTruncateOrZeroExtend(Op, DstIntTy); 6706 Type *PtrTy = Ptr->getType(); 6707 Type *IntPtrTy = getDataLayout().getIntPtrType(PtrTy); 6708 // But only if the effective SCEV (integer) type is wide enough to represent 6709 // all possible pointer values. 6710 if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(PtrTy)) != 6711 getDataLayout().getTypeSizeInBits(IntPtrTy)) 6712 return getUnknown(V); 6713 return getPtrToIntExpr(Op, DstIntTy); 6714 } 6715 case Instruction::IntToPtr: 6716 // Just don't deal with inttoptr casts. 6717 return getUnknown(V); 6718 6719 case Instruction::SDiv: 6720 // If both operands are non-negative, this is just a udiv. 6721 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 6722 isKnownNonNegative(getSCEV(U->getOperand(1)))) 6723 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 6724 break; 6725 6726 case Instruction::SRem: 6727 // If both operands are non-negative, this is just a urem.
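    // Same reasoning as for sdiv above: when neither operand can be negative,
    // the sign bit never participates, so srem and urem agree.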
6728 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 6729 isKnownNonNegative(getSCEV(U->getOperand(1)))) 6730 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 6731 break; 6732 6733 case Instruction::GetElementPtr: 6734 return createNodeForGEP(cast<GEPOperator>(U)); 6735 6736 case Instruction::PHI: 6737 return createNodeForPHI(cast<PHINode>(U)); 6738 6739 case Instruction::Select: 6740 // U can also be a select constant expr, which we let fall through. Since 6741 // createNodeForSelectOrPHI only works for a condition that is an 6742 // `ICmpInst`, and constant expressions cannot have instructions as 6743 // operands, we'd have returned getUnknown for a select constant expression anyway. 6744 if (isa<Instruction>(U)) 6745 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0), 6746 U->getOperand(1), U->getOperand(2)); 6747 break; 6748 6749 case Instruction::Call: 6750 case Instruction::Invoke: 6751 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) 6752 return getSCEV(RV); 6753 6754 if (auto *II = dyn_cast<IntrinsicInst>(U)) { 6755 switch (II->getIntrinsicID()) { 6756 case Intrinsic::abs: 6757 return getAbsExpr( 6758 getSCEV(II->getArgOperand(0)), 6759 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne()); 6760 case Intrinsic::umax: 6761 return getUMaxExpr(getSCEV(II->getArgOperand(0)), 6762 getSCEV(II->getArgOperand(1))); 6763 case Intrinsic::umin: 6764 return getUMinExpr(getSCEV(II->getArgOperand(0)), 6765 getSCEV(II->getArgOperand(1))); 6766 case Intrinsic::smax: 6767 return getSMaxExpr(getSCEV(II->getArgOperand(0)), 6768 getSCEV(II->getArgOperand(1))); 6769 case Intrinsic::smin: 6770 return getSMinExpr(getSCEV(II->getArgOperand(0)), 6771 getSCEV(II->getArgOperand(1))); 6772 case Intrinsic::usub_sat: { 6773 const SCEV *X = getSCEV(II->getArgOperand(0)); 6774 const SCEV *Y = getSCEV(II->getArgOperand(1)); 6775 const SCEV *ClampedY = getUMinExpr(X, Y); 6776 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW); 6777 } 6778 case Intrinsic::uadd_sat: { 6779 const SCEV *X = getSCEV(II->getArgOperand(0)); 6780 const SCEV *Y = getSCEV(II->getArgOperand(1)); 6781 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y)); 6782 return getAddExpr(ClampedX, Y, SCEV::FlagNUW); 6783 } 6784 case Intrinsic::start_loop_iterations: 6785 // A start_loop_iterations is just equivalent to the first operand for 6786 // SCEV purposes. 6787 return getSCEV(II->getArgOperand(0)); 6788 default: 6789 break; 6790 } 6791 } 6792 break; 6793 } 6794 6795 return getUnknown(V); 6796 } 6797 6798 //===----------------------------------------------------------------------===// 6799 // Iteration Count Computation Code 6800 // 6801 6802 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { 6803 if (!ExitCount) 6804 return 0; 6805 6806 ConstantInt *ExitConst = ExitCount->getValue(); 6807 6808 // Guard against huge trip counts. 6809 if (ExitConst->getValue().getActiveBits() > 32) 6810 return 0; 6811 6812 // In case of integer overflow, this returns 0, which is correct. 6813 return ((unsigned)ExitConst->getZExtValue()) + 1; 6814 } 6815 6816 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) { 6817 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6818 return getSmallConstantTripCount(L, ExitingBB); 6819 6820 // No trip count information for multiple exits.
6821 return 0; 6822 } 6823 6824 unsigned 6825 ScalarEvolution::getSmallConstantTripCount(const Loop *L, 6826 const BasicBlock *ExitingBlock) { 6827 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6828 assert(L->isLoopExiting(ExitingBlock) && 6829 "Exiting block must actually branch out of the loop!"); 6830 const SCEVConstant *ExitCount = 6831 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock)); 6832 return getConstantTripCount(ExitCount); 6833 } 6834 6835 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) { 6836 const auto *MaxExitCount = 6837 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L)); 6838 return getConstantTripCount(MaxExitCount); 6839 } 6840 6841 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) { 6842 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6843 return getSmallConstantTripMultiple(L, ExitingBB); 6844 6845 // No trip multiple information for multiple exits. 6846 return 0; 6847 } 6848 6849 /// Returns the largest constant divisor of the trip count of this loop as a 6850 /// normal unsigned value, if possible. This means that the actual trip count is 6851 /// always a multiple of the returned value (don't forget the trip count could 6852 /// very well be zero as well!). 6853 /// 6854 /// Returns 1 if the trip count is unknown or not guaranteed to be a 6855 /// multiple of a constant (which is also the case if the trip count is simply 6856 /// constant; use getSmallConstantTripCount for that case). It will also return 6857 /// 1 if the trip count is very large (>= 2^32). 6858 /// 6859 /// As explained in the comments for getSmallConstantTripCount, this assumes 6860 /// that control exits the loop via ExitingBlock. 6861 unsigned 6862 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, 6863 const BasicBlock *ExitingBlock) { 6864 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6865 assert(L->isLoopExiting(ExitingBlock) && 6866 "Exiting block must actually branch out of the loop!"); 6867 const SCEV *ExitCount = getExitCount(L, ExitingBlock); 6868 if (ExitCount == getCouldNotCompute()) 6869 return 1; 6870 6871 // Get the trip count from the BE count by adding 1. 6872 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType())); 6873 6874 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr); 6875 if (!TC) 6876 // Attempt to factor more general cases. Returns the greatest power-of-two 6877 // divisor. If overflow happens, the trip count expression is still 6878 // divisible by the greatest power-of-two divisor returned. 6879 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr)); 6880 6881 ConstantInt *Result = TC->getValue(); 6882 6883 // Guard against huge trip counts (this requires checking 6884 // for zero to handle the case where the trip count == -1 and the 6885 // addition wraps).
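  // For example, if the exit count is i32 -1 (UINT32_MAX), adding 1 wraps
  // TCExpr around to 0; getActiveBits() == 0 catches that case below and we
  // conservatively return 1.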
6886 if (!Result || Result->getValue().getActiveBits() > 32 || 6887 Result->getValue().getActiveBits() == 0) 6888 return 1; 6889 6890 return (unsigned)Result->getZExtValue(); 6891 } 6892 6893 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 6894 const BasicBlock *ExitingBlock, 6895 ExitCountKind Kind) { 6896 switch (Kind) { 6897 case Exact: 6898 case SymbolicMaximum: 6899 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 6900 case ConstantMaximum: 6901 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this); 6902 } 6903 llvm_unreachable("Invalid ExitCountKind!"); 6904 } 6905 6906 const SCEV * 6907 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 6908 SCEVUnionPredicate &Preds) { 6909 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds); 6910 } 6911 6912 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L, 6913 ExitCountKind Kind) { 6914 switch (Kind) { 6915 case Exact: 6916 return getBackedgeTakenInfo(L).getExact(L, this); 6917 case ConstantMaximum: 6918 return getBackedgeTakenInfo(L).getConstantMax(this); 6919 case SymbolicMaximum: 6920 return getBackedgeTakenInfo(L).getSymbolicMax(L, this); 6921 } 6922 llvm_unreachable("Invalid ExitCountKind!"); 6923 } 6924 6925 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 6926 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this); 6927 } 6928 6929 /// Push PHI nodes in the header of the given loop onto the given Worklist. 6930 static void 6931 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 6932 BasicBlock *Header = L->getHeader(); 6933 6934 // Push all Loop-header PHIs onto the Worklist stack. 6935 for (PHINode &PN : Header->phis()) 6936 Worklist.push_back(&PN); 6937 } 6938 6939 const ScalarEvolution::BackedgeTakenInfo & 6940 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 6941 auto &BTI = getBackedgeTakenInfo(L); 6942 if (BTI.hasFullInfo()) 6943 return BTI; 6944 6945 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6946 6947 if (!Pair.second) 6948 return Pair.first->second; 6949 6950 BackedgeTakenInfo Result = 6951 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 6952 6953 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 6954 } 6955 6956 ScalarEvolution::BackedgeTakenInfo & 6957 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 6958 // Initially insert an invalid entry for this loop. If the insertion 6959 // succeeds, proceed to actually compute a backedge-taken count and 6960 // update the value. The temporary CouldNotCompute value tells SCEV 6961 // code elsewhere that it shouldn't attempt to request a new 6962 // backedge-taken count, which could result in infinite recursion. 6963 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 6964 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6965 if (!Pair.second) 6966 return Pair.first->second; 6967 6968 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 6969 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result 6970 // must be cleared in this scope. 6971 BackedgeTakenInfo Result = computeBackedgeTakenCount(L); 6972 6973 // In release builds the statistics below are unused; cast them to void to avoid unused-variable warnings.
6974 (void)NumTripCountsComputed; 6975 (void)NumTripCountsNotComputed; 6976 #if LLVM_ENABLE_STATS || !defined(NDEBUG) 6977 const SCEV *BEExact = Result.getExact(L, this); 6978 if (BEExact != getCouldNotCompute()) { 6979 assert(isLoopInvariant(BEExact, L) && 6980 isLoopInvariant(Result.getConstantMax(this), L) && 6981 "Computed backedge-taken count isn't loop invariant for loop!"); 6982 ++NumTripCountsComputed; 6983 } else if (Result.getConstantMax(this) == getCouldNotCompute() && 6984 isa<PHINode>(L->getHeader()->begin())) { 6985 // Only count loops that have phi nodes as not being computable. 6986 ++NumTripCountsNotComputed; 6987 } 6988 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG) 6989 6990 // Now that we know more about the trip count for this loop, forget any 6991 // existing SCEV values for PHI nodes in this loop since they are only 6992 // conservative estimates made without the benefit of trip count 6993 // information. This is similar to the code in forgetLoop, except that 6994 // it handles SCEVUnknown PHI nodes specially. 6995 if (Result.hasAnyInfo()) { 6996 SmallVector<Instruction *, 16> Worklist; 6997 PushLoopPHIs(L, Worklist); 6998 6999 SmallPtrSet<Instruction *, 8> Discovered; 7000 while (!Worklist.empty()) { 7001 Instruction *I = Worklist.pop_back_val(); 7002 7003 ValueExprMapType::iterator It = 7004 ValueExprMap.find_as(static_cast<Value *>(I)); 7005 if (It != ValueExprMap.end()) { 7006 const SCEV *Old = It->second; 7007 7008 // SCEVUnknown for a PHI either means that it has an unrecognized 7009 // structure, or it's a PHI that's in the process of being computed 7010 // by createNodeForPHI. In the former case, additional loop trip 7011 // count information isn't going to change anything. In the latter 7012 // case, createNodeForPHI will perform the necessary updates on its 7013 // own when it gets to that point. 7014 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) { 7015 eraseValueFromMap(It->first); 7016 forgetMemoizedResults(Old); 7017 } 7018 if (PHINode *PN = dyn_cast<PHINode>(I)) 7019 ConstantEvolutionLoopExitValue.erase(PN); 7020 } 7021 7022 // Since we don't need to invalidate anything for correctness and we're 7023 // only invalidating to make SCEV's results more precise, we get to stop 7024 // early to avoid invalidating too much. This is especially important in 7025 // cases like: 7026 // 7027 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node 7028 // loop0: 7029 // %pn0 = phi 7030 // ... 7031 // loop1: 7032 // %pn1 = phi 7033 // ... 7034 // 7035 // where both loop0's and loop1's backedge-taken counts use the SCEV 7036 // expression for %v. If we don't have the early stop below then in cases 7037 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip 7038 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip 7039 // count for loop1, effectively nullifying SCEV's trip count cache. 7040 for (auto *U : I->users()) 7041 if (auto *I = dyn_cast<Instruction>(U)) { 7042 auto *LoopForUser = LI.getLoopFor(I->getParent()); 7043 if (LoopForUser && L->contains(LoopForUser) && 7044 Discovered.insert(I).second) 7045 Worklist.push_back(I); 7046 } 7047 } 7048 } 7049 7050 // Re-lookup the insert position, since the call to 7051 // computeBackedgeTakenCount above could result in a 7052 // recursive call to getBackedgeTakenInfo (on a different 7053 // loop), which would invalidate the iterator computed 7054 // earlier.
7055 return BackedgeTakenCounts.find(L)->second = std::move(Result); 7056 } 7057 7058 void ScalarEvolution::forgetAllLoops() { 7059 // This method is intended to forget all info about loops. It should 7060 // invalidate caches as if the following happened: 7061 // - The trip counts of all loops have changed arbitrarily 7062 // - Every llvm::Value has been updated in place to produce a different 7063 // result. 7064 BackedgeTakenCounts.clear(); 7065 PredicatedBackedgeTakenCounts.clear(); 7066 LoopPropertiesCache.clear(); 7067 ConstantEvolutionLoopExitValue.clear(); 7068 ValueExprMap.clear(); 7069 ValuesAtScopes.clear(); 7070 LoopDispositions.clear(); 7071 BlockDispositions.clear(); 7072 UnsignedRanges.clear(); 7073 SignedRanges.clear(); 7074 ExprValueMap.clear(); 7075 HasRecMap.clear(); 7076 MinTrailingZerosCache.clear(); 7077 PredicatedSCEVRewrites.clear(); 7078 } 7079 7080 void ScalarEvolution::forgetLoop(const Loop *L) { 7081 // Drop any stored trip count value. 7082 auto RemoveLoopFromBackedgeMap = 7083 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 7084 auto BTCPos = Map.find(L); 7085 if (BTCPos != Map.end()) { 7086 BTCPos->second.clear(); 7087 Map.erase(BTCPos); 7088 } 7089 }; 7090 7091 SmallVector<const Loop *, 16> LoopWorklist(1, L); 7092 SmallVector<Instruction *, 32> Worklist; 7093 SmallPtrSet<Instruction *, 16> Visited; 7094 7095 // Iterate over all the loops and sub-loops to drop SCEV information. 7096 while (!LoopWorklist.empty()) { 7097 auto *CurrL = LoopWorklist.pop_back_val(); 7098 7099 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 7100 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 7101 7102 // Drop information about predicated SCEV rewrites for this loop. 7103 for (auto I = PredicatedSCEVRewrites.begin(); 7104 I != PredicatedSCEVRewrites.end();) { 7105 std::pair<const SCEV *, const Loop *> Entry = I->first; 7106 if (Entry.second == CurrL) 7107 PredicatedSCEVRewrites.erase(I++); 7108 else 7109 ++I; 7110 } 7111 7112 auto LoopUsersItr = LoopUsers.find(CurrL); 7113 if (LoopUsersItr != LoopUsers.end()) { 7114 for (auto *S : LoopUsersItr->second) 7115 forgetMemoizedResults(S); 7116 LoopUsers.erase(LoopUsersItr); 7117 } 7118 7119 // Drop information about expressions based on loop-header PHIs. 7120 PushLoopPHIs(CurrL, Worklist); 7121 7122 while (!Worklist.empty()) { 7123 Instruction *I = Worklist.pop_back_val(); 7124 if (!Visited.insert(I).second) 7125 continue; 7126 7127 ValueExprMapType::iterator It = 7128 ValueExprMap.find_as(static_cast<Value *>(I)); 7129 if (It != ValueExprMap.end()) { 7130 eraseValueFromMap(It->first); 7131 forgetMemoizedResults(It->second); 7132 if (PHINode *PN = dyn_cast<PHINode>(I)) 7133 ConstantEvolutionLoopExitValue.erase(PN); 7134 } 7135 7136 PushDefUseChildren(I, Worklist); 7137 } 7138 7139 LoopPropertiesCache.erase(CurrL); 7140 // Forget all contained loops too, to avoid dangling entries in the 7141 // ValuesAtScopes map. 7142 LoopWorklist.append(CurrL->begin(), CurrL->end()); 7143 } 7144 } 7145 7146 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 7147 while (Loop *Parent = L->getParentLoop()) 7148 L = Parent; 7149 forgetLoop(L); 7150 } 7151 7152 void ScalarEvolution::forgetValue(Value *V) { 7153 Instruction *I = dyn_cast<Instruction>(V); 7154 if (!I) return; 7155 7156 // Drop information about expressions based on loop-header PHIs. 
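  // Conservatively walk the def-use chain from I: any SCEV cached for a value
  // computed (directly or transitively) from I may now be stale.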
7157 SmallVector<Instruction *, 16> Worklist; 7158 Worklist.push_back(I); 7159 7160 SmallPtrSet<Instruction *, 8> Visited; 7161 while (!Worklist.empty()) { 7162 I = Worklist.pop_back_val(); 7163 if (!Visited.insert(I).second) 7164 continue; 7165 7166 ValueExprMapType::iterator It = 7167 ValueExprMap.find_as(static_cast<Value *>(I)); 7168 if (It != ValueExprMap.end()) { 7169 eraseValueFromMap(It->first); 7170 forgetMemoizedResults(It->second); 7171 if (PHINode *PN = dyn_cast<PHINode>(I)) 7172 ConstantEvolutionLoopExitValue.erase(PN); 7173 } 7174 7175 PushDefUseChildren(I, Worklist); 7176 } 7177 } 7178 7179 void ScalarEvolution::forgetLoopDispositions(const Loop *L) { 7180 LoopDispositions.clear(); 7181 } 7182 7183 /// Get the exact loop backedge taken count considering all loop exits. A 7184 /// computable result can only be returned for loops with all exiting blocks 7185 /// dominating the latch. howFarToZero assumes that the limit of each loop test 7186 /// is never skipped. This is a valid assumption as long as the loop exits via 7187 /// that test. For precise results, it is the caller's responsibility to specify 7188 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 7189 const SCEV * 7190 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 7191 SCEVUnionPredicate *Preds) const { 7192 // If any exits were not computable, the loop is not computable. 7193 if (!isComplete() || ExitNotTaken.empty()) 7194 return SE->getCouldNotCompute(); 7195 7196 const BasicBlock *Latch = L->getLoopLatch(); 7197 // All exiting blocks we have collected must dominate the only backedge. 7198 if (!Latch) 7199 return SE->getCouldNotCompute(); 7200 7201 // All exiting blocks we have gathered dominate the loop's latch, so the 7202 // exact trip count is simply the minimum of all the calculated exit counts. 7203 SmallVector<const SCEV *, 2> Ops; 7204 for (auto &ENT : ExitNotTaken) { 7205 const SCEV *BECount = ENT.ExactNotTaken; 7206 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 7207 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 7208 "We should only have known counts for exiting blocks that dominate " 7209 "latch!"); 7210 7211 Ops.push_back(BECount); 7212 7213 if (Preds && !ENT.hasAlwaysTruePredicate()) 7214 Preds->add(ENT.Predicate.get()); 7215 7216 assert((Preds || ENT.hasAlwaysTruePredicate()) && 7217 "Predicate should be always true!"); 7218 } 7219 7220 return SE->getUMinFromMismatchedTypes(Ops); 7221 } 7222 7223 /// Get the exact not taken count for this loop exit. 7224 const SCEV * 7225 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock, 7226 ScalarEvolution *SE) const { 7227 for (auto &ENT : ExitNotTaken) 7228 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 7229 return ENT.ExactNotTaken; 7230 7231 return SE->getCouldNotCompute(); 7232 } 7233 7234 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax( 7235 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { 7236 for (auto &ENT : ExitNotTaken) 7237 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 7238 return ENT.MaxNotTaken; 7239 7240 return SE->getCouldNotCompute(); 7241 } 7242 7243 /// getConstantMax - Get the constant max backedge taken count for the loop.
7244 const SCEV * 7245 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { 7246 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7247 return !ENT.hasAlwaysTruePredicate(); 7248 }; 7249 7250 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax()) 7251 return SE->getCouldNotCompute(); 7252 7253 assert((isa<SCEVCouldNotCompute>(getConstantMax()) || 7254 isa<SCEVConstant>(getConstantMax())) && 7255 "No point in having a non-constant max backedge taken count!"); 7256 return getConstantMax(); 7257 } 7258 7259 const SCEV * 7260 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, 7261 ScalarEvolution *SE) { 7262 if (!SymbolicMax) 7263 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); 7264 return SymbolicMax; 7265 } 7266 7267 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( 7268 ScalarEvolution *SE) const { 7269 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7270 return !ENT.hasAlwaysTruePredicate(); 7271 }; 7272 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 7273 } 7274 7275 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 7276 ScalarEvolution *SE) const { 7277 if (getConstantMax() && getConstantMax() != SE->getCouldNotCompute() && 7278 SE->hasOperand(getConstantMax(), S)) 7279 return true; 7280 7281 for (auto &ENT : ExitNotTaken) 7282 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 7283 SE->hasOperand(ENT.ExactNotTaken, S)) 7284 return true; 7285 7286 return false; 7287 } 7288 7289 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 7290 : ExactNotTaken(E), MaxNotTaken(E) { 7291 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7292 isa<SCEVConstant>(MaxNotTaken)) && 7293 "No point in having a non-constant max backedge taken count!"); 7294 } 7295 7296 ScalarEvolution::ExitLimit::ExitLimit( 7297 const SCEV *E, const SCEV *M, bool MaxOrZero, 7298 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 7299 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 7300 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 7301 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 7302 "Exact is not allowed to be less precise than Max"); 7303 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7304 isa<SCEVConstant>(MaxNotTaken)) && 7305 "No point in having a non-constant max backedge taken count!"); 7306 for (auto *PredSet : PredSetList) 7307 for (auto *P : *PredSet) 7308 addPredicate(P); 7309 } 7310 7311 ScalarEvolution::ExitLimit::ExitLimit( 7312 const SCEV *E, const SCEV *M, bool MaxOrZero, 7313 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 7314 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 7315 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7316 isa<SCEVConstant>(MaxNotTaken)) && 7317 "No point in having a non-constant max backedge taken count!"); 7318 } 7319 7320 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 7321 bool MaxOrZero) 7322 : ExitLimit(E, M, MaxOrZero, None) { 7323 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7324 isa<SCEVConstant>(MaxNotTaken)) && 7325 "No point in having a non-constant max backedge taken count!"); 7326 } 7327 7328 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 7329 /// computable exit into a persistent ExitNotTakenInfo array. 
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
    bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
    : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                  nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
          isa<SCEVConstant>(ConstantMax)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
  ExitNotTaken.clear();
}

/// Compute the number of times the backedge of the specified loop will execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];

    // We canonicalize untaken exits to br (constant) and ignore them, so that
    // proving an exit untaken doesn't negatively impact our ability to reason
    // about the loop as a whole.
    if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
      if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
        bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
        if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
          continue;
      }

    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
7412 // 7413 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 7414 // is a LoopMayExit. If any computable LoopMustExit is found, then 7415 // MaxBECount is the minimum EL.MaxNotTaken of computable 7416 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 7417 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 7418 // computable EL.MaxNotTaken. 7419 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 7420 DT.dominates(ExitBB, Latch)) { 7421 if (!MustExitMaxBECount) { 7422 MustExitMaxBECount = EL.MaxNotTaken; 7423 MustExitMaxOrZero = EL.MaxOrZero; 7424 } else { 7425 MustExitMaxBECount = 7426 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 7427 } 7428 } else if (MayExitMaxBECount != getCouldNotCompute()) { 7429 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 7430 MayExitMaxBECount = EL.MaxNotTaken; 7431 else { 7432 MayExitMaxBECount = 7433 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 7434 } 7435 } 7436 } 7437 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 7438 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 7439 // The loop backedge will be taken the maximum or zero times if there's 7440 // a single exit that must be taken the maximum or zero times. 7441 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 7442 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 7443 MaxBECount, MaxOrZero); 7444 } 7445 7446 ScalarEvolution::ExitLimit 7447 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 7448 bool AllowPredicates) { 7449 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); 7450 // If our exiting block does not dominate the latch, then its connection with 7451 // loop's exit limit may be far from trivial. 7452 const BasicBlock *Latch = L->getLoopLatch(); 7453 if (!Latch || !DT.dominates(ExitingBlock, Latch)) 7454 return getCouldNotCompute(); 7455 7456 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 7457 Instruction *Term = ExitingBlock->getTerminator(); 7458 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 7459 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 7460 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 7461 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && 7462 "It should have one successor in loop and one exit block!"); 7463 // Proceed to the next level to examine the exit condition expression. 7464 return computeExitLimitFromCond( 7465 L, BI->getCondition(), ExitIfTrue, 7466 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 7467 } 7468 7469 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { 7470 // For switch, make sure that there is a single exit from the loop. 7471 BasicBlock *Exit = nullptr; 7472 for (auto *SBB : successors(ExitingBlock)) 7473 if (!L->contains(SBB)) { 7474 if (Exit) // Multiple exit successors. 
7475 return getCouldNotCompute(); 7476 Exit = SBB; 7477 } 7478 assert(Exit && "Exiting block must have at least one exit"); 7479 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7480 /*ControlsExit=*/IsOnlyExit); 7481 } 7482 7483 return getCouldNotCompute(); 7484 } 7485 7486 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7487 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7488 bool ControlsExit, bool AllowPredicates) { 7489 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7490 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7491 ControlsExit, AllowPredicates); 7492 } 7493 7494 Optional<ScalarEvolution::ExitLimit> 7495 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7496 bool ExitIfTrue, bool ControlsExit, 7497 bool AllowPredicates) { 7498 (void)this->L; 7499 (void)this->ExitIfTrue; 7500 (void)this->AllowPredicates; 7501 7502 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7503 this->AllowPredicates == AllowPredicates && 7504 "Variance in assumed invariant key components!"); 7505 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7506 if (Itr == TripCountMap.end()) 7507 return None; 7508 return Itr->second; 7509 } 7510 7511 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7512 bool ExitIfTrue, 7513 bool ControlsExit, 7514 bool AllowPredicates, 7515 const ExitLimit &EL) { 7516 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7517 this->AllowPredicates == AllowPredicates && 7518 "Variance in assumed invariant key components!"); 7519 7520 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7521 assert(InsertResult.second && "Expected successful insertion!"); 7522 (void)InsertResult; 7523 (void)ExitIfTrue; 7524 } 7525 7526 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7527 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7528 bool ControlsExit, bool AllowPredicates) { 7529 7530 if (auto MaybeEL = 7531 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7532 return *MaybeEL; 7533 7534 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7535 ControlsExit, AllowPredicates); 7536 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7537 return EL; 7538 } 7539 7540 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7541 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7542 bool ControlsExit, bool AllowPredicates) { 7543 // Handle BinOp conditions (And, Or). 7544 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( 7545 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7546 return *LimitFromBinOp; 7547 7548 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7549 // Proceed to the next level to examine the icmp. 7550 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7551 ExitLimit EL = 7552 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7553 if (EL.hasFullInfo() || !AllowPredicates) 7554 return EL; 7555 7556 // Try again, but use SCEV predicates this time. 7557 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7558 /*AllowPredicates=*/true); 7559 } 7560 7561 // Check for a constant condition. 
These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::computeExitLimitFromCondFromBinOp(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  Value *Op0, *Op1;
  bool IsAnd = false;
  if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
    IsAnd = true;
  else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
    IsAnd = false;
  else
    return None;

  // EitherMayExit is true in these two cases:
  //   br (and Op0 Op1), loop, exit
  //   br (or Op0 Op1), exit, loop
  bool EitherMayExit = IsAnd ^ ExitIfTrue;
  ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
                                                 ControlsExit && !EitherMayExit,
                                                 AllowPredicates);
  ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
                                                 ControlsExit && !EitherMayExit,
                                                 AllowPredicates);

  // Be robust against unsimplified IR for the form "op i1 X, NeutralElement".
  const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
  if (isa<ConstantInt>(Op1))
    return Op1 == NeutralElement ? EL0 : EL1;
  if (isa<ConstantInt>(Op0))
    return Op0 == NeutralElement ? EL1 : EL0;

  const SCEV *BECount = getCouldNotCompute();
  const SCEV *MaxBECount = getCouldNotCompute();
  if (EitherMayExit) {
    // The loop continues to execute only while neither condition triggers an
    // exit, so choose the less conservative count.
    // If ExitCond is a short-circuit form (select), using
    // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general.
    // For detailed examples, see
    // test/Analysis/ScalarEvolution/exit-count-select.ll
    bool PoisonSafe = isa<BinaryOperator>(ExitCond);
    if (!PoisonSafe)
      // Even if ExitCond is select, we can safely derive BECount using both
      // EL0 and EL1 in these cases:
      // (1) EL0.ExactNotTaken is non-zero
      // (2) EL1.ExactNotTaken is non-poison
      // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and
      //     it cannot be umin(0, ..))
      // The PoisonSafe assignment below is simplified and the assertion after
      // BECount calculation fully guarantees the condition (3).
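      //
      // Illustrative sketch of the hazard (hypothetical IR): in
      //   %sel = select i1 %c0, i1 %c1, i1 false
      // a poison %c1 does not make %sel poison on iterations where %c0 is
      // false, so an exit count derived from %c1 alone may be based on a
      // poison value, and umin'ing it with EL0's count can underestimate the
      // true backedge-taken count.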
      PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) ||
                   isa<SCEVConstant>(EL1.ExactNotTaken);
    if (EL0.ExactNotTaken != getCouldNotCompute() &&
        EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) {
      BECount =
          getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);

      // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
      // it should have been simplified to zero (see the condition (3) above).
      assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() ||
             BECount->isZero());
    }
    if (EL0.MaxNotTaken == getCouldNotCompute())
      MaxBECount = EL1.MaxNotTaken;
    else if (EL1.MaxNotTaken == getCouldNotCompute())
      MaxBECount = EL0.MaxNotTaken;
    else
      MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
  } else {
    // Both conditions must trigger the exit at the same time for the loop to
    // exit. For now, be conservative.
    if (EL0.ExactNotTaken == EL1.ExactNotTaken)
      BECount = EL0.ExactNotTaken;
  }

  // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
  // to be more aggressive when computing BECount than when computing
  // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
  // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
  // to not.
  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, false,
                   { &EL0.Predicates, &EL1.Predicates });
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          bool ExitIfTrue,
                                          bool ControlsExit,
                                          bool AllowPredicates) {
  // If the condition exits on true, invert it so that it exits on false.
  ICmpInst::Predicate Pred;
  if (!ExitIfTrue)
    Pred = ExitCond->getPredicate();
  else
    Pred = ExitCond->getInversePredicate();
  const ICmpInst::Predicate OriginalPred = Pred;

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      ExitLimit ItCnt =
        computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant operand, force it into the RHS.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
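        // Illustrative example (assumed values): with Pred == ICMP_ULT and a
        // constant RHS of 100, the exact region is the unsigned interval
        // [0, 100); an addrec such as {0,+,1} then leaves that range for the
        // first time at iteration 100.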
7714 ConstantRange CompRange = 7715 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7716 7717 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7718 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7719 } 7720 7721 switch (Pred) { 7722 case ICmpInst::ICMP_NE: { // while (X != Y) 7723 // Convert to: while (X-Y != 0) 7724 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7725 AllowPredicates); 7726 if (EL.hasAnyInfo()) return EL; 7727 break; 7728 } 7729 case ICmpInst::ICMP_EQ: { // while (X == Y) 7730 // Convert to: while (X-Y == 0) 7731 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7732 if (EL.hasAnyInfo()) return EL; 7733 break; 7734 } 7735 case ICmpInst::ICMP_SLT: 7736 case ICmpInst::ICMP_ULT: { // while (X < Y) 7737 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7738 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7739 AllowPredicates); 7740 if (EL.hasAnyInfo()) return EL; 7741 break; 7742 } 7743 case ICmpInst::ICMP_SGT: 7744 case ICmpInst::ICMP_UGT: { // while (X > Y) 7745 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7746 ExitLimit EL = 7747 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7748 AllowPredicates); 7749 if (EL.hasAnyInfo()) return EL; 7750 break; 7751 } 7752 default: 7753 break; 7754 } 7755 7756 auto *ExhaustiveCount = 7757 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7758 7759 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7760 return ExhaustiveCount; 7761 7762 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7763 ExitCond->getOperand(1), L, OriginalPred); 7764 } 7765 7766 ScalarEvolution::ExitLimit 7767 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7768 SwitchInst *Switch, 7769 BasicBlock *ExitingBlock, 7770 bool ControlsExit) { 7771 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7772 7773 // Give up if the exit is the default dest of a switch. 7774 if (Switch->getDefaultDest() == ExitingBlock) 7775 return getCouldNotCompute(); 7776 7777 assert(L->contains(Switch->getDefaultDest()) && 7778 "Default case must not exit the loop!"); 7779 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7780 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7781 7782 // while (X != Y) --> while (X-Y != 0) 7783 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7784 if (EL.hasAnyInfo()) 7785 return EL; 7786 7787 return getCouldNotCompute(); 7788 } 7789 7790 static ConstantInt * 7791 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7792 ScalarEvolution &SE) { 7793 const SCEV *InVal = SE.getConstant(C); 7794 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7795 assert(isa<SCEVConstant>(Val) && 7796 "Evaluation of SCEV at constant didn't fold correctly?"); 7797 return cast<SCEVConstant>(Val)->getValue(); 7798 } 7799 7800 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7801 /// compute the backedge execution count. 7802 ScalarEvolution::ExitLimit 7803 ScalarEvolution::computeLoadConstantCompareExitLimit( 7804 LoadInst *LI, 7805 Constant *RHS, 7806 const Loop *L, 7807 ICmpInst::Predicate predicate) { 7808 if (LI->isVolatile()) return getCouldNotCompute(); 7809 7810 // Check to see if the loaded pointer is a getelementptr of a global. 7811 // TODO: Use SCEV instead of manually grubbing with GEPs. 
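  // Schematically, the shape being matched is (names are illustrative):
  //   @G   = internal constant [N x i32] [...]
  //   %gep = getelementptr [N x i32], [N x i32]* @G, i32 0, i32 %iv
  //   %val = load i32, i32* %gep
  //   %cmp = icmp <pred> %val, <constant>
  // where %iv is an affine addrec of the loop being analyzed.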
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first index is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break; // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure.
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst); // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and the shift opcode in OutOpCode.
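  // For instance, "%t = lshr i32 %x, 3" matches with OutLHS = %x and
  // OutOpCode = Instruction::LShr, while a shift by zero (or by a
  // non-constant amount) is rejected.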
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match. Return the corresponding PHI node (%iv
  // above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations. If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
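    // For example (illustrative), {-20,ashr,2} evolves as -20, -5, -2, -1,
    // -1, ... and {20,ashr,2} as 20, 5, 1, 0, 0, ...; in both cases the value
    // is stable after at most bitwidth(K) steps.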
7985 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7986 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7987 Predecessor->getTerminator(), &DT); 7988 auto *Ty = cast<IntegerType>(RHS->getType()); 7989 if (Known.isNonNegative()) 7990 StableValue = ConstantInt::get(Ty, 0); 7991 else if (Known.isNegative()) 7992 StableValue = ConstantInt::get(Ty, -1, true); 7993 else 7994 return getCouldNotCompute(); 7995 7996 break; 7997 } 7998 case Instruction::LShr: 7999 case Instruction::Shl: 8000 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 8001 // stabilize to 0 in at most bitwidth(K) iterations. 8002 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 8003 break; 8004 } 8005 8006 auto *Result = 8007 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 8008 assert(Result->getType()->isIntegerTy(1) && 8009 "Otherwise cannot be an operand to a branch instruction"); 8010 8011 if (Result->isZeroValue()) { 8012 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 8013 const SCEV *UpperBound = 8014 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 8015 return ExitLimit(getCouldNotCompute(), UpperBound, false); 8016 } 8017 8018 return getCouldNotCompute(); 8019 } 8020 8021 /// Return true if we can constant fold an instruction of the specified type, 8022 /// assuming that all operands were constants. 8023 static bool CanConstantFold(const Instruction *I) { 8024 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 8025 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 8026 isa<LoadInst>(I) || isa<ExtractValueInst>(I)) 8027 return true; 8028 8029 if (const CallInst *CI = dyn_cast<CallInst>(I)) 8030 if (const Function *F = CI->getCalledFunction()) 8031 return canConstantFoldCallTo(CI, F); 8032 return false; 8033 } 8034 8035 /// Determine whether this instruction can constant evolve within this loop 8036 /// assuming its operands can all constant evolve. 8037 static bool canConstantEvolve(Instruction *I, const Loop *L) { 8038 // An instruction outside of the loop can't be derived from a loop PHI. 8039 if (!L->contains(I)) return false; 8040 8041 if (isa<PHINode>(I)) { 8042 // We don't currently keep track of the control flow needed to evaluate 8043 // PHIs, so we cannot handle PHIs inside of loops. 8044 return L->getHeader() == I->getParent(); 8045 } 8046 8047 // If we won't be able to constant fold this expression even if the operands 8048 // are constants, bail early. 8049 return CanConstantFold(I); 8050 } 8051 8052 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 8053 /// recursing through each instruction operand until reaching a loop header phi. 8054 static PHINode * 8055 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 8056 DenseMap<Instruction *, PHINode *> &PHIMap, 8057 unsigned Depth) { 8058 if (Depth > MaxConstantEvolvingDepth) 8059 return nullptr; 8060 8061 // Otherwise, we can evaluate this instruction if all of its operands are 8062 // constant or derived from a PHI node themselves. 8063 PHINode *PHI = nullptr; 8064 for (Value *Op : UseInst->operands()) { 8065 if (isa<Constant>(Op)) continue; 8066 8067 Instruction *OpInst = dyn_cast<Instruction>(Op); 8068 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 8069 8070 PHINode *P = dyn_cast<PHINode>(OpInst); 8071 if (!P) 8072 // If this operand is already visited, reuse the prior result. 
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI.
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from. We allow arbitrary operations along the
/// way, but the operands of an operation must be either constants or values
/// derived from a constant PHI. If this expression does not fit with these
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for some
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
8131 if (isa<PHINode>(I)) return nullptr; 8132 8133 std::vector<Constant*> Operands(I->getNumOperands()); 8134 8135 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 8136 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 8137 if (!Operand) { 8138 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 8139 if (!Operands[i]) return nullptr; 8140 continue; 8141 } 8142 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 8143 Vals[Operand] = C; 8144 if (!C) return nullptr; 8145 Operands[i] = C; 8146 } 8147 8148 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 8149 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 8150 Operands[1], DL, TLI); 8151 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 8152 if (!LI->isVolatile()) 8153 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 8154 } 8155 return ConstantFoldInstOperands(I, Operands, DL, TLI); 8156 } 8157 8158 8159 // If every incoming value to PN except the one for BB is a specific Constant, 8160 // return that, else return nullptr. 8161 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 8162 Constant *IncomingVal = nullptr; 8163 8164 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 8165 if (PN->getIncomingBlock(i) == BB) 8166 continue; 8167 8168 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 8169 if (!CurrentVal) 8170 return nullptr; 8171 8172 if (IncomingVal != CurrentVal) { 8173 if (IncomingVal) 8174 return nullptr; 8175 IncomingVal = CurrentVal; 8176 } 8177 } 8178 8179 return IncomingVal; 8180 } 8181 8182 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 8183 /// in the header of its containing loop, we know the loop executes a 8184 /// constant number of times, and the PHI node is just a recurrence 8185 /// involving constants, fold it. 8186 Constant * 8187 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 8188 const APInt &BEs, 8189 const Loop *L) { 8190 auto I = ConstantEvolutionLoopExitValue.find(PN); 8191 if (I != ConstantEvolutionLoopExitValue.end()) 8192 return I->second; 8193 8194 if (BEs.ugt(MaxBruteForceIterations)) 8195 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 8196 8197 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 8198 8199 DenseMap<Instruction *, Constant *> CurrentIterVals; 8200 BasicBlock *Header = L->getHeader(); 8201 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 8202 8203 BasicBlock *Latch = L->getLoopLatch(); 8204 if (!Latch) 8205 return nullptr; 8206 8207 for (PHINode &PHI : Header->phis()) { 8208 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 8209 CurrentIterVals[&PHI] = StartCST; 8210 } 8211 if (!CurrentIterVals.count(PN)) 8212 return RetVal = nullptr; 8213 8214 Value *BEValue = PN->getIncomingValueForBlock(Latch); 8215 8216 // Execute the loop symbolically to determine the exit value. 8217 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 8218 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 8219 8220 unsigned NumIterations = BEs.getZExtValue(); // must be in range 8221 unsigned IterationNum = 0; 8222 const DataLayout &DL = getDataLayout(); 8223 for (; ; ++IterationNum) { 8224 if (IterationNum == NumIterations) 8225 return RetVal = CurrentIterVals[PN]; // Got exit value! 8226 8227 // Compute the value of the PHIs for the next iteration. 8228 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 
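    // A small illustrative example (hypothetical names): for header PHIs
    //   %i = phi i32 [ 0, %ph ], [ %i.next, %latch ]
    //   %s = phi i32 [ 0, %ph ], [ %s.next, %latch ]
    // with %i.next = add i32 %i, 1 and %s.next = add i32 %s, %i, the map
    // evolves as
    //   iteration 0: { %i -> 0, %s -> 0 }
    //   iteration 1: { %i -> 1, %s -> 0 }
    //   iteration 2: { %i -> 2, %s -> 1 }
    // and so on, until IterationNum reaches NumIterations.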
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr; // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes. However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) { // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating; the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute.
We want to do this before 8314 // calling EvaluateExpression on them because that may invalidate iterators 8315 // into CurrentIterVals. 8316 SmallVector<PHINode *, 8> PHIsToCompute; 8317 for (const auto &I : CurrentIterVals) { 8318 PHINode *PHI = dyn_cast<PHINode>(I.first); 8319 if (!PHI || PHI->getParent() != Header) continue; 8320 PHIsToCompute.push_back(PHI); 8321 } 8322 for (PHINode *PHI : PHIsToCompute) { 8323 Constant *&NextPHI = NextIterVals[PHI]; 8324 if (NextPHI) continue; // Already computed! 8325 8326 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 8327 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8328 } 8329 CurrentIterVals.swap(NextIterVals); 8330 } 8331 8332 // Too many iterations were needed to evaluate. 8333 return getCouldNotCompute(); 8334 } 8335 8336 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 8337 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 8338 ValuesAtScopes[V]; 8339 // Check to see if we've folded this expression at this loop before. 8340 for (auto &LS : Values) 8341 if (LS.first == L) 8342 return LS.second ? LS.second : V; 8343 8344 Values.emplace_back(L, nullptr); 8345 8346 // Otherwise compute it. 8347 const SCEV *C = computeSCEVAtScope(V, L); 8348 for (auto &LS : reverse(ValuesAtScopes[V])) 8349 if (LS.first == L) { 8350 LS.second = C; 8351 break; 8352 } 8353 return C; 8354 } 8355 8356 /// This builds up a Constant using the ConstantExpr interface. That way, we 8357 /// will return Constants for objects which aren't represented by a 8358 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 8359 /// Returns NULL if the SCEV isn't representable as a Constant. 8360 static Constant *BuildConstantFromSCEV(const SCEV *V) { 8361 switch (V->getSCEVType()) { 8362 case scCouldNotCompute: 8363 case scAddRecExpr: 8364 return nullptr; 8365 case scConstant: 8366 return cast<SCEVConstant>(V)->getValue(); 8367 case scUnknown: 8368 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 8369 case scSignExtend: { 8370 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 8371 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 8372 return ConstantExpr::getSExt(CastOp, SS->getType()); 8373 return nullptr; 8374 } 8375 case scZeroExtend: { 8376 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 8377 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8378 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8379 return nullptr; 8380 } 8381 case scPtrToInt: { 8382 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V); 8383 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand())) 8384 return ConstantExpr::getPtrToInt(CastOp, P2I->getType()); 8385 8386 return nullptr; 8387 } 8388 case scTruncate: { 8389 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8390 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8391 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8392 return nullptr; 8393 } 8394 case scAddExpr: { 8395 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8396 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8397 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8398 unsigned AS = PTy->getAddressSpace(); 8399 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8400 C = ConstantExpr::getBitCast(C, DestPtrTy); 8401 } 8402 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8403 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8404 if (!C2) 8405 return nullptr; 
8406 8407 // First pointer! 8408 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 8409 unsigned AS = C2->getType()->getPointerAddressSpace(); 8410 std::swap(C, C2); 8411 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8412 // The offsets have been converted to bytes. We can add bytes to an 8413 // i8* by GEP with the byte count in the first index. 8414 C = ConstantExpr::getBitCast(C, DestPtrTy); 8415 } 8416 8417 // Don't bother trying to sum two pointers. We probably can't 8418 // statically compute a load that results from it anyway. 8419 if (C2->getType()->isPointerTy()) 8420 return nullptr; 8421 8422 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8423 if (PTy->getElementType()->isStructTy()) 8424 C2 = ConstantExpr::getIntegerCast( 8425 C2, Type::getInt32Ty(C->getContext()), true); 8426 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); 8427 } else 8428 C = ConstantExpr::getAdd(C, C2); 8429 } 8430 return C; 8431 } 8432 return nullptr; 8433 } 8434 case scMulExpr: { 8435 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 8436 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 8437 // Don't bother with pointers at all. 8438 if (C->getType()->isPointerTy()) 8439 return nullptr; 8440 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 8441 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 8442 if (!C2 || C2->getType()->isPointerTy()) 8443 return nullptr; 8444 C = ConstantExpr::getMul(C, C2); 8445 } 8446 return C; 8447 } 8448 return nullptr; 8449 } 8450 case scUDivExpr: { 8451 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 8452 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 8453 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 8454 if (LHS->getType() == RHS->getType()) 8455 return ConstantExpr::getUDiv(LHS, RHS); 8456 return nullptr; 8457 } 8458 case scSMaxExpr: 8459 case scUMaxExpr: 8460 case scSMinExpr: 8461 case scUMinExpr: 8462 return nullptr; // TODO: smax, umax, smin, umax. 8463 } 8464 llvm_unreachable("Unknown SCEV kind!"); 8465 } 8466 8467 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 8468 if (isa<SCEVConstant>(V)) return V; 8469 8470 // If this instruction is evolved from a constant-evolving PHI, compute the 8471 // exit value from the loop without using SCEVs. 8472 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 8473 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 8474 if (PHINode *PN = dyn_cast<PHINode>(I)) { 8475 const Loop *CurrLoop = this->LI[I->getParent()]; 8476 // Looking for loop exit value. 8477 if (CurrLoop && CurrLoop->getParentLoop() == L && 8478 PN->getParent() == CurrLoop->getHeader()) { 8479 // Okay, there is no closed form solution for the PHI node. Check 8480 // to see if the loop that contains it has a known backedge-taken 8481 // count. If so, we may be able to force computation of the exit 8482 // value. 8483 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop); 8484 // This trivial case can show up in some degenerate cases where 8485 // the incoming IR has not yet been fully simplified. 
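        // For instance (illustrative), if the backedge is provably never
        // taken, a header PHI such as
        //   %p = phi i32 [ %init, %preheader ], [ %next, %latch ]
        // simply yields %init at this scope, provided every out-of-loop edge
        // agrees on that incoming value.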
8486 if (BackedgeTakenCount->isZero()) { 8487 Value *InitValue = nullptr; 8488 bool MultipleInitValues = false; 8489 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 8490 if (!CurrLoop->contains(PN->getIncomingBlock(i))) { 8491 if (!InitValue) 8492 InitValue = PN->getIncomingValue(i); 8493 else if (InitValue != PN->getIncomingValue(i)) { 8494 MultipleInitValues = true; 8495 break; 8496 } 8497 } 8498 } 8499 if (!MultipleInitValues && InitValue) 8500 return getSCEV(InitValue); 8501 } 8502 // Do we have a loop invariant value flowing around the backedge 8503 // for a loop which must execute the backedge? 8504 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 8505 isKnownPositive(BackedgeTakenCount) && 8506 PN->getNumIncomingValues() == 2) { 8507 8508 unsigned InLoopPred = 8509 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1; 8510 Value *BackedgeVal = PN->getIncomingValue(InLoopPred); 8511 if (CurrLoop->isLoopInvariant(BackedgeVal)) 8512 return getSCEV(BackedgeVal); 8513 } 8514 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 8515 // Okay, we know how many times the containing loop executes. If 8516 // this is a constant evolving PHI node, get the final value at 8517 // the specified iteration number. 8518 Constant *RV = getConstantEvolutionLoopExitValue( 8519 PN, BTCC->getAPInt(), CurrLoop); 8520 if (RV) return getSCEV(RV); 8521 } 8522 } 8523 8524 // If there is a single-input Phi, evaluate it at our scope. If we can 8525 // prove that this replacement does not break LCSSA form, use new value. 8526 if (PN->getNumOperands() == 1) { 8527 const SCEV *Input = getSCEV(PN->getOperand(0)); 8528 const SCEV *InputAtScope = getSCEVAtScope(Input, L); 8529 // TODO: We can generalize it using LI.replacementPreservesLCSSAForm, 8530 // for the simplest case just support constants. 8531 if (isa<SCEVConstant>(InputAtScope)) return InputAtScope; 8532 } 8533 } 8534 8535 // Okay, this is an expression that we cannot symbolically evaluate 8536 // into a SCEV. Check to see if it's possible to symbolically evaluate 8537 // the arguments into constants, and if so, try to constant propagate the 8538 // result. This is particularly useful for computing loop exit values. 8539 if (CanConstantFold(I)) { 8540 SmallVector<Constant *, 4> Operands; 8541 bool MadeImprovement = false; 8542 for (Value *Op : I->operands()) { 8543 if (Constant *C = dyn_cast<Constant>(Op)) { 8544 Operands.push_back(C); 8545 continue; 8546 } 8547 8548 // If any of the operands is non-constant and if they are 8549 // non-integer and non-pointer, don't even try to analyze them 8550 // with scev techniques. 8551 if (!isSCEVable(Op->getType())) 8552 return V; 8553 8554 const SCEV *OrigV = getSCEV(Op); 8555 const SCEV *OpV = getSCEVAtScope(OrigV, L); 8556 MadeImprovement |= OrigV != OpV; 8557 8558 Constant *C = BuildConstantFromSCEV(OpV); 8559 if (!C) return V; 8560 if (C->getType() != Op->getType()) 8561 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 8562 Op->getType(), 8563 false), 8564 C, Op->getType()); 8565 Operands.push_back(C); 8566 } 8567 8568 // Check to see if getSCEVAtScope actually made an improvement. 
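      // As an illustrative sketch: if an operand %n is loop-variant here but
      // getSCEVAtScope folds it to the constant 40 at scope L, then an
      // instruction such as "add i32 %n, 2" constant folds to 42 below.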
8569 if (MadeImprovement) { 8570 Constant *C = nullptr; 8571 const DataLayout &DL = getDataLayout(); 8572 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 8573 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 8574 Operands[1], DL, &TLI); 8575 else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) { 8576 if (!Load->isVolatile()) 8577 C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(), 8578 DL); 8579 } else 8580 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 8581 if (!C) return V; 8582 return getSCEV(C); 8583 } 8584 } 8585 } 8586 8587 // This is some other type of SCEVUnknown, just return it. 8588 return V; 8589 } 8590 8591 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 8592 // Avoid performing the look-up in the common case where the specified 8593 // expression has no loop-variant portions. 8594 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 8595 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8596 if (OpAtScope != Comm->getOperand(i)) { 8597 // Okay, at least one of these operands is loop variant but might be 8598 // foldable. Build a new instance of the folded commutative expression. 8599 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 8600 Comm->op_begin()+i); 8601 NewOps.push_back(OpAtScope); 8602 8603 for (++i; i != e; ++i) { 8604 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8605 NewOps.push_back(OpAtScope); 8606 } 8607 if (isa<SCEVAddExpr>(Comm)) 8608 return getAddExpr(NewOps, Comm->getNoWrapFlags()); 8609 if (isa<SCEVMulExpr>(Comm)) 8610 return getMulExpr(NewOps, Comm->getNoWrapFlags()); 8611 if (isa<SCEVMinMaxExpr>(Comm)) 8612 return getMinMaxExpr(Comm->getSCEVType(), NewOps); 8613 llvm_unreachable("Unknown commutative SCEV type!"); 8614 } 8615 } 8616 // If we got here, all operands are loop invariant. 8617 return Comm; 8618 } 8619 8620 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 8621 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 8622 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 8623 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 8624 return Div; // must be loop invariant 8625 return getUDivExpr(LHS, RHS); 8626 } 8627 8628 // If this is a loop recurrence for a loop that does not contain L, then we 8629 // are dealing with the final value computed by the loop. 8630 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 8631 // First, attempt to evaluate each operand. 8632 // Avoid performing the look-up in the common case where the specified 8633 // expression has no loop-variant portions. 8634 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8635 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8636 if (OpAtScope == AddRec->getOperand(i)) 8637 continue; 8638 8639 // Okay, at least one of these operands is loop variant but might be 8640 // foldable. Build a new instance of the folded commutative expression. 8641 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8642 AddRec->op_begin()+i); 8643 NewOps.push_back(OpAtScope); 8644 for (++i; i != e; ++i) 8645 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8646 8647 const SCEV *FoldedRec = 8648 getAddRecExpr(NewOps, AddRec->getLoop(), 8649 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8650 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8651 // The addrec may be folded to a nonrecurrence, for example, if the 8652 // induction variable is multiplied by zero after constant folding. Go 8653 // ahead and return the folded value. 
8654 if (!AddRec) 8655 return FoldedRec; 8656 break; 8657 } 8658 8659 // If the scope is outside the addrec's loop, evaluate it by using the 8660 // loop exit value of the addrec. 8661 if (!AddRec->getLoop()->contains(L)) { 8662 // To evaluate this recurrence, we need to know how many times the AddRec 8663 // loop iterates. Compute this now. 8664 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8665 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8666 8667 // Then, evaluate the AddRec. 8668 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8669 } 8670 8671 return AddRec; 8672 } 8673 8674 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8675 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8676 if (Op == Cast->getOperand()) 8677 return Cast; // must be loop invariant 8678 return getZeroExtendExpr(Op, Cast->getType()); 8679 } 8680 8681 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8682 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8683 if (Op == Cast->getOperand()) 8684 return Cast; // must be loop invariant 8685 return getSignExtendExpr(Op, Cast->getType()); 8686 } 8687 8688 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8689 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8690 if (Op == Cast->getOperand()) 8691 return Cast; // must be loop invariant 8692 return getTruncateExpr(Op, Cast->getType()); 8693 } 8694 8695 if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) { 8696 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8697 if (Op == Cast->getOperand()) 8698 return Cast; // must be loop invariant 8699 return getPtrToIntExpr(Op, Cast->getType()); 8700 } 8701 8702 llvm_unreachable("Unknown SCEV type!"); 8703 } 8704 8705 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8706 return getSCEVAtScope(getSCEV(V), L); 8707 } 8708 8709 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 8710 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 8711 return stripInjectiveFunctions(ZExt->getOperand()); 8712 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 8713 return stripInjectiveFunctions(SExt->getOperand()); 8714 return S; 8715 } 8716 8717 /// Finds the minimum unsigned root of the following equation: 8718 /// 8719 /// A * X = B (mod N) 8720 /// 8721 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8722 /// A and B isn't important. 8723 /// 8724 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 8725 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8726 ScalarEvolution &SE) { 8727 uint32_t BW = A.getBitWidth(); 8728 assert(BW == SE.getTypeSizeInBits(B->getType())); 8729 assert(A != 0 && "A must be non-zero."); 8730 8731 // 1. D = gcd(A, N) 8732 // 8733 // The gcd of A and N may have only one prime factor: 2. The number of 8734 // trailing zeros in A is its multiplicity 8735 uint32_t Mult2 = A.countTrailingZeros(); 8736 // D = 2^Mult2 8737 8738 // 2. Check if B is divisible by D. 8739 // 8740 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 8741 // is not less than multiplicity of this prime factor for D. 8742 if (SE.GetMinTrailingZeros(B) < Mult2) 8743 return SE.getCouldNotCompute(); 8744 8745 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 8746 // modulo (N / D). 
8747 // 8748 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8749 // (N / D) in general. The inverse itself always fits into BW bits, though, 8750 // so we immediately truncate it. 8751 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8752 APInt Mod(BW + 1, 0); 8753 Mod.setBit(BW - Mult2); // Mod = N / D 8754 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8755 8756 // 4. Compute the minimum unsigned root of the equation: 8757 // I * (B / D) mod (N / D) 8758 // To simplify the computation, we factor out the divide by D: 8759 // (I * B mod N) / D 8760 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8761 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8762 } 8763 8764 /// For a given quadratic addrec, generate coefficients of the corresponding 8765 /// quadratic equation, multiplied by a common value to ensure that they are 8766 /// integers. 8767 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8768 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8769 /// were multiplied by, and BitWidth is the bit width of the original addrec 8770 /// coefficients. 8771 /// This function returns None if the addrec coefficients are not compile- 8772 /// time constants. 8773 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8774 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8775 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8776 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8777 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8778 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8779 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8780 << *AddRec << '\n'); 8781 8782 // We currently can only solve this if the coefficients are constants. 8783 if (!LC || !MC || !NC) { 8784 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8785 return None; 8786 } 8787 8788 APInt L = LC->getAPInt(); 8789 APInt M = MC->getAPInt(); 8790 APInt N = NC->getAPInt(); 8791 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8792 8793 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8794 unsigned NewWidth = BitWidth + 1; 8795 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8796 << BitWidth << '\n'); 8797 // The sign-extension (as opposed to a zero-extension) here matches the 8798 // extension used in SolveQuadraticEquationWrap (with the same motivation). 8799 N = N.sext(NewWidth); 8800 M = M.sext(NewWidth); 8801 L = L.sext(NewWidth); 8802 8803 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8804 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8805 // L+M, L+2M+N, L+3M+3N, ... 8806 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8807 // 8808 // The equation Acc = 0 is then 8809 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8810 // In a quadratic form it becomes: 8811 // N n^2 + (2M-N) n + 2L = 0. 
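  // A worked example (with assumed constant coefficients): for the addrec
  // {4,+,-4,+,2} we have L = 4, M = -4, N = 2, so Acc(n) = n^2 - 5n + 4 and
  // the scaled equation is 2n^2 - 10n + 8 = 0, i.e. 2(n - 1)(n - 4) = 0,
  // whose least non-negative root is n = 1.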

  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}

/// Helper function to compare optional APInts:
/// (a) if X and Y both exist, return min(X, Y),
/// (b) if neither X nor Y exists, return None,
/// (c) if exactly one of X and Y exists, return that value.
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
  if (X.hasValue() && Y.hasValue()) {
    unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
    APInt XW = X->sextOrSelf(W);
    APInt YW = Y->sextOrSelf(W);
    return XW.slt(YW) ? *X : *Y;
  }
  if (!X.hasValue() && !Y.hasValue())
    return None;
  return X.hasValue() ? *X : *Y;
}

/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients. If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients. The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
  if (!X.hasValue())
    return None;
  unsigned W = X->getBitWidth();
  if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
    return X->trunc(BitWidth);
  return X;
}

/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations. The values L, M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
///     like x^2 = 5 no integer solutions exist; in other cases an integer
///     solution may exist, but SolveQuadraticEquationWrap may fail to find it.
static Optional<APInt>
SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
  if (!X.hasValue())
    return None;

  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return None;

  return TruncIfPossible(X, BitWidth);
}

/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
/// iterations. The values M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n such that c(n) does not belong to the given range,
/// while c(n-1) does.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
///     bounds of the range.
static Optional<APInt>
SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
                          const ConstantRange &Range, ScalarEvolution &SE) {
  assert(AddRec->getOperand(0)->isZero() &&
         "Starting value of addrec should be 0");
  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
                    << Range << ", addrec " << *AddRec << '\n');
  // This case is handled in getNumIterationsInRange. Here we can assume that
  // we start in the range.
  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
         "Addrec's initial value should be in range");

  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  // Be careful about the return value: there can be two reasons for not
  // returning an actual number. First, if no solutions to the equations
  // were found, and second, if the solutions don't leave the given range.
  // The first case means that the actual solution is "unknown", the second
  // means that it's known, but not valid. If the solution is unknown, we
  // cannot make any conclusions.
  // Return a pair: the optional solution and a flag indicating if the
  // solution was found.
  auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
    // Solve for signed overflow and unsigned overflow, pick the lower
    // solution.
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
                      << Bound << " (before multiplying by " << M << ")\n");
    Bound *= M; // The quadratic equation multiplier.
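    // (GetQuadraticEquation returned the coefficients of M * c(n) rather
    // than of c(n) itself, so the boundary has to be scaled by the same
    // multiplier for the equation below to be the right one to solve.)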

    Optional<APInt> SO = None;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
                                                              BitWidth+1);

    auto LeavesRange = [&] (const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns None, it means that there can
    // be a solution, but the function failed to find it. We cannot treat it
    // as "no solution".
    if (!SO.hasValue() || !UO.hasValue())
      return { None, false };

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    Optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return { Min, true };
    Optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return { Max, true };

    // Solutions were found, but were eliminated, hence the "true".
    return { None, true };
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // The lower bound is inclusive, so subtract 1 to represent the exiting
  // value.
  APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If either of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such a case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it. Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B. It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow. If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A. But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test. The exit condition
  // is now expressed as a single expression, V = x-y. So the exit test is
  // effectively V != 0. We know and take advantage of the fact that this
  // expression is only ever used in a comparison against zero.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  // If the value is a constant:
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index. When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
      return ExitLimit(R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //     Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.
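  //
  // For example, in i8 arithmetic with Start = 8 and Step = 4 we solve
  // 4*N = -8 = 248 (mod 256), giving N = 62; indeed 8 + 4*62 = 256 = 0
  // (mod 2^8). With Start = 6 there is no solution: -6 = 250 (mod 256) is
  // divisible by 2 but not by gcd(4, 256) = 4, so the recurrence steps over
  // zero and wraps without ever reaching it.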

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  // We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wrap around.
  //   1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
    APInt MaxBECountBase = getUnsignedRangeMax(Distance);
    if (MaxBECountBase.ult(MaxBECount))
      MaxBECount = MaxBECountBase;

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls the loop exit (the loop exits only if the
  // expression is true) and the addition is no-wrap, we can use unsigned
  // divide to compute the backedge count. In this case, the step may not
  // divide the distance, but we don't care because if the condition is
  // "missed" the loop will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max =
        Exact == getCouldNotCompute()
            ? Exact
            : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed. We don't
  // handle them yet except for the trivial case. This could be expanded in the
  // future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already. If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<const BasicBlock *, const BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
    const {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (const BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (const Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal; however, for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value. For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory; but they compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;
  // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
  // '0 != 0'.
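  // For example, a comparison like '%x uge 0' is always true and becomes
  // '0 == 0', while an always-false comparison becomes '0 != 0', leaving
  // callers with a trivially foldable predicate.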
  auto TrivialCase = [&](bool TriviallyTrue) {
    LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
    Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
    return true;
  };
  // If we hit the max recursion limit, bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands being constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        return TrivialCase(false);
      else
        return TrivialCase(true);
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        return TrivialCase(true);
      else if (ExactCR.isEmptySet())
        return TrivialCase(false);

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b - %a == 0) into
        // %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;

      // The "Should have been caught earlier!" messages below refer to the
      // fact that the ExactCR.isFullSet() or ExactCR.isEmptySet() checks
      // above should have fired on the corresponding cases and canonicalized
      // the comparison to a trivial case.
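      // For example, 'X uge 0' yields a full-set region and 'X ult 0' an
      // empty-set region, so by the time we reach the cases below RA cannot
      // sit on the relevant boundary and the +1/-1 adjustments cannot wrap.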

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      return TrivialCase(true);
    if (ICmpInst::isFalseWhenEqual(Pred))
      return TrivialCase(false);
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);

  return Changed;
}

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}

std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}

bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // The domination relationship must be a linear order on the collected
  // loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
                          return DT.properlyDominates(L1->getHeader(),
                                                      L2->getHeader());
                        });

  // Get the init and post increment values for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV then bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get the init and post increment values for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV then bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that the init SCEV contains an invariant load that does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check that here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  // The backedge guard check appears to be faster than the entry check, so
  // checking it first can short-circuit and speed up the whole estimation in
  // some cases.
  return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second) &&
         isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}

bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
                                         const SCEV *LHS, const SCEV *RHS,
                                         const Instruction *Context) {
  // TODO: Analyze guards and assumes from Context's block.
  return isKnownPredicate(Pred, LHS, RHS) ||
         isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS);
}

bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                              const SCEVAddRecExpr *LHS,
                                              const SCEV *RHS) {
  const Loop *L = LHS->getLoop();
  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
}

Optional<ScalarEvolution::MonotonicPredicateType>
ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred) {
  auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change into a monotonically decreasing one, and vice versa.
  if (Result) {
    auto ResultSwapped =
        getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));

    assert(ResultSwapped.hasValue() && "should be able to analyze both!");
    assert(ResultSwapped.getValue() != Result.getValue() &&
           "monotonicity should flip as we flip the predicate");
  }
#endif

  return Result;
}

Optional<ScalarEvolution::MonotonicPredicateType>
ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred) {
  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other way
  // around for decreasing predicates); all we care about is that *if* the
  // predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.

  // Only handle LE/LT/GE/GT predicates.
  if (!ICmpInst::isRelational(Pred))
    return None;

  bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
  assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
         "Should be greater or less!");

  // Check that AR does not wrap.
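  // For unsigned predicates the nuw flag alone fixes the direction, since
  // the unsigned value of a <nuw> addrec never decreases; for signed
  // predicates we need nsw together with the sign of the step to know
  // whether the recurrence is increasing or decreasing.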
  if (ICmpInst::isUnsigned(Pred)) {
    if (!LHS->hasNoUnsignedWrap())
      return None;
    return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
  } else {
    assert(ICmpInst::isSigned(Pred) &&
           "Relational predicate is either signed or unsigned!");
    if (!LHS->hasNoSignedWrap())
      return None;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step))
      return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;

    if (isKnownNonPositive(Step))
      return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;

    return None;
  }
}

Optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS,
                                           const Loop *L) {

  // If there is a loop-invariant operand, force it into the RHS, otherwise
  // bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return None;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return None;

  auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
  if (!MonotonicType)
    return None;
  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both of the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.
  bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return None;

  return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS);
}

Optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    const Instruction *Context, const SCEV *MaxIter) {
  // Try to prove the following set of facts:
  // - The predicate is monotonic in the iteration space.
  // - If the check does not fail on the 1st iteration:
  //   - No overflow will happen during the first MaxIter iterations;
  //   - It will not fail on the MaxIter'th iteration.
  // If the check does fail on the 1st iteration, we leave the loop and no
  // other checks matter.

  // If there is a loop-invariant operand, force it into the RHS, otherwise
  // bail out.
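  // E.g. for an exit condition 'i < n' where n is defined outside the loop,
  // n is the invariant operand and stays on the RHS; for 'n > i' the
  // operands and the predicate are swapped to reach the same canonical form.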
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return None;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AR || AR->getLoop() != L)
    return None;

  // The predicate must be relational (i.e. <, <=, >=, >).
  if (!ICmpInst::isRelational(Pred))
    return None;

  // TODO: Support steps other than +/- 1.
  const SCEV *Step = AR->getStepRecurrence(*this);
  auto *One = getOne(Step->getType());
  auto *MinusOne = getNegativeSCEV(One);
  if (Step != One && Step != MinusOne)
    return None;

  // A type mismatch here means that MaxIter is potentially larger than the
  // largest unsigned value in the start type, which means we cannot prove
  // no-wrap for the indvar.
  if (AR->getType() != MaxIter->getType())
    return None;

  // Value of the IV on the suggested last iteration.
  const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
  // Does it still meet the requirement?
  if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
    return None;
  // Because the step is +/- 1 and MaxIter has the same type as Start (i.e. it
  // does not exceed the max unsigned value of this type), this effectively
  // proves that there is no wrap during the iteration. To prove that there is
  // no signed/unsigned wrap, we need to check that
  // Start <= Last for step = 1 or Start >= Last for step = -1.
  ICmpInst::Predicate NoOverflowPred =
      CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  if (Step == MinusOne)
    NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
  const SCEV *Start = AR->getStart();
  if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context))
    return None;

  // Everything is fine.
  return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
}

bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
        return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
            .contains(RangeLHS);
      };

  // The check at the top of the function catches the case where the values
  // are known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}

bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
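  // For example, matching Result = (%x + 5)<nsw> against X = %x with
  // ExpectedFlags = FlagNSW succeeds and sets OutY = 5; the match fails if
  // the addend is not a constant, if the variable operand is not X, or if
  // any requested no-wrap flag is absent.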
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
        const SCEV *NonConstOp, *ConstOp;
        SCEV::NoWrapFlags FlagsPresent;

        if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
            !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
          return false;

        OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
        return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
      };

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    // X u<= (X + C)<nuw> for any C
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW))
      return true;
    break;

  case ICmpInst::ICMP_UGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULT:
    // X u< (X + C)<nuw> if C != 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW) && !C.isNullValue())
      return true;
    break;
  }

  return false;
}

bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing an arbitrary number of activations of
  // isKnownPredicateViaSplitting on the stack can result in exponential time
  // complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate. isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
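  // (A guard is a call to the llvm.experimental.guard intrinsic, e.g.
  //    call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
  // and if control proceeds past the guard, %cond must have held, so its
  // condition can be used much like a dominating branch condition.)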
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](const Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times. This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree. These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
  if (!DT.isReachableFromEntry(L->getHeader()))
    return false;

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {
    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge. This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within the
      // loop body that dominate the latch. The dominator tree better agree
      // with us on this:
      assert(DT.dominates(DominatingEdge, Latch) && "should be!");

      if (isImpliedCond(Pred, LHS, RHS, Condition,
                        BB != ContinuePredicate->getSuccessor(0)))
        return true;
    }
  }

  return false;
}

bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
                                                     ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS) {
  if (VerifyIR)
    assert(!verifyFunction(*BB->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  // If we cannot prove a strict comparison (e.g. a > b), maybe we can prove
  // the facts (a >= b && a != b) separately. A typical situation is when the
  // non-strict comparison is known from ranges and non-equality is known from
  // dominating predicates. If we are proving a strict comparison, we always
  // try to prove non-equality and the non-strict comparison separately.
  auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
  const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
  bool ProvedNonStrictComparison = false;
  bool ProvedNonEquality = false;

  if (ProvingStrictComparison) {
    ProvedNonStrictComparison =
        isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS);
    ProvedNonEquality =
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS);
    if (ProvedNonStrictComparison && ProvedNonEquality)
      return true;
  }

  // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
  auto ProveViaGuard = [&](const BasicBlock *Block) {
    if (isImpliedViaGuard(Block, Pred, LHS, RHS))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Try to prove (Pred, LHS, RHS) using isImpliedCond.
  auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
    const Instruction *Context = &BB->front();
    if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison = isImpliedCond(NonStrictPredicate, LHS, RHS,
                                                  Condition, Inverse, Context);
      if (!ProvedNonEquality)
        ProvedNonEquality = isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS,
                                          Condition, Inverse, Context);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Starting at the block's predecessor, climb up the predecessor chain as
  // long as we can find predecessors that have unique successors leading to
  // the original block.
  const Loop *ContainingLoop = LI.getLoopFor(BB);
  const BasicBlock *PredBB;
  if (ContainingLoop && ContainingLoop->getHeader() == BB)
    PredBB = ContainingLoop->getLoopPredecessor();
  else
    PredBB = BB->getSinglePredecessor();
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
    if (ProveViaGuard(Pair.first))
      return true;

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(LoopEntryPredicate->getCondition(),
                     LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, BB))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  return false;
}

bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                               ICmpInst::Predicate Pred,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L)
    return false;

  // Both LHS and RHS must be available at loop entry.
  assert(isAvailableAtLoopEntry(LHS, L) &&
         "LHS is not available at Loop Entry");
  assert(isAvailableAtLoopEntry(RHS, L) &&
         "RHS is not available at Loop Entry");
  return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    const Value *FoundCondValue, bool Inverse,
                                    const Instruction *Context) {
  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
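  // If the condition is (A && B) and is known to hold, then A and B hold
  // individually, so either operand alone may imply the desired fact;
  // dually, if (A || B) is known to be false, both A and B are false.
  // That is why each operand is tried on its own below.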
  if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
                             Context) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
                             Context);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
                             Context) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
                             Context);
    }
  }

  const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // We have found a conditional branch that dominates the loop or controls
  // the loop latch. Check to see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS, const SCEV *FoundRHS,
                                    const Instruction *Context) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    // For unsigned and equality predicates, try to prove that both found
    // operands fit into a narrow unsigned range. If so, try to prove facts in
    // narrow types.
    if (!CmpInst::isSigned(FoundPred)) {
      auto *NarrowType = LHS->getType();
      auto *WideType = FoundLHS->getType();
      auto BitWidth = getTypeSizeInBits(NarrowType);
      const SCEV *MaxValue = getZeroExtendExpr(
          getConstant(APInt::getMaxValue(BitWidth)), WideType);
      if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) &&
          isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) {
        const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
        const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
        if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
                                       TruncFoundRHS, Context))
          return true;
      }
    }

    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }
  return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
                                    FoundRHS, Context);
}

bool ScalarEvolution::isImpliedCondBalancedTypes(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
    const Instruction *Context) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(FoundLHS->getType()) &&
         "Types should be balanced!");
  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), RHS,
                                   LHS, FoundLHS, FoundRHS, Context);
  }

  // An unsigned comparison is the same as a signed comparison when both
  // operands are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t). The
    // range we consider has to correspond to the same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin. If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
                                  Context))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min). We know from the guarding condition that !(V == Min).
This gives us 10281 // 10282 // V `Pred` Min || V == Min && !(V == Min) 10283 // => V `Pred` Min 10284 // 10285 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 10286 10287 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), 10288 Context)) 10289 return true; 10290 break; 10291 10292 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. 10293 case ICmpInst::ICMP_SLE: 10294 case ICmpInst::ICMP_ULE: 10295 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 10296 LHS, V, getConstant(SharperMin), Context)) 10297 return true; 10298 LLVM_FALLTHROUGH; 10299 10300 case ICmpInst::ICMP_SLT: 10301 case ICmpInst::ICMP_ULT: 10302 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 10303 LHS, V, getConstant(Min), Context)) 10304 return true; 10305 break; 10306 10307 default: 10308 // No change 10309 break; 10310 } 10311 } 10312 } 10313 10314 // Check whether the actual condition is beyond sufficient. 10315 if (FoundPred == ICmpInst::ICMP_EQ) 10316 if (ICmpInst::isTrueWhenEqual(Pred)) 10317 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context)) 10318 return true; 10319 if (Pred == ICmpInst::ICMP_NE) 10320 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 10321 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, 10322 Context)) 10323 return true; 10324 10325 // Otherwise assume the worst. 10326 return false; 10327 } 10328 10329 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 10330 const SCEV *&L, const SCEV *&R, 10331 SCEV::NoWrapFlags &Flags) { 10332 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 10333 if (!AE || AE->getNumOperands() != 2) 10334 return false; 10335 10336 L = AE->getOperand(0); 10337 R = AE->getOperand(1); 10338 Flags = AE->getNoWrapFlags(); 10339 return true; 10340 } 10341 10342 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 10343 const SCEV *Less) { 10344 // We avoid subtracting expressions here because this function is usually 10345 // fairly deep in the call stack (i.e. is called many times). 10346 10347 // X - X = 0. 10348 if (More == Less) 10349 return APInt(getTypeSizeInBits(More->getType()), 0); 10350 10351 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 10352 const auto *LAR = cast<SCEVAddRecExpr>(Less); 10353 const auto *MAR = cast<SCEVAddRecExpr>(More); 10354 10355 if (LAR->getLoop() != MAR->getLoop()) 10356 return None; 10357 10358 // We look at affine expressions only; not for correctness but to keep 10359 // getStepRecurrence cheap. 10360 if (!LAR->isAffine() || !MAR->isAffine()) 10361 return None; 10362 10363 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 10364 return None; 10365 10366 Less = LAR->getStart(); 10367 More = MAR->getStart(); 10368 10369 // fall through 10370 } 10371 10372 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 10373 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 10374 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 10375 return M - L; 10376 } 10377 10378 SCEV::NoWrapFlags Flags; 10379 const SCEV *LLess = nullptr, *RLess = nullptr; 10380 const SCEV *LMore = nullptr, *RMore = nullptr; 10381 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 10382 // Compare (X + C1) vs X. 10383 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 10384 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 10385 if (RLess == More) 10386 return -(C1->getAPInt()); 10387 10388 // Compare X vs (X + C2). 
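  // For instance, if More = (%x + 7) and Less = %x, splitBinaryAdd
  // decomposes More and the code below concludes that More - Less = 7.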
10389 if (splitBinaryAdd(More, LMore, RMore, Flags)) 10390 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 10391 if (RMore == Less) 10392 return C2->getAPInt(); 10393 10394 // Compare (X + C1) vs (X + C2). 10395 if (C1 && C2 && RLess == RMore) 10396 return C2->getAPInt() - C1->getAPInt(); 10397 10398 return None; 10399 } 10400 10401 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( 10402 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10403 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) { 10404 // Try to recognize the following pattern: 10405 // 10406 // FoundRHS = ... 10407 // ... 10408 // loop: 10409 // FoundLHS = {Start,+,W} 10410 // context_bb: // Basic block from the same loop 10411 // known(Pred, FoundLHS, FoundRHS) 10412 // 10413 // If some predicate is known in the context of a loop, it is also known on 10414 // each iteration of this loop, including the first iteration. Therefore, in 10415 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to 10416 // prove the original pred using this fact. 10417 if (!Context) 10418 return false; 10419 const BasicBlock *ContextBB = Context->getParent(); 10420 // Make sure AR varies in the context block. 10421 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { 10422 const Loop *L = AR->getLoop(); 10423 // Make sure that context belongs to the loop and executes on 1st iteration 10424 // (if it ever executes at all). 10425 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 10426 return false; 10427 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) 10428 return false; 10429 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); 10430 } 10431 10432 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { 10433 const Loop *L = AR->getLoop(); 10434 // Make sure that context belongs to the loop and executes on 1st iteration 10435 // (if it ever executes at all). 10436 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 10437 return false; 10438 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) 10439 return false; 10440 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); 10441 } 10442 10443 return false; 10444 } 10445 10446 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 10447 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10448 const SCEV *FoundLHS, const SCEV *FoundRHS) { 10449 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 10450 return false; 10451 10452 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 10453 if (!AddRecLHS) 10454 return false; 10455 10456 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 10457 if (!AddRecFoundLHS) 10458 return false; 10459 10460 // We'd like to let SCEV reason about control dependencies, so we constrain 10461 // both the inequalities to be about add recurrences on the same loop. This 10462 // way we can use isLoopEntryGuardedByCond later. 10463 10464 const Loop *L = AddRecFoundLHS->getLoop(); 10465 if (L != AddRecLHS->getLoop()) 10466 return false; 10467 10468 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 10469 // 10470 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 10471 // ... (2) 10472 // 10473 // Informal proof for (2), assuming (1) [*]: 10474 // 10475 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... 
(3)[**] 10476 // 10477 // Then 10478 // 10479 // FoundLHS s< FoundRHS s< INT_MIN - C 10480 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 10481 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 10482 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 10483 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 10484 // <=> FoundLHS + C s< FoundRHS + C 10485 // 10486 // [*]: (1) can be proved by ruling out overflow. 10487 // 10488 // [**]: This can be proved by analyzing all the four possibilities: 10489 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 10490 // (A s>= 0, B s>= 0). 10491 // 10492 // Note: 10493 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 10494 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 10495 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 10496 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 10497 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 10498 // C)". 10499 10500 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 10501 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 10502 if (!LDiff || !RDiff || *LDiff != *RDiff) 10503 return false; 10504 10505 if (LDiff->isMinValue()) 10506 return true; 10507 10508 APInt FoundRHSLimit; 10509 10510 if (Pred == CmpInst::ICMP_ULT) { 10511 FoundRHSLimit = -(*RDiff); 10512 } else { 10513 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 10514 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 10515 } 10516 10517 // Try to prove (1) or (2), as needed. 10518 return isAvailableAtLoopEntry(FoundRHS, L) && 10519 isLoopEntryGuardedByCond(L, Pred, FoundRHS, 10520 getConstant(FoundRHSLimit)); 10521 } 10522 10523 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred, 10524 const SCEV *LHS, const SCEV *RHS, 10525 const SCEV *FoundLHS, 10526 const SCEV *FoundRHS, unsigned Depth) { 10527 const PHINode *LPhi = nullptr, *RPhi = nullptr; 10528 10529 auto ClearOnExit = make_scope_exit([&]() { 10530 if (LPhi) { 10531 bool Erased = PendingMerges.erase(LPhi); 10532 assert(Erased && "Failed to erase LPhi!"); 10533 (void)Erased; 10534 } 10535 if (RPhi) { 10536 bool Erased = PendingMerges.erase(RPhi); 10537 assert(Erased && "Failed to erase RPhi!"); 10538 (void)Erased; 10539 } 10540 }); 10541 10542 // Find respective Phis and check that they are not being pending. 10543 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) 10544 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) { 10545 if (!PendingMerges.insert(Phi).second) 10546 return false; 10547 LPhi = Phi; 10548 } 10549 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS)) 10550 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) { 10551 // If we detect a loop of Phi nodes being processed by this method, for 10552 // example: 10553 // 10554 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ] 10555 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ] 10556 // 10557 // we don't want to deal with a case that complex, so return conservative 10558 // answer false. 10559 if (!PendingMerges.insert(Phi).second) 10560 return false; 10561 RPhi = Phi; 10562 } 10563 10564 // If none of LHS, RHS is a Phi, nothing to do here. 10565 if (!LPhi && !RPhi) 10566 return false; 10567 10568 // If there is a SCEVUnknown Phi we are interested in, make it left. 
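  // Swapping the operands is sound because we also swap the predicate:
  // proving e.g. "a u< b" is equivalent to proving "b u> a".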
10569 if (!LPhi) { 10570 std::swap(LHS, RHS); 10571 std::swap(FoundLHS, FoundRHS); 10572 std::swap(LPhi, RPhi); 10573 Pred = ICmpInst::getSwappedPredicate(Pred); 10574 } 10575 10576 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!"); 10577 const BasicBlock *LBB = LPhi->getParent(); 10578 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 10579 10580 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) { 10581 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) || 10582 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) || 10583 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth); 10584 }; 10585 10586 if (RPhi && RPhi->getParent() == LBB) { 10587 // Case one: RHS is also a SCEVUnknown Phi from the same basic block. 10588 // If we compare two Phis from the same block, and for each entry block 10589 // the predicate is true for incoming values from this block, then the 10590 // predicate is also true for the Phis. 10591 for (const BasicBlock *IncBB : predecessors(LBB)) { 10592 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 10593 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB)); 10594 if (!ProvedEasily(L, R)) 10595 return false; 10596 } 10597 } else if (RAR && RAR->getLoop()->getHeader() == LBB) { 10598 // Case two: RHS is also a Phi from the same basic block, and it is an 10599 // AddRec. It means that there is a loop which has both AddRec and Unknown 10600 // PHIs, for it we can compare incoming values of AddRec from above the loop 10601 // and latch with their respective incoming values of LPhi. 10602 // TODO: Generalize to handle loops with many inputs in a header. 10603 if (LPhi->getNumIncomingValues() != 2) return false; 10604 10605 auto *RLoop = RAR->getLoop(); 10606 auto *Predecessor = RLoop->getLoopPredecessor(); 10607 assert(Predecessor && "Loop with AddRec with no predecessor?"); 10608 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor)); 10609 if (!ProvedEasily(L1, RAR->getStart())) 10610 return false; 10611 auto *Latch = RLoop->getLoopLatch(); 10612 assert(Latch && "Loop with AddRec with no latch?"); 10613 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch)); 10614 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this))) 10615 return false; 10616 } else { 10617 // In all other cases go over inputs of LHS and compare each of them to RHS, 10618 // the predicate is true for (LHS, RHS) if it is true for all such pairs. 10619 // At this point RHS is either a non-Phi, or it is a Phi from some block 10620 // different from LBB. 10621 for (const BasicBlock *IncBB : predecessors(LBB)) { 10622 // Check that RHS is available in this block. 
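      // (If RHS is not available in IncBB, there is nothing meaningful to
      // compare the incoming value of LPhi against on that edge.)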
10623 if (!dominates(RHS, IncBB)) 10624 return false; 10625 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 10626 if (!ProvedEasily(L, RHS)) 10627 return false; 10628 } 10629 } 10630 return true; 10631 } 10632 10633 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 10634 const SCEV *LHS, const SCEV *RHS, 10635 const SCEV *FoundLHS, 10636 const SCEV *FoundRHS, 10637 const Instruction *Context) { 10638 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10639 return true; 10640 10641 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10642 return true; 10643 10644 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS, 10645 Context)) 10646 return true; 10647 10648 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 10649 FoundLHS, FoundRHS) || 10650 // ~x < ~y --> x > y 10651 isImpliedCondOperandsHelper(Pred, LHS, RHS, 10652 getNotSCEV(FoundRHS), 10653 getNotSCEV(FoundLHS)); 10654 } 10655 10656 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values? 10657 template <typename MinMaxExprType> 10658 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, 10659 const SCEV *Candidate) { 10660 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr); 10661 if (!MinMaxExpr) 10662 return false; 10663 10664 return is_contained(MinMaxExpr->operands(), Candidate); 10665 } 10666 10667 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 10668 ICmpInst::Predicate Pred, 10669 const SCEV *LHS, const SCEV *RHS) { 10670 // If both sides are affine addrecs for the same loop, with equal 10671 // steps, and we know the recurrences don't wrap, then we only 10672 // need to check the predicate on the starting values. 10673 10674 if (!ICmpInst::isRelational(Pred)) 10675 return false; 10676 10677 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 10678 if (!LAR) 10679 return false; 10680 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 10681 if (!RAR) 10682 return false; 10683 if (LAR->getLoop() != RAR->getLoop()) 10684 return false; 10685 if (!LAR->isAffine() || !RAR->isAffine()) 10686 return false; 10687 10688 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 10689 return false; 10690 10691 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 10692 SCEV::FlagNSW : SCEV::FlagNUW; 10693 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) 10694 return false; 10695 10696 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); 10697 } 10698 10699 /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max 10700 /// expression? 10701 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, 10702 ICmpInst::Predicate Pred, 10703 const SCEV *LHS, const SCEV *RHS) { 10704 switch (Pred) { 10705 default: 10706 return false; 10707 10708 case ICmpInst::ICMP_SGE: 10709 std::swap(LHS, RHS); 10710 LLVM_FALLTHROUGH; 10711 case ICmpInst::ICMP_SLE: 10712 return 10713 // min(A, ...) <= A 10714 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) || 10715 // A <= max(A, ...) 10716 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); 10717 10718 case ICmpInst::ICMP_UGE: 10719 std::swap(LHS, RHS); 10720 LLVM_FALLTHROUGH; 10721 case ICmpInst::ICMP_ULE: 10722 return 10723 // min(A, ...) <= A 10724 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) || 10725 // A <= max(A, ...) 
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;

  // We only want to work with GT comparisons so far.
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }

  // For unsigned, try to reduce it to the corresponding signed comparison.
  if (Pred == ICmpInst::ICMP_UGT)
    // We can replace an unsigned predicate with its signed counterpart if all
    // involved values are non-negative.
    // TODO: We could have better support for unsigned.
    if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
      // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
      // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
      // use this fact to prove that LHS and RHS are non-negative.
      const SCEV *MinusOne = getMinusOne(LHS->getType());
      if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
                                FoundRHS) &&
          isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
                                FoundRHS))
        Pred = ICmpInst::ICMP_SGT;
    }

  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved, either trivially or by
  // using the found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
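    // The decomposition below relies on LHS = LL + LR not wrapping in the
    // signed sense, hence we require the addition to be <nsw>.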
10805 if (!LHSAddExpr->hasNoSignedWrap()) 10806 return false; 10807 10808 auto *LL = LHSAddExpr->getOperand(0); 10809 auto *LR = LHSAddExpr->getOperand(1); 10810 auto *MinusOne = getMinusOne(RHS->getType()); 10811 10812 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context. 10813 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) { 10814 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS); 10815 }; 10816 // Try to prove the following rule: 10817 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS). 10818 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS). 10819 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL)) 10820 return true; 10821 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) { 10822 Value *LL, *LR; 10823 // FIXME: Once we have SDiv implemented, we can get rid of this matching. 10824 10825 using namespace llvm::PatternMatch; 10826 10827 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) { 10828 // Rules for division. 10829 // We are going to perform some comparisons with Denominator and its 10830 // derivative expressions. In general case, creating a SCEV for it may 10831 // lead to a complex analysis of the entire graph, and in particular it 10832 // can request trip count recalculation for the same loop. This would 10833 // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid 10834 // this, we only want to create SCEVs that are constants in this section. 10835 // So we bail if Denominator is not a constant. 10836 if (!isa<ConstantInt>(LR)) 10837 return false; 10838 10839 auto *Denominator = cast<SCEVConstant>(getSCEV(LR)); 10840 10841 // We want to make sure that LHS = FoundLHS / Denominator. If it is so, 10842 // then a SCEV for the numerator already exists and matches with FoundLHS. 10843 auto *Numerator = getExistingSCEV(LL); 10844 if (!Numerator || Numerator->getType() != FoundLHS->getType()) 10845 return false; 10846 10847 // Make sure that the numerator matches with FoundLHS and the denominator 10848 // is positive. 10849 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator)) 10850 return false; 10851 10852 auto *DTy = Denominator->getType(); 10853 auto *FRHSTy = FoundRHS->getType(); 10854 if (DTy->isPointerTy() != FRHSTy->isPointerTy()) 10855 // One of types is a pointer and another one is not. We cannot extend 10856 // them properly to a wider type, so let us just reject this case. 10857 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help 10858 // to avoid this check. 10859 return false; 10860 10861 // Given that: 10862 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0. 10863 auto *WTy = getWiderType(DTy, FRHSTy); 10864 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy); 10865 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy); 10866 10867 // Try to prove the following rule: 10868 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS). 10869 // For example, given that FoundLHS > 2. It means that FoundLHS is at 10870 // least 3. If we divide it by Denominator < 4, we will have at least 1. 10871 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2)); 10872 if (isKnownNonPositive(RHS) && 10873 IsSGTViaContext(FoundRHSExt, DenomMinusTwo)) 10874 return true; 10875 10876 // Try to prove the following rule: 10877 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS). 10878 // For example, given that FoundLHS > -3. Then FoundLHS is at least -2. 
10879 // If we divide it by Denominator > 2, then: 10880 // 1. If FoundLHS is negative, then the result is 0. 10881 // 2. If FoundLHS is non-negative, then the result is non-negative. 10882 // Anyways, the result is non-negative. 10883 auto *MinusOne = getMinusOne(WTy); 10884 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt); 10885 if (isKnownNegative(RHS) && 10886 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne)) 10887 return true; 10888 } 10889 } 10890 10891 // If our expression contained SCEVUnknown Phis, and we split it down and now 10892 // need to prove something for them, try to prove the predicate for every 10893 // possible incoming values of those Phis. 10894 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1)) 10895 return true; 10896 10897 return false; 10898 } 10899 10900 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, 10901 const SCEV *LHS, const SCEV *RHS) { 10902 // zext x u<= sext x, sext x s<= zext x 10903 switch (Pred) { 10904 case ICmpInst::ICMP_SGE: 10905 std::swap(LHS, RHS); 10906 LLVM_FALLTHROUGH; 10907 case ICmpInst::ICMP_SLE: { 10908 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt. 10909 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS); 10910 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS); 10911 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 10912 return true; 10913 break; 10914 } 10915 case ICmpInst::ICMP_UGE: 10916 std::swap(LHS, RHS); 10917 LLVM_FALLTHROUGH; 10918 case ICmpInst::ICMP_ULE: { 10919 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt. 10920 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS); 10921 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS); 10922 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 10923 return true; 10924 break; 10925 } 10926 default: 10927 break; 10928 }; 10929 return false; 10930 } 10931 10932 bool 10933 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred, 10934 const SCEV *LHS, const SCEV *RHS) { 10935 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) || 10936 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) || 10937 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) || 10938 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) || 10939 isKnownPredicateViaNoOverflow(Pred, LHS, RHS); 10940 } 10941 10942 bool 10943 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, 10944 const SCEV *LHS, const SCEV *RHS, 10945 const SCEV *FoundLHS, 10946 const SCEV *FoundRHS) { 10947 switch (Pred) { 10948 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 10949 case ICmpInst::ICMP_EQ: 10950 case ICmpInst::ICMP_NE: 10951 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) 10952 return true; 10953 break; 10954 case ICmpInst::ICMP_SLT: 10955 case ICmpInst::ICMP_SLE: 10956 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) && 10957 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS)) 10958 return true; 10959 break; 10960 case ICmpInst::ICMP_SGT: 10961 case ICmpInst::ICMP_SGE: 10962 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) && 10963 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS)) 10964 return true; 10965 break; 10966 case ICmpInst::ICMP_ULT: 10967 case ICmpInst::ICMP_ULE: 10968 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) && 10969 
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  ConstantRange SatisfyingLHSRange =
      ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);

  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return SatisfyingLHSRange.contains(LHSRange);
}

bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
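  // For instance, in i8: if UMaxRHS = 250 and UMaxStrideMinusOne = 9, then
  // 255 - 9 = 246 u< 250, so an increment from a value just below RHS could
  // wrap past 255.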
11045 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 11046 } 11047 11048 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 11049 bool IsSigned, bool NoWrap) { 11050 if (NoWrap) return false; 11051 11052 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 11053 const SCEV *One = getOne(Stride->getType()); 11054 11055 if (IsSigned) { 11056 APInt MinRHS = getSignedRangeMin(RHS); 11057 APInt MinValue = APInt::getSignedMinValue(BitWidth); 11058 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 11059 11060 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 11061 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 11062 } 11063 11064 APInt MinRHS = getUnsignedRangeMin(RHS); 11065 APInt MinValue = APInt::getMinValue(BitWidth); 11066 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 11067 11068 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 11069 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 11070 } 11071 11072 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 11073 bool Equality) { 11074 const SCEV *One = getOne(Step->getType()); 11075 Delta = Equality ? getAddExpr(Delta, Step) 11076 : getAddExpr(Delta, getMinusSCEV(Step, One)); 11077 return getUDivExpr(Delta, Step); 11078 } 11079 11080 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 11081 const SCEV *Stride, 11082 const SCEV *End, 11083 unsigned BitWidth, 11084 bool IsSigned) { 11085 11086 assert(!isKnownNonPositive(Stride) && 11087 "Stride is expected strictly positive!"); 11088 // Calculate the maximum backedge count based on the range of values 11089 // permitted by Start, End, and Stride. 11090 const SCEV *MaxBECount; 11091 APInt MinStart = 11092 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 11093 11094 APInt StrideForMaxBECount = 11095 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 11096 11097 // We already know that the stride is positive, so we paper over conservatism 11098 // in our range computation by forcing StrideForMaxBECount to be at least one. 11099 // In theory this is unnecessary, but we expect MaxBECount to be a 11100 // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there 11101 // is nothing to constant fold it to). 11102 APInt One(BitWidth, 1, IsSigned); 11103 StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount); 11104 11105 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 11106 : APInt::getMaxValue(BitWidth); 11107 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 11108 11109 // Although End can be a MAX expression we estimate MaxEnd considering only 11110 // the case End = RHS of the loop termination condition. This is safe because 11111 // in the other case (End - Start) is zero, leading to a zero maximum backedge 11112 // taken count. 11113 APInt MaxEnd = IsSigned ? 
APIntOps::smin(getSignedRangeMax(End), Limit) 11114 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 11115 11116 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */, 11117 getConstant(StrideForMaxBECount) /* Step */, 11118 false /* Equality */); 11119 11120 return MaxBECount; 11121 } 11122 11123 ScalarEvolution::ExitLimit 11124 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 11125 const Loop *L, bool IsSigned, 11126 bool ControlsExit, bool AllowPredicates) { 11127 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 11128 11129 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 11130 bool PredicatedIV = false; 11131 11132 if (!IV && AllowPredicates) { 11133 // Try to make this an AddRec using runtime tests, in the first X 11134 // iterations of this loop, where X is the SCEV expression found by the 11135 // algorithm below. 11136 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 11137 PredicatedIV = true; 11138 } 11139 11140 // Avoid weird loops 11141 if (!IV || IV->getLoop() != L || !IV->isAffine()) 11142 return getCouldNotCompute(); 11143 11144 bool NoWrap = ControlsExit && 11145 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 11146 11147 const SCEV *Stride = IV->getStepRecurrence(*this); 11148 11149 bool PositiveStride = isKnownPositive(Stride); 11150 11151 // Avoid negative or zero stride values. 11152 if (!PositiveStride) { 11153 // We can compute the correct backedge taken count for loops with unknown 11154 // strides if we can prove that the loop is not an infinite loop with side 11155 // effects. Here's the loop structure we are trying to handle - 11156 // 11157 // i = start 11158 // do { 11159 // A[i] = i; 11160 // i += s; 11161 // } while (i < end); 11162 // 11163 // The backedge taken count for such loops is evaluated as - 11164 // (max(end, start + stride) - start - 1) /u stride 11165 // 11166 // The additional preconditions that we need to check to prove correctness 11167 // of the above formula is as follows - 11168 // 11169 // a) IV is either nuw or nsw depending upon signedness (indicated by the 11170 // NoWrap flag). 11171 // b) loop is single exit with no side effects. 11172 // 11173 // 11174 // Precondition a) implies that if the stride is negative, this is a single 11175 // trip loop. The backedge taken count formula reduces to zero in this case. 11176 // 11177 // Precondition b) implies that the unknown stride cannot be zero otherwise 11178 // we have UB. 11179 // 11180 // The positive stride case is the same as isKnownPositive(Stride) returning 11181 // true (original behavior of the function). 11182 // 11183 // We want to make sure that the stride is truly unknown as there are edge 11184 // cases where ScalarEvolution propagates no wrap flags to the 11185 // post-increment/decrement IV even though the increment/decrement operation 11186 // itself is wrapping. The computed backedge taken count may be wrong in 11187 // such cases. This is prevented by checking that the stride is not known to 11188 // be either positive or non-positive. 
For example, no wrap flags are 11189 // propagated to the post-increment IV of this loop with a trip count of 2 - 11190 // 11191 // unsigned char i; 11192 // for(i=127; i<128; i+=129) 11193 // A[i] = i; 11194 // 11195 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) || 11196 !loopHasNoSideEffects(L)) 11197 return getCouldNotCompute(); 11198 } else if (!Stride->isOne() && 11199 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) 11200 // Avoid proven overflow cases: this will ensure that the backedge taken 11201 // count will not generate any unsigned overflow. Relaxed no-overflow 11202 // conditions exploit NoWrapFlags, allowing to optimize in presence of 11203 // undefined behaviors like the case of C language. 11204 return getCouldNotCompute(); 11205 11206 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 11207 : ICmpInst::ICMP_ULT; 11208 const SCEV *Start = IV->getStart(); 11209 const SCEV *End = RHS; 11210 // When the RHS is not invariant, we do not know the end bound of the loop and 11211 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 11212 // calculate the MaxBECount, given the start, stride and max value for the end 11213 // bound of the loop (RHS), and the fact that IV does not overflow (which is 11214 // checked above). 11215 if (!isLoopInvariant(RHS, L)) { 11216 const SCEV *MaxBECount = computeMaxBECountForLT( 11217 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 11218 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 11219 false /*MaxOrZero*/, Predicates); 11220 } 11221 // If the backedge is taken at least once, then it will be taken 11222 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 11223 // is the LHS value of the less-than comparison the first time it is evaluated 11224 // and End is the RHS. 11225 const SCEV *BECountIfBackedgeTaken = 11226 computeBECount(getMinusSCEV(End, Start), Stride, false); 11227 // If the loop entry is guarded by the result of the backedge test of the 11228 // first loop iteration, then we know the backedge will be taken at least 11229 // once and so the backedge taken count is as above. If not then we use the 11230 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 11231 // as if the backedge is taken at least once max(End,Start) is End and so the 11232 // result is as above, and if not max(End,Start) is Start so we get a backedge 11233 // count of zero. 11234 const SCEV *BECount; 11235 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 11236 BECount = BECountIfBackedgeTaken; 11237 else { 11238 // If we know that RHS >= Start in the context of loop, then we know that 11239 // max(RHS, Start) = RHS at this point. 11240 if (isLoopEntryGuardedByCond( 11241 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, RHS, Start)) 11242 End = RHS; 11243 else 11244 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 11245 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 11246 } 11247 11248 const SCEV *MaxBECount; 11249 bool MaxOrZero = false; 11250 if (isa<SCEVConstant>(BECount)) 11251 MaxBECount = BECount; 11252 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 11253 // If we know exactly how many times the backedge will be taken if it's 11254 // taken at least once, then the backedge count will either be that or 11255 // zero. 
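    // (It is zero when the backedge is never taken, i.e. when the exit
    // condition already holds on entry to the loop.)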
11256 MaxBECount = BECountIfBackedgeTaken; 11257 MaxOrZero = true; 11258 } else { 11259 MaxBECount = computeMaxBECountForLT( 11260 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 11261 } 11262 11263 if (isa<SCEVCouldNotCompute>(MaxBECount) && 11264 !isa<SCEVCouldNotCompute>(BECount)) 11265 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 11266 11267 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 11268 } 11269 11270 ScalarEvolution::ExitLimit 11271 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 11272 const Loop *L, bool IsSigned, 11273 bool ControlsExit, bool AllowPredicates) { 11274 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 11275 // We handle only IV > Invariant 11276 if (!isLoopInvariant(RHS, L)) 11277 return getCouldNotCompute(); 11278 11279 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 11280 if (!IV && AllowPredicates) 11281 // Try to make this an AddRec using runtime tests, in the first X 11282 // iterations of this loop, where X is the SCEV expression found by the 11283 // algorithm below. 11284 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 11285 11286 // Avoid weird loops 11287 if (!IV || IV->getLoop() != L || !IV->isAffine()) 11288 return getCouldNotCompute(); 11289 11290 bool NoWrap = ControlsExit && 11291 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 11292 11293 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 11294 11295 // Avoid negative or zero stride values 11296 if (!isKnownPositive(Stride)) 11297 return getCouldNotCompute(); 11298 11299 // Avoid proven overflow cases: this will ensure that the backedge taken count 11300 // will not generate any unsigned overflow. Relaxed no-overflow conditions 11301 // exploit NoWrapFlags, allowing to optimize in presence of undefined 11302 // behaviors like the case of C language. 11303 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 11304 return getCouldNotCompute(); 11305 11306 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 11307 : ICmpInst::ICMP_UGT; 11308 11309 const SCEV *Start = IV->getStart(); 11310 const SCEV *End = RHS; 11311 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { 11312 // If we know that Start >= RHS in the context of loop, then we know that 11313 // min(RHS, Start) = RHS at this point. 11314 if (isLoopEntryGuardedByCond( 11315 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) 11316 End = RHS; 11317 else 11318 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 11319 } 11320 11321 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 11322 11323 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 11324 : getUnsignedRangeMax(Start); 11325 11326 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 11327 : getUnsignedRangeMin(Stride); 11328 11329 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 11330 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 11331 : APInt::getMinValue(BitWidth) + (MinStride - 1); 11332 11333 // Although End can be a MIN expression we estimate MinEnd considering only 11334 // the case End = RHS. This is safe because in the other case (Start - End) 11335 // is zero, leading to a zero maximum backedge taken count. 11336 APInt MinEnd = 11337 IsSigned ? 
APIntOps::smax(getSignedRangeMin(RHS), Limit) 11338 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 11339 11340 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) 11341 ? BECount 11342 : computeBECount(getConstant(MaxStart - MinEnd), 11343 getConstant(MinStride), false); 11344 11345 if (isa<SCEVCouldNotCompute>(MaxBECount)) 11346 MaxBECount = BECount; 11347 11348 return ExitLimit(BECount, MaxBECount, false, Predicates); 11349 } 11350 11351 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 11352 ScalarEvolution &SE) const { 11353 if (Range.isFullSet()) // Infinite loop. 11354 return SE.getCouldNotCompute(); 11355 11356 // If the start is a non-zero constant, shift the range to simplify things. 11357 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 11358 if (!SC->getValue()->isZero()) { 11359 SmallVector<const SCEV *, 4> Operands(operands()); 11360 Operands[0] = SE.getZero(SC->getType()); 11361 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 11362 getNoWrapFlags(FlagNW)); 11363 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 11364 return ShiftedAddRec->getNumIterationsInRange( 11365 Range.subtract(SC->getAPInt()), SE); 11366 // This is strange and shouldn't happen. 11367 return SE.getCouldNotCompute(); 11368 } 11369 11370 // The only time we can solve this is when we have all constant indices. 11371 // Otherwise, we cannot determine the overflow conditions. 11372 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 11373 return SE.getCouldNotCompute(); 11374 11375 // Okay at this point we know that all elements of the chrec are constants and 11376 // that the start element is zero. 11377 11378 // First check to see if the range contains zero. If not, the first 11379 // iteration exits. 11380 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 11381 if (!Range.contains(APInt(BitWidth, 0))) 11382 return SE.getZero(getType()); 11383 11384 if (isAffine()) { 11385 // If this is an affine expression then we have this situation: 11386 // Solve {0,+,A} in Range === Ax in Range 11387 11388 // We know that zero is in the range. If A is positive then we know that 11389 // the upper value of the range must be the first possible exit value. 11390 // If A is negative then the lower of the range is the last possible loop 11391 // value. Also note that we already checked for a full range. 11392 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 11393 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 11394 11395 // The exit value should be (End+A)/A. 11396 APInt ExitVal = (End + A).udiv(A); 11397 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 11398 11399 // Evaluate at the exit value. If we really did fall out of the valid 11400 // range, then we computed our trip count, otherwise wrap around or other 11401 // things must have happened. 11402 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 11403 if (Range.contains(Val->getValue())) 11404 return SE.getCouldNotCompute(); // Something strange happened 11405 11406 // Ensure that the previous value is in the range. This is a sanity check. 
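    // If ExitVal really is the first iteration that leaves Range, evaluating
    // the chrec at ExitVal - 1 must still yield a value inside Range.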
11407 assert(Range.contains( 11408 EvaluateConstantChrecAtConstant(this, 11409 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 11410 "Linear scev computation is off in a bad way!"); 11411 return SE.getConstant(ExitValue); 11412 } 11413 11414 if (isQuadratic()) { 11415 if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) 11416 return SE.getConstant(S.getValue()); 11417 } 11418 11419 return SE.getCouldNotCompute(); 11420 } 11421 11422 const SCEVAddRecExpr * 11423 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { 11424 assert(getNumOperands() > 1 && "AddRec with zero step?"); 11425 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), 11426 // but in this case we cannot guarantee that the value returned will be an 11427 // AddRec because SCEV does not have a fixed point where it stops 11428 // simplification: it is legal to return ({rec1} + {rec2}). For example, it 11429 // may happen if we reach arithmetic depth limit while simplifying. So we 11430 // construct the returned value explicitly. 11431 SmallVector<const SCEV *, 3> Ops; 11432 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and 11433 // (this + Step) is {A+B,+,B+C,+...,+,N}. 11434 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) 11435 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); 11436 // We know that the last operand is not a constant zero (otherwise it would 11437 // have been popped out earlier). This guarantees us that if the result has 11438 // the same last operand, then it will also not be popped out, meaning that 11439 // the returned value will be an AddRec. 11440 const SCEV *Last = getOperand(getNumOperands() - 1); 11441 assert(!Last->isZero() && "Recurrency with zero step?"); 11442 Ops.push_back(Last); 11443 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(), 11444 SCEV::FlagAnyWrap)); 11445 } 11446 11447 // Return true when S contains at least an undef value. 11448 static inline bool containsUndefs(const SCEV *S) { 11449 return SCEVExprContains(S, [](const SCEV *S) { 11450 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 11451 return isa<UndefValue>(SU->getValue()); 11452 return false; 11453 }); 11454 } 11455 11456 namespace { 11457 11458 // Collect all steps of SCEV expressions. 11459 struct SCEVCollectStrides { 11460 ScalarEvolution &SE; 11461 SmallVectorImpl<const SCEV *> &Strides; 11462 11463 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) 11464 : SE(SE), Strides(S) {} 11465 11466 bool follow(const SCEV *S) { 11467 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 11468 Strides.push_back(AR->getStepRecurrence(SE)); 11469 return true; 11470 } 11471 11472 bool isDone() const { return false; } 11473 }; 11474 11475 // Collect all SCEVUnknown and SCEVMulExpr expressions. 11476 struct SCEVCollectTerms { 11477 SmallVectorImpl<const SCEV *> &Terms; 11478 11479 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {} 11480 11481 bool follow(const SCEV *S) { 11482 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) || 11483 isa<SCEVSignExtendExpr>(S)) { 11484 if (!containsUndefs(S)) 11485 Terms.push_back(S); 11486 11487 // Stop recursion: once we collected a term, do not walk its operands. 11488 return false; 11489 } 11490 11491 // Keep looking. 11492 return true; 11493 } 11494 11495 bool isDone() const { return false; } 11496 }; 11497 11498 // Check if a SCEV contains an AddRecExpr. 
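// Intended to be used with visitAll, e.g.:
//
//   bool Found;
//   SCEVHasAddRec Visitor(Found);
//   visitAll(S, Visitor);
//   // Found is now true iff S contains an AddRecExpr.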
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRec, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExprs.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec = false;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
/// two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
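/// For example, in (%n * {0,+,%m}<%loop>) the stride %m is collected via
/// 1), and the unknown %n, which multiplies the AddRec, is collected via 2).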
11581 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 11582 SmallVectorImpl<const SCEV *> &Terms) { 11583 SmallVector<const SCEV *, 4> Strides; 11584 SCEVCollectStrides StrideCollector(*this, Strides); 11585 visitAll(Expr, StrideCollector); 11586 11587 LLVM_DEBUG({ 11588 dbgs() << "Strides:\n"; 11589 for (const SCEV *S : Strides) 11590 dbgs() << *S << "\n"; 11591 }); 11592 11593 for (const SCEV *S : Strides) { 11594 SCEVCollectTerms TermCollector(Terms); 11595 visitAll(S, TermCollector); 11596 } 11597 11598 LLVM_DEBUG({ 11599 dbgs() << "Terms:\n"; 11600 for (const SCEV *T : Terms) 11601 dbgs() << *T << "\n"; 11602 }); 11603 11604 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 11605 visitAll(Expr, MulCollector); 11606 } 11607 11608 static bool findArrayDimensionsRec(ScalarEvolution &SE, 11609 SmallVectorImpl<const SCEV *> &Terms, 11610 SmallVectorImpl<const SCEV *> &Sizes) { 11611 int Last = Terms.size() - 1; 11612 const SCEV *Step = Terms[Last]; 11613 11614 // End of recursion. 11615 if (Last == 0) { 11616 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 11617 SmallVector<const SCEV *, 2> Qs; 11618 for (const SCEV *Op : M->operands()) 11619 if (!isa<SCEVConstant>(Op)) 11620 Qs.push_back(Op); 11621 11622 Step = SE.getMulExpr(Qs); 11623 } 11624 11625 Sizes.push_back(Step); 11626 return true; 11627 } 11628 11629 for (const SCEV *&Term : Terms) { 11630 // Normalize the terms before the next call to findArrayDimensionsRec. 11631 const SCEV *Q, *R; 11632 SCEVDivision::divide(SE, Term, Step, &Q, &R); 11633 11634 // Bail out when GCD does not evenly divide one of the terms. 11635 if (!R->isZero()) 11636 return false; 11637 11638 Term = Q; 11639 } 11640 11641 // Remove all SCEVConstants. 11642 erase_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }); 11643 11644 if (Terms.size() > 0) 11645 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 11646 return false; 11647 11648 Sizes.push_back(Step); 11649 return true; 11650 } 11651 11652 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 11653 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11654 for (const SCEV *T : Terms) 11655 if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); })) 11656 return true; 11657 11658 return false; 11659 } 11660 11661 // Return the number of product terms in S. 11662 static inline int numberOfTerms(const SCEV *S) { 11663 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11664 return Expr->getNumOperands(); 11665 return 1; 11666 } 11667 11668 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11669 if (isa<SCEVConstant>(T)) 11670 return nullptr; 11671 11672 if (isa<SCEVUnknown>(T)) 11673 return T; 11674 11675 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11676 SmallVector<const SCEV *, 2> Factors; 11677 for (const SCEV *Op : M->operands()) 11678 if (!isa<SCEVConstant>(Op)) 11679 Factors.push_back(Op); 11680 11681 return SE.getMulExpr(Factors); 11682 } 11683 11684 return T; 11685 } 11686 11687 /// Return the size of an element read or written by Inst. 
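/// For example, for a "store i32 %v, i32* %p" this returns the SCEV for
/// sizeof(i32); for instructions that are neither loads nor stores it
/// returns nullptr.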
11688 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 11689 Type *Ty; 11690 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 11691 Ty = Store->getValueOperand()->getType(); 11692 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 11693 Ty = Load->getType(); 11694 else 11695 return nullptr; 11696 11697 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 11698 return getSizeOfExpr(ETy, Ty); 11699 } 11700 11701 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 11702 SmallVectorImpl<const SCEV *> &Sizes, 11703 const SCEV *ElementSize) { 11704 if (Terms.size() < 1 || !ElementSize) 11705 return; 11706 11707 // Early return when Terms do not contain parameters: we do not delinearize 11708 // non parametric SCEVs. 11709 if (!containsParameters(Terms)) 11710 return; 11711 11712 LLVM_DEBUG({ 11713 dbgs() << "Terms:\n"; 11714 for (const SCEV *T : Terms) 11715 dbgs() << *T << "\n"; 11716 }); 11717 11718 // Remove duplicates. 11719 array_pod_sort(Terms.begin(), Terms.end()); 11720 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 11721 11722 // Put larger terms first. 11723 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { 11724 return numberOfTerms(LHS) > numberOfTerms(RHS); 11725 }); 11726 11727 // Try to divide all terms by the element size. If term is not divisible by 11728 // element size, proceed with the original term. 11729 for (const SCEV *&Term : Terms) { 11730 const SCEV *Q, *R; 11731 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 11732 if (!Q->isZero()) 11733 Term = Q; 11734 } 11735 11736 SmallVector<const SCEV *, 4> NewTerms; 11737 11738 // Remove constant factors. 11739 for (const SCEV *T : Terms) 11740 if (const SCEV *NewT = removeConstantFactors(*this, T)) 11741 NewTerms.push_back(NewT); 11742 11743 LLVM_DEBUG({ 11744 dbgs() << "Terms after sorting:\n"; 11745 for (const SCEV *T : NewTerms) 11746 dbgs() << *T << "\n"; 11747 }); 11748 11749 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 11750 Sizes.clear(); 11751 return; 11752 } 11753 11754 // The last element to be pushed into Sizes is the size of an element. 11755 Sizes.push_back(ElementSize); 11756 11757 LLVM_DEBUG({ 11758 dbgs() << "Sizes:\n"; 11759 for (const SCEV *S : Sizes) 11760 dbgs() << *S << "\n"; 11761 }); 11762 } 11763 11764 void ScalarEvolution::computeAccessFunctions( 11765 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 11766 SmallVectorImpl<const SCEV *> &Sizes) { 11767 // Early exit in case this SCEV is not an affine multivariate function. 11768 if (Sizes.empty()) 11769 return; 11770 11771 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 11772 if (!AR->isAffine()) 11773 return; 11774 11775 const SCEV *Res = Expr; 11776 int Last = Sizes.size() - 1; 11777 for (int i = Last; i >= 0; i--) { 11778 const SCEV *Q, *R; 11779 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 11780 11781 LLVM_DEBUG({ 11782 dbgs() << "Res: " << *Res << "\n"; 11783 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 11784 dbgs() << "Res divided by Sizes[i]:\n"; 11785 dbgs() << "Quotient: " << *Q << "\n"; 11786 dbgs() << "Remainder: " << *R << "\n"; 11787 }); 11788 11789 Res = Q; 11790 11791 // Do not record the last subscript corresponding to the size of elements in 11792 // the array. 11793 if (i == Last) { 11794 11795 // Bail out if the remainder is too complex. 
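      // If the remainder still contains an AddRec, the offset in the
      // innermost dimension varies with a loop, so the guessed sizes cannot
      // be right; give up on the delinearization.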
11796 if (isa<SCEVAddRecExpr>(R)) { 11797 Subscripts.clear(); 11798 Sizes.clear(); 11799 return; 11800 } 11801 11802 continue; 11803 } 11804 11805 // Record the access function for the current subscript. 11806 Subscripts.push_back(R); 11807 } 11808 11809 // Also push in the last position the remainder of the last division: it is 11810 // the access function of the innermost dimension. 11811 Subscripts.push_back(Res); 11812 11813 std::reverse(Subscripts.begin(), Subscripts.end()); 11814 11815 LLVM_DEBUG({ 11816 dbgs() << "Subscripts:\n"; 11817 for (const SCEV *S : Subscripts) 11818 dbgs() << *S << "\n"; 11819 }); 11820 } 11821 11822 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 11823 /// sizes of an array access; the remainder of the delinearization is the 11824 /// offset at which the array starts. The SCEV->delinearize algorithm computes 11825 /// the multiples of the SCEV coefficients: it pattern-matches subexpressions 11826 /// in the stride and base of a SCEV, which corresponds to computing a GCD 11827 /// (greatest common divisor) of the base and stride. When SCEV->delinearize 11828 /// fails, it returns the SCEV unchanged. 11829 /// 11830 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 11831 /// 11832 /// void foo(long n, long m, long o, double A[n][m][o]) { 11833 /// 11834 /// for (long i = 0; i < n; i++) 11835 /// for (long j = 0; j < m; j++) 11836 /// for (long k = 0; k < o; k++) 11837 /// A[i][j][k] = 1.0; 11838 /// } 11839 /// 11840 /// the delinearization input is the following AddRec SCEV: 11841 /// 11842 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 11843 /// 11844 /// From this SCEV, we are able to say that the base offset of the access is %A 11845 /// because it appears as an offset that does not divide any of the strides in 11846 /// the loops: 11847 /// 11848 /// CHECK: Base offset: %A 11849 /// 11850 /// and then SCEV->delinearize determines the size of some of the dimensions of 11851 /// the array, as these are the multiples by which the strides grow: 11852 /// 11853 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 11854 /// 11855 /// Note that the outermost dimension remains of UnknownSize because there are 11856 /// no strides that would help identify the size of that dimension: when the 11857 /// array has been statically allocated, one could compute the size of that 11858 /// dimension by dividing the overall size of the array by the size of the 11859 /// known dimensions: %m * %o * 8. 11860 /// 11861 /// Finally, delinearize provides the access functions for the array reference 11862 /// that corresponds to A[i][j][k] in the above C testcase: 11863 /// 11864 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 11865 /// 11866 /// The testcases check the output of a function pass, DelinearizationPass, 11867 /// which walks through all loads and stores of a function, asks for the SCEV 11868 /// of each memory access with respect to all enclosing loops, calls 11869 /// SCEV->delinearize on that, and prints the results. 11870 void ScalarEvolution::delinearize(const SCEV *Expr, 11871 SmallVectorImpl<const SCEV *> &Subscripts, 11872 SmallVectorImpl<const SCEV *> &Sizes, 11873 const SCEV *ElementSize) { 11874 // First step: collect parametric terms.
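// For the A[i][j][k] example in the comment above, this step would roughly
// collect the parametric stride products of the AddRec, e.g. (8 * %m * %o)
// and (8 * %o); their common-divisor structure is what encodes the array
// dimensions recovered in the next step. (Illustrative; the exact set of
// terms depends on the collectors above.)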
11875 SmallVector<const SCEV *, 4> Terms; 11876 collectParametricTerms(Expr, Terms); 11877 11878 if (Terms.empty()) 11879 return; 11880 11881 // Second step: find subscript sizes. 11882 findArrayDimensions(Terms, Sizes, ElementSize); 11883 11884 if (Sizes.empty()) 11885 return; 11886 11887 // Third step: compute the access functions for each subscript. 11888 computeAccessFunctions(Expr, Subscripts, Sizes); 11889 11890 if (Subscripts.empty()) 11891 return; 11892 11893 LLVM_DEBUG({ 11894 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11895 dbgs() << "ArrayDecl[UnknownSize]"; 11896 for (const SCEV *S : Sizes) 11897 dbgs() << "[" << *S << "]"; 11898 11899 dbgs() << "\nArrayRef"; 11900 for (const SCEV *S : Subscripts) 11901 dbgs() << "[" << *S << "]"; 11902 dbgs() << "\n"; 11903 }); 11904 } 11905 11906 bool ScalarEvolution::getIndexExpressionsFromGEP( 11907 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts, 11908 SmallVectorImpl<int> &Sizes) { 11909 assert(Subscripts.empty() && Sizes.empty() && 11910 "Expected output lists to be empty on entry to this function."); 11911 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP"); 11912 Type *Ty = GEP->getPointerOperandType(); 11913 bool DroppedFirstDim = false; 11914 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 11915 const SCEV *Expr = getSCEV(GEP->getOperand(i)); 11916 if (i == 1) { 11917 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) { 11918 Ty = PtrTy->getElementType(); 11919 } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) { 11920 Ty = ArrayTy->getElementType(); 11921 } else { 11922 Subscripts.clear(); 11923 Sizes.clear(); 11924 return false; 11925 } 11926 if (auto *Const = dyn_cast<SCEVConstant>(Expr)) 11927 if (Const->getValue()->isZero()) { 11928 DroppedFirstDim = true; 11929 continue; 11930 } 11931 Subscripts.push_back(Expr); 11932 continue; 11933 } 11934 11935 auto *ArrayTy = dyn_cast<ArrayType>(Ty); 11936 if (!ArrayTy) { 11937 Subscripts.clear(); 11938 Sizes.clear(); 11939 return false; 11940 } 11941 11942 Subscripts.push_back(Expr); 11943 if (!(DroppedFirstDim && i == 2)) 11944 Sizes.push_back(ArrayTy->getNumElements()); 11945 11946 Ty = ArrayTy->getElementType(); 11947 } 11948 return !Subscripts.empty(); 11949 } 11950 11951 //===----------------------------------------------------------------------===// 11952 // SCEVCallbackVH Class Implementation 11953 //===----------------------------------------------------------------------===// 11954 11955 void ScalarEvolution::SCEVCallbackVH::deleted() { 11956 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11957 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11958 SE->ConstantEvolutionLoopExitValue.erase(PN); 11959 SE->eraseValueFromMap(getValPtr()); 11960 // this now dangles! 11961 } 11962 11963 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11964 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11965 11966 // Forget all the expressions associated with users of the old value, 11967 // so that future queries will recompute the expressions using the new 11968 // value. 11969 Value *Old = getValPtr(); 11970 SmallVector<User *, 16> Worklist(Old->users()); 11971 SmallPtrSet<User *, 8> Visited; 11972 while (!Worklist.empty()) { 11973 User *U = Worklist.pop_back_val(); 11974 // Deleting the Old value will cause this to dangle. Postpone 11975 // that until everything else is done. 
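// (Old itself is erased explicitly below, once the worklist is drained.)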
11976 if (U == Old) 11977 continue; 11978 if (!Visited.insert(U).second) 11979 continue; 11980 if (PHINode *PN = dyn_cast<PHINode>(U)) 11981 SE->ConstantEvolutionLoopExitValue.erase(PN); 11982 SE->eraseValueFromMap(U); 11983 llvm::append_range(Worklist, U->users()); 11984 } 11985 // Delete the Old value. 11986 if (PHINode *PN = dyn_cast<PHINode>(Old)) 11987 SE->ConstantEvolutionLoopExitValue.erase(PN); 11988 SE->eraseValueFromMap(Old); 11989 // this now dangles! 11990 } 11991 11992 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 11993 : CallbackVH(V), SE(se) {} 11994 11995 //===----------------------------------------------------------------------===// 11996 // ScalarEvolution Class Implementation 11997 //===----------------------------------------------------------------------===// 11998 11999 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 12000 AssumptionCache &AC, DominatorTree &DT, 12001 LoopInfo &LI) 12002 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 12003 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 12004 LoopDispositions(64), BlockDispositions(64) { 12005 // To use guards for proving predicates, we need to scan every instruction in 12006 // relevant basic blocks, and not just terminators. Doing this is a waste of 12007 // time if the IR does not actually contain any calls to 12008 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 12009 // 12010 // This pessimizes the case where a pass that preserves ScalarEvolution wants 12011 // to _add_ guards to the module when there weren't any before, and wants 12012 // ScalarEvolution to optimize based on those guards. For now we prefer to be 12013 // efficient in lieu of being smart in that rather obscure case. 
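// Guard calls look like the following (illustrative IR):
//   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
// so it suffices to check that the declaration exists and has uses.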
12014 12015 auto *GuardDecl = F.getParent()->getFunction( 12016 Intrinsic::getName(Intrinsic::experimental_guard)); 12017 HasGuards = GuardDecl && !GuardDecl->use_empty(); 12018 } 12019 12020 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 12021 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 12022 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 12023 ValueExprMap(std::move(Arg.ValueExprMap)), 12024 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 12025 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 12026 PendingMerges(std::move(Arg.PendingMerges)), 12027 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 12028 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 12029 PredicatedBackedgeTakenCounts( 12030 std::move(Arg.PredicatedBackedgeTakenCounts)), 12031 ConstantEvolutionLoopExitValue( 12032 std::move(Arg.ConstantEvolutionLoopExitValue)), 12033 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 12034 LoopDispositions(std::move(Arg.LoopDispositions)), 12035 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 12036 BlockDispositions(std::move(Arg.BlockDispositions)), 12037 UnsignedRanges(std::move(Arg.UnsignedRanges)), 12038 SignedRanges(std::move(Arg.SignedRanges)), 12039 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 12040 UniquePreds(std::move(Arg.UniquePreds)), 12041 SCEVAllocator(std::move(Arg.SCEVAllocator)), 12042 LoopUsers(std::move(Arg.LoopUsers)), 12043 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 12044 FirstUnknown(Arg.FirstUnknown) { 12045 Arg.FirstUnknown = nullptr; 12046 } 12047 12048 ScalarEvolution::~ScalarEvolution() { 12049 // Iterate through all the SCEVUnknown instances and call their 12050 // destructors, so that they release their references to their values. 12051 for (SCEVUnknown *U = FirstUnknown; U;) { 12052 SCEVUnknown *Tmp = U; 12053 U = U->Next; 12054 Tmp->~SCEVUnknown(); 12055 } 12056 FirstUnknown = nullptr; 12057 12058 ExprValueMap.clear(); 12059 ValueExprMap.clear(); 12060 HasRecMap.clear(); 12061 12062 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 12063 // that a loop had multiple computable exits. 
12064 for (auto &BTCI : BackedgeTakenCounts) 12065 BTCI.second.clear(); 12066 for (auto &BTCI : PredicatedBackedgeTakenCounts) 12067 BTCI.second.clear(); 12068 12069 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 12070 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 12071 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 12072 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 12073 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 12074 } 12075 12076 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 12077 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 12078 } 12079 12080 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 12081 const Loop *L) { 12082 // Print all inner loops first 12083 for (Loop *I : *L) 12084 PrintLoopInfo(OS, SE, I); 12085 12086 OS << "Loop "; 12087 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12088 OS << ": "; 12089 12090 SmallVector<BasicBlock *, 8> ExitingBlocks; 12091 L->getExitingBlocks(ExitingBlocks); 12092 if (ExitingBlocks.size() != 1) 12093 OS << "<multiple exits> "; 12094 12095 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 12096 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 12097 else 12098 OS << "Unpredictable backedge-taken count.\n"; 12099 12100 if (ExitingBlocks.size() > 1) 12101 for (BasicBlock *ExitingBlock : ExitingBlocks) { 12102 OS << " exit count for " << ExitingBlock->getName() << ": " 12103 << *SE->getExitCount(L, ExitingBlock) << "\n"; 12104 } 12105 12106 OS << "Loop "; 12107 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12108 OS << ": "; 12109 12110 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 12111 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 12112 if (SE->isBackedgeTakenCountMaxOrZero(L)) 12113 OS << ", actual taken count either this or zero."; 12114 } else { 12115 OS << "Unpredictable max backedge-taken count. "; 12116 } 12117 12118 OS << "\n" 12119 "Loop "; 12120 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12121 OS << ": "; 12122 12123 SCEVUnionPredicate Pred; 12124 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 12125 if (!isa<SCEVCouldNotCompute>(PBT)) { 12126 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 12127 OS << " Predicates:\n"; 12128 Pred.print(OS, 4); 12129 } else { 12130 OS << "Unpredictable predicated backedge-taken count. "; 12131 } 12132 OS << "\n"; 12133 12134 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 12135 OS << "Loop "; 12136 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12137 OS << ": "; 12138 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 12139 } 12140 } 12141 12142 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 12143 switch (LD) { 12144 case ScalarEvolution::LoopVariant: 12145 return "Variant"; 12146 case ScalarEvolution::LoopInvariant: 12147 return "Invariant"; 12148 case ScalarEvolution::LoopComputable: 12149 return "Computable"; 12150 } 12151 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 12152 } 12153 12154 void ScalarEvolution::print(raw_ostream &OS) const { 12155 // ScalarEvolution's implementation of the print method is to print 12156 // out SCEV values of all instructions that are interesting. Doing 12157 // this potentially causes it to create new SCEV objects though, 12158 // which technically conflicts with the const qualifier. 
This isn't 12159 // observable from outside the class though, so casting away the 12160 // const isn't dangerous. 12161 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12162 12163 if (ClassifyExpressions) { 12164 OS << "Classifying expressions for: "; 12165 F.printAsOperand(OS, /*PrintType=*/false); 12166 OS << "\n"; 12167 for (Instruction &I : instructions(F)) 12168 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 12169 OS << I << '\n'; 12170 OS << " --> "; 12171 const SCEV *SV = SE.getSCEV(&I); 12172 SV->print(OS); 12173 if (!isa<SCEVCouldNotCompute>(SV)) { 12174 OS << " U: "; 12175 SE.getUnsignedRange(SV).print(OS); 12176 OS << " S: "; 12177 SE.getSignedRange(SV).print(OS); 12178 } 12179 12180 const Loop *L = LI.getLoopFor(I.getParent()); 12181 12182 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 12183 if (AtUse != SV) { 12184 OS << " --> "; 12185 AtUse->print(OS); 12186 if (!isa<SCEVCouldNotCompute>(AtUse)) { 12187 OS << " U: "; 12188 SE.getUnsignedRange(AtUse).print(OS); 12189 OS << " S: "; 12190 SE.getSignedRange(AtUse).print(OS); 12191 } 12192 } 12193 12194 if (L) { 12195 OS << "\t\t" "Exits: "; 12196 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 12197 if (!SE.isLoopInvariant(ExitValue, L)) { 12198 OS << "<<Unknown>>"; 12199 } else { 12200 OS << *ExitValue; 12201 } 12202 12203 bool First = true; 12204 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 12205 if (First) { 12206 OS << "\t\t" "LoopDispositions: { "; 12207 First = false; 12208 } else { 12209 OS << ", "; 12210 } 12211 12212 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12213 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 12214 } 12215 12216 for (auto *InnerL : depth_first(L)) { 12217 if (InnerL == L) 12218 continue; 12219 if (First) { 12220 OS << "\t\t" "LoopDispositions: { "; 12221 First = false; 12222 } else { 12223 OS << ", "; 12224 } 12225 12226 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12227 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 12228 } 12229 12230 OS << " }"; 12231 } 12232 12233 OS << "\n"; 12234 } 12235 } 12236 12237 OS << "Determining loop execution counts for: "; 12238 F.printAsOperand(OS, /*PrintType=*/false); 12239 OS << "\n"; 12240 for (Loop *I : LI) 12241 PrintLoopInfo(OS, &SE, I); 12242 } 12243 12244 ScalarEvolution::LoopDisposition 12245 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 12246 auto &Values = LoopDispositions[S]; 12247 for (auto &V : Values) { 12248 if (V.getPointer() == L) 12249 return V.getInt(); 12250 } 12251 Values.emplace_back(L, LoopVariant); 12252 LoopDisposition D = computeLoopDisposition(S, L); 12253 auto &Values2 = LoopDispositions[S]; 12254 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12255 if (V.getPointer() == L) { 12256 V.setInt(D); 12257 break; 12258 } 12259 } 12260 return D; 12261 } 12262 12263 ScalarEvolution::LoopDisposition 12264 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 12265 switch (S->getSCEVType()) { 12266 case scConstant: 12267 return LoopInvariant; 12268 case scPtrToInt: 12269 case scTruncate: 12270 case scZeroExtend: 12271 case scSignExtend: 12272 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 12273 case scAddRecExpr: { 12274 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12275 12276 // If L is the addrec's loop, it's computable. 
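// (e.g. {0,+,1}<%L> queried against %L itself: the value varies from
// iteration to iteration, but its evolution across %L is fully described by
// the recurrence.)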
12277 if (AR->getLoop() == L) 12278 return LoopComputable; 12279 12280 // Add recurrences are never invariant in the function-body (null loop). 12281 if (!L) 12282 return LoopVariant; 12283 12284 // Everything that is not defined at loop entry is variant. 12285 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 12286 return LoopVariant; 12287 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 12288 " dominate the contained loop's header?"); 12289 12290 // This recurrence is invariant w.r.t. L if AR's loop contains L. 12291 if (AR->getLoop()->contains(L)) 12292 return LoopInvariant; 12293 12294 // This recurrence is variant w.r.t. L if any of its operands 12295 // are variant. 12296 for (auto *Op : AR->operands()) 12297 if (!isLoopInvariant(Op, L)) 12298 return LoopVariant; 12299 12300 // Otherwise it's loop-invariant. 12301 return LoopInvariant; 12302 } 12303 case scAddExpr: 12304 case scMulExpr: 12305 case scUMaxExpr: 12306 case scSMaxExpr: 12307 case scUMinExpr: 12308 case scSMinExpr: { 12309 bool HasVarying = false; 12310 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 12311 LoopDisposition D = getLoopDisposition(Op, L); 12312 if (D == LoopVariant) 12313 return LoopVariant; 12314 if (D == LoopComputable) 12315 HasVarying = true; 12316 } 12317 return HasVarying ? LoopComputable : LoopInvariant; 12318 } 12319 case scUDivExpr: { 12320 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12321 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 12322 if (LD == LoopVariant) 12323 return LoopVariant; 12324 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 12325 if (RD == LoopVariant) 12326 return LoopVariant; 12327 return (LD == LoopInvariant && RD == LoopInvariant) ? 12328 LoopInvariant : LoopComputable; 12329 } 12330 case scUnknown: 12331 // All non-instruction values are loop invariant. All instructions are loop 12332 // invariant if they are not contained in the specified loop. 12333 // Instructions are never considered invariant in the function body 12334 // (null loop) because they are defined within the "loop". 12335 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 12336 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 12337 return LoopInvariant; 12338 case scCouldNotCompute: 12339 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12340 } 12341 llvm_unreachable("Unknown SCEV kind!"); 12342 } 12343 12344 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 12345 return getLoopDisposition(S, L) == LoopInvariant; 12346 } 12347 12348 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 12349 return getLoopDisposition(S, L) == LoopComputable; 12350 } 12351 12352 ScalarEvolution::BlockDisposition 12353 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12354 auto &Values = BlockDispositions[S]; 12355 for (auto &V : Values) { 12356 if (V.getPointer() == BB) 12357 return V.getInt(); 12358 } 12359 Values.emplace_back(BB, DoesNotDominateBlock); 12360 BlockDisposition D = computeBlockDisposition(S, BB); 12361 auto &Values2 = BlockDispositions[S]; 12362 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12363 if (V.getPointer() == BB) { 12364 V.setInt(D); 12365 break; 12366 } 12367 } 12368 return D; 12369 } 12370 12371 ScalarEvolution::BlockDisposition 12372 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12373 switch (S->getSCEVType()) { 12374 case scConstant: 12375 return ProperlyDominatesBlock; 12376 case scPtrToInt: 12377 case scTruncate: 12378 case scZeroExtend: 12379 case scSignExtend: 12380 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 12381 case scAddRecExpr: { 12382 // This uses a "dominates" query instead of "properly dominates" query 12383 // to test for proper dominance too, because the instruction which 12384 // produces the addrec's value is a PHI, and a PHI effectively properly 12385 // dominates its entire containing block. 12386 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12387 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 12388 return DoesNotDominateBlock; 12389 12390 // Fall through into SCEVNAryExpr handling. 12391 LLVM_FALLTHROUGH; 12392 } 12393 case scAddExpr: 12394 case scMulExpr: 12395 case scUMaxExpr: 12396 case scSMaxExpr: 12397 case scUMinExpr: 12398 case scSMinExpr: { 12399 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 12400 bool Proper = true; 12401 for (const SCEV *NAryOp : NAry->operands()) { 12402 BlockDisposition D = getBlockDisposition(NAryOp, BB); 12403 if (D == DoesNotDominateBlock) 12404 return DoesNotDominateBlock; 12405 if (D == DominatesBlock) 12406 Proper = false; 12407 } 12408 return Proper ? ProperlyDominatesBlock : DominatesBlock; 12409 } 12410 case scUDivExpr: { 12411 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12412 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 12413 BlockDisposition LD = getBlockDisposition(LHS, BB); 12414 if (LD == DoesNotDominateBlock) 12415 return DoesNotDominateBlock; 12416 BlockDisposition RD = getBlockDisposition(RHS, BB); 12417 if (RD == DoesNotDominateBlock) 12418 return DoesNotDominateBlock; 12419 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
12420 ProperlyDominatesBlock : DominatesBlock; 12421 } 12422 case scUnknown: 12423 if (Instruction *I = 12424 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 12425 if (I->getParent() == BB) 12426 return DominatesBlock; 12427 if (DT.properlyDominates(I->getParent(), BB)) 12428 return ProperlyDominatesBlock; 12429 return DoesNotDominateBlock; 12430 } 12431 return ProperlyDominatesBlock; 12432 case scCouldNotCompute: 12433 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12434 } 12435 llvm_unreachable("Unknown SCEV kind!"); 12436 } 12437 12438 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 12439 return getBlockDisposition(S, BB) >= DominatesBlock; 12440 } 12441 12442 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 12443 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 12444 } 12445 12446 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 12447 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 12448 } 12449 12450 bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const { 12451 auto IsS = [&](const SCEV *X) { return S == X; }; 12452 auto ContainsS = [&](const SCEV *X) { 12453 return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS); 12454 }; 12455 return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken); 12456 } 12457 12458 void 12459 ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 12460 ValuesAtScopes.erase(S); 12461 LoopDispositions.erase(S); 12462 BlockDispositions.erase(S); 12463 UnsignedRanges.erase(S); 12464 SignedRanges.erase(S); 12465 ExprValueMap.erase(S); 12466 HasRecMap.erase(S); 12467 MinTrailingZerosCache.erase(S); 12468 12469 for (auto I = PredicatedSCEVRewrites.begin(); 12470 I != PredicatedSCEVRewrites.end();) { 12471 std::pair<const SCEV *, const Loop *> Entry = I->first; 12472 if (Entry.first == S) 12473 PredicatedSCEVRewrites.erase(I++); 12474 else 12475 ++I; 12476 } 12477 12478 auto RemoveSCEVFromBackedgeMap = 12479 [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 12480 for (auto I = Map.begin(), E = Map.end(); I != E;) { 12481 BackedgeTakenInfo &BEInfo = I->second; 12482 if (BEInfo.hasOperand(S, this)) { 12483 BEInfo.clear(); 12484 Map.erase(I++); 12485 } else 12486 ++I; 12487 } 12488 }; 12489 12490 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 12491 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 12492 } 12493 12494 void 12495 ScalarEvolution::getUsedLoops(const SCEV *S, 12496 SmallPtrSetImpl<const Loop *> &LoopsUsed) { 12497 struct FindUsedLoops { 12498 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed) 12499 : LoopsUsed(LoopsUsed) {} 12500 SmallPtrSetImpl<const Loop *> &LoopsUsed; 12501 bool follow(const SCEV *S) { 12502 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) 12503 LoopsUsed.insert(AR->getLoop()); 12504 return true; 12505 } 12506 12507 bool isDone() const { return false; } 12508 }; 12509 12510 FindUsedLoops F(LoopsUsed); 12511 SCEVTraversal<FindUsedLoops>(F).visitAll(S); 12512 } 12513 12514 void ScalarEvolution::addToLoopUseLists(const SCEV *S) { 12515 SmallPtrSet<const Loop *, 8> LoopsUsed; 12516 getUsedLoops(S, LoopsUsed); 12517 for (auto *L : LoopsUsed) 12518 LoopUsers[L].push_back(S); 12519 } 12520 12521 void ScalarEvolution::verify() const { 12522 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12523 ScalarEvolution SE2(F, TLI, AC, DT, LI); 12524 12525 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end()); 12526 12527 // Maps SCEV expressions from one
ScalarEvolution "universe" to another. 12528 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { 12529 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} 12530 12531 const SCEV *visitConstant(const SCEVConstant *Constant) { 12532 return SE.getConstant(Constant->getAPInt()); 12533 } 12534 12535 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12536 return SE.getUnknown(Expr->getValue()); 12537 } 12538 12539 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { 12540 return SE.getCouldNotCompute(); 12541 } 12542 }; 12543 12544 SCEVMapper SCM(SE2); 12545 12546 while (!LoopStack.empty()) { 12547 auto *L = LoopStack.pop_back_val(); 12548 llvm::append_range(LoopStack, *L); 12549 12550 auto *CurBECount = SCM.visit( 12551 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L)); 12552 auto *NewBECount = SE2.getBackedgeTakenCount(L); 12553 12554 if (CurBECount == SE2.getCouldNotCompute() || 12555 NewBECount == SE2.getCouldNotCompute()) { 12556 // NB! This situation is legal, but is very suspicious -- whatever pass 12557 // change the loop to make a trip count go from could not compute to 12558 // computable or vice-versa *should have* invalidated SCEV. However, we 12559 // choose not to assert here (for now) since we don't want false 12560 // positives. 12561 continue; 12562 } 12563 12564 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) { 12565 // SCEV treats "undef" as an unknown but consistent value (i.e. it does 12566 // not propagate undef aggressively). This means we can (and do) fail 12567 // verification in cases where a transform makes the trip count of a loop 12568 // go from "undef" to "undef+1" (say). The transform is fine, since in 12569 // both cases the loop iterates "undef" times, but SCEV thinks we 12570 // increased the trip count of the loop by 1 incorrectly. 12571 continue; 12572 } 12573 12574 if (SE.getTypeSizeInBits(CurBECount->getType()) > 12575 SE.getTypeSizeInBits(NewBECount->getType())) 12576 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); 12577 else if (SE.getTypeSizeInBits(CurBECount->getType()) < 12578 SE.getTypeSizeInBits(NewBECount->getType())) 12579 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); 12580 12581 const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount); 12582 12583 // Unless VerifySCEVStrict is set, we only compare constant deltas. 12584 if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) { 12585 dbgs() << "Trip Count for " << *L << " Changed!\n"; 12586 dbgs() << "Old: " << *CurBECount << "\n"; 12587 dbgs() << "New: " << *NewBECount << "\n"; 12588 dbgs() << "Delta: " << *Delta << "\n"; 12589 std::abort(); 12590 } 12591 } 12592 12593 // Collect all valid loops currently in LoopInfo. 12594 SmallPtrSet<Loop *, 32> ValidLoops; 12595 SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end()); 12596 while (!Worklist.empty()) { 12597 Loop *L = Worklist.pop_back_val(); 12598 if (ValidLoops.contains(L)) 12599 continue; 12600 ValidLoops.insert(L); 12601 Worklist.append(L->begin(), L->end()); 12602 } 12603 // Check for SCEV expressions referencing invalid/deleted loops. 
12604 for (auto &KV : ValueExprMap) { 12605 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second); 12606 if (!AR) 12607 continue; 12608 assert(ValidLoops.contains(AR->getLoop()) && 12609 "AddRec references invalid loop"); 12610 } 12611 } 12612 12613 bool ScalarEvolution::invalidate( 12614 Function &F, const PreservedAnalyses &PA, 12615 FunctionAnalysisManager::Invalidator &Inv) { 12616 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 12617 // of its dependencies is invalidated. 12618 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 12619 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 12620 Inv.invalidate<AssumptionAnalysis>(F, PA) || 12621 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 12622 Inv.invalidate<LoopAnalysis>(F, PA); 12623 } 12624 12625 AnalysisKey ScalarEvolutionAnalysis::Key; 12626 12627 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 12628 FunctionAnalysisManager &AM) { 12629 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 12630 AM.getResult<AssumptionAnalysis>(F), 12631 AM.getResult<DominatorTreeAnalysis>(F), 12632 AM.getResult<LoopAnalysis>(F)); 12633 } 12634 12635 PreservedAnalyses 12636 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 12637 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 12638 return PreservedAnalyses::all(); 12639 } 12640 12641 PreservedAnalyses 12642 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 12643 // For compatibility with opt's -analyze feature under legacy pass manager 12644 // which was not ported to NPM. This keeps tests using 12645 // update_analyze_test_checks.py working. 12646 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 12647 << F.getName() << "':\n"; 12648 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 12649 return PreservedAnalyses::all(); 12650 } 12651 12652 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 12653 "Scalar Evolution Analysis", false, true) 12654 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 12655 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 12656 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 12657 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 12658 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 12659 "Scalar Evolution Analysis", false, true) 12660 12661 char ScalarEvolutionWrapperPass::ID = 0; 12662 12663 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 12664 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 12665 } 12666 12667 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 12668 SE.reset(new ScalarEvolution( 12669 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 12670 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 12671 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 12672 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 12673 return false; 12674 } 12675 12676 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 12677 12678 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 12679 SE->print(OS); 12680 } 12681 12682 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 12683 if (!VerifySCEV) 12684 return; 12685 12686 SE->verify(); 12687 } 12688 12689 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 12690 AU.setPreservesAll(); 12691 AU.addRequiredTransitive<AssumptionCacheTracker>(); 12692 
AU.addRequiredTransitive<LoopInfoWrapperPass>(); 12693 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 12694 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 12695 } 12696 12697 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 12698 const SCEV *RHS) { 12699 FoldingSetNodeID ID; 12700 assert(LHS->getType() == RHS->getType() && 12701 "Type mismatch between LHS and RHS"); 12702 // Unique this node based on the arguments 12703 ID.AddInteger(SCEVPredicate::P_Equal); 12704 ID.AddPointer(LHS); 12705 ID.AddPointer(RHS); 12706 void *IP = nullptr; 12707 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12708 return S; 12709 SCEVEqualPredicate *Eq = new (SCEVAllocator) 12710 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 12711 UniquePreds.InsertNode(Eq, IP); 12712 return Eq; 12713 } 12714 12715 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 12716 const SCEVAddRecExpr *AR, 12717 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12718 FoldingSetNodeID ID; 12719 // Unique this node based on the arguments 12720 ID.AddInteger(SCEVPredicate::P_Wrap); 12721 ID.AddPointer(AR); 12722 ID.AddInteger(AddedFlags); 12723 void *IP = nullptr; 12724 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12725 return S; 12726 auto *OF = new (SCEVAllocator) 12727 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 12728 UniquePreds.InsertNode(OF, IP); 12729 return OF; 12730 } 12731 12732 namespace { 12733 12734 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 12735 public: 12736 12737 /// Rewrites \p S in the context of a loop L and the SCEV predication 12738 /// infrastructure. 12739 /// 12740 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 12741 /// equivalences present in \p Pred. 12742 /// 12743 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 12744 /// \p NewPreds such that the result will be an AddRecExpr. 12745 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 12746 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12747 SCEVUnionPredicate *Pred) { 12748 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 12749 return Rewriter.visit(S); 12750 } 12751 12752 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12753 if (Pred) { 12754 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 12755 for (auto *Pred : ExprPreds) 12756 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 12757 if (IPred->getLHS() == Expr) 12758 return IPred->getRHS(); 12759 } 12760 return convertToAddRecWithPreds(Expr); 12761 } 12762 12763 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 12764 const SCEV *Operand = visit(Expr->getOperand()); 12765 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12766 if (AR && AR->getLoop() == L && AR->isAffine()) { 12767 // This couldn't be folded because the operand didn't have the nuw 12768 // flag. Add the nusw flag as an assumption that we could make. 
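// That is, rewrite zext({A,+,S}<%L>) to {zext(A),+,sext(S)}<%L> guarded by a
// runtime <nusw> check; the step is sign-extended so that a potentially
// negative step is still handled correctly.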
12769 const SCEV *Step = AR->getStepRecurrence(SE); 12770 Type *Ty = Expr->getType(); 12771 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 12772 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 12773 SE.getSignExtendExpr(Step, Ty), L, 12774 AR->getNoWrapFlags()); 12775 } 12776 return SE.getZeroExtendExpr(Operand, Expr->getType()); 12777 } 12778 12779 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 12780 const SCEV *Operand = visit(Expr->getOperand()); 12781 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12782 if (AR && AR->getLoop() == L && AR->isAffine()) { 12783 // This couldn't be folded because the operand didn't have the nsw 12784 // flag. Add the nssw flag as an assumption that we could make. 12785 const SCEV *Step = AR->getStepRecurrence(SE); 12786 Type *Ty = Expr->getType(); 12787 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 12788 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 12789 SE.getSignExtendExpr(Step, Ty), L, 12790 AR->getNoWrapFlags()); 12791 } 12792 return SE.getSignExtendExpr(Operand, Expr->getType()); 12793 } 12794 12795 private: 12796 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 12797 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12798 SCEVUnionPredicate *Pred) 12799 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 12800 12801 bool addOverflowAssumption(const SCEVPredicate *P) { 12802 if (!NewPreds) { 12803 // Check if we've already made this assumption. 12804 return Pred && Pred->implies(P); 12805 } 12806 NewPreds->insert(P); 12807 return true; 12808 } 12809 12810 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 12811 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12812 auto *A = SE.getWrapPredicate(AR, AddedFlags); 12813 return addOverflowAssumption(A); 12814 } 12815 12816 // If \p Expr represents a PHINode, we try to see if it can be represented 12817 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 12818 // to add this predicate as a runtime overflow check, we return the AddRec. 12819 // If \p Expr does not meet these conditions (is not a PHI node, or we 12820 // couldn't create an AddRec for it, or couldn't add the predicate), we just 12821 // return \p Expr. 12822 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 12823 if (!isa<PHINode>(Expr->getValue())) 12824 return Expr; 12825 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 12826 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 12827 if (!PredicatedRewrite) 12828 return Expr; 12829 for (auto *P : PredicatedRewrite->second){ 12830 // Wrap predicates from outer loops are not supported. 
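// (A wrap check is only meaningful in the loop whose backedge performs the
// increment, so a predicate on another loop's AddRec makes us give up on
// this PHI and return the original unknown.)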
12831 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 12832 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 12833 if (L != AR->getLoop()) 12834 return Expr; 12835 } 12836 if (!addOverflowAssumption(P)) 12837 return Expr; 12838 } 12839 return PredicatedRewrite->first; 12840 } 12841 12842 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 12843 SCEVUnionPredicate *Pred; 12844 const Loop *L; 12845 }; 12846 12847 } // end anonymous namespace 12848 12849 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 12850 SCEVUnionPredicate &Preds) { 12851 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 12852 } 12853 12854 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 12855 const SCEV *S, const Loop *L, 12856 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 12857 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 12858 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 12859 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 12860 12861 if (!AddRec) 12862 return nullptr; 12863 12864 // Since the transformation was successful, we can now transfer the SCEV 12865 // predicates. 12866 for (auto *P : TransformPreds) 12867 Preds.insert(P); 12868 12869 return AddRec; 12870 } 12871 12872 /// SCEV predicates 12873 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 12874 SCEVPredicateKind Kind) 12875 : FastID(ID), Kind(Kind) {} 12876 12877 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 12878 const SCEV *LHS, const SCEV *RHS) 12879 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 12880 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 12881 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 12882 } 12883 12884 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 12885 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 12886 12887 if (!Op) 12888 return false; 12889 12890 return Op->LHS == LHS && Op->RHS == RHS; 12891 } 12892 12893 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 12894 12895 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 12896 12897 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 12898 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 12899 } 12900 12901 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 12902 const SCEVAddRecExpr *AR, 12903 IncrementWrapFlags Flags) 12904 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 12905 12906 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 12907 12908 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 12909 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 12910 12911 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 12912 } 12913 12914 bool SCEVWrapPredicate::isAlwaysTrue() const { 12915 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 12916 IncrementWrapFlags IFlags = Flags; 12917 12918 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 12919 IFlags = clearFlags(IFlags, IncrementNSSW); 12920 12921 return IFlags == IncrementAnyWrap; 12922 } 12923 12924 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 12925 OS.indent(Depth) << *getExpr() << " Added Flags: "; 12926 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 12927 OS << "<nusw>"; 12928 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 12929 OS << "<nssw>"; 12930 OS << "\n"; 12931 } 12932 12933 SCEVWrapPredicate::IncrementWrapFlags 12934 
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 12935 ScalarEvolution &SE) { 12936 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 12937 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 12938 12939 // We can safely transfer the NSW flag as NSSW. 12940 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 12941 ImpliedFlags = IncrementNSSW; 12942 12943 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 12944 // If the increment is positive, the SCEV NUW flag will also imply the 12945 // WrapPredicate NUSW flag. 12946 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 12947 if (Step->getValue()->getValue().isNonNegative()) 12948 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 12949 } 12950 12951 return ImpliedFlags; 12952 } 12953 12954 /// Union predicates don't get cached so create a dummy set ID for it. 12955 SCEVUnionPredicate::SCEVUnionPredicate() 12956 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 12957 12958 bool SCEVUnionPredicate::isAlwaysTrue() const { 12959 return all_of(Preds, 12960 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 12961 } 12962 12963 ArrayRef<const SCEVPredicate *> 12964 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 12965 auto I = SCEVToPreds.find(Expr); 12966 if (I == SCEVToPreds.end()) 12967 return ArrayRef<const SCEVPredicate *>(); 12968 return I->second; 12969 } 12970 12971 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 12972 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 12973 return all_of(Set->Preds, 12974 [this](const SCEVPredicate *I) { return this->implies(I); }); 12975 12976 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 12977 if (ScevPredsIt == SCEVToPreds.end()) 12978 return false; 12979 auto &SCEVPreds = ScevPredsIt->second; 12980 12981 return any_of(SCEVPreds, 12982 [N](const SCEVPredicate *I) { return I->implies(N); }); 12983 } 12984 12985 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 12986 12987 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 12988 for (auto Pred : Preds) 12989 Pred->print(OS, Depth); 12990 } 12991 12992 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 12993 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 12994 for (auto Pred : Set->Preds) 12995 add(Pred); 12996 return; 12997 } 12998 12999 if (implies(N)) 13000 return; 13001 13002 const SCEV *Key = N->getExpr(); 13003 assert(Key && "Only SCEVUnionPredicate doesn't have an " 13004 " associated expression!"); 13005 13006 SCEVToPreds[Key].push_back(N); 13007 Preds.push_back(N); 13008 } 13009 13010 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 13011 Loop &L) 13012 : SE(SE), L(L) {} 13013 13014 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 13015 const SCEV *Expr = SE.getSCEV(V); 13016 RewriteEntry &Entry = RewriteMap[Expr]; 13017 13018 // If we already have an entry and the version matches, return it. 13019 if (Entry.second && Generation == Entry.first) 13020 return Entry.second; 13021 13022 // We found an entry but it's stale. Rewrite the stale entry 13023 // according to the current predicate. 
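// (Rewriting on top of the previous result keeps the simplifications made
// under earlier generations; re-running the rewriter with the current union
// of predicates then picks up anything added since.)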
13024 if (Entry.second) 13025 Expr = Entry.second; 13026 13027 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 13028 Entry = {Generation, NewSCEV}; 13029 13030 return NewSCEV; 13031 } 13032 13033 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 13034 if (!BackedgeCount) { 13035 SCEVUnionPredicate BackedgePred; 13036 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 13037 addPredicate(BackedgePred); 13038 } 13039 return BackedgeCount; 13040 } 13041 13042 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 13043 if (Preds.implies(&Pred)) 13044 return; 13045 Preds.add(&Pred); 13046 updateGeneration(); 13047 } 13048 13049 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 13050 return Preds; 13051 } 13052 13053 void PredicatedScalarEvolution::updateGeneration() { 13054 // If the generation number wrapped recompute everything. 13055 if (++Generation == 0) { 13056 for (auto &II : RewriteMap) { 13057 const SCEV *Rewritten = II.second.second; 13058 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 13059 } 13060 } 13061 } 13062 13063 void PredicatedScalarEvolution::setNoOverflow( 13064 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 13065 const SCEV *Expr = getSCEV(V); 13066 const auto *AR = cast<SCEVAddRecExpr>(Expr); 13067 13068 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 13069 13070 // Clear the statically implied flags. 13071 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 13072 addPredicate(*SE.getWrapPredicate(AR, Flags)); 13073 13074 auto II = FlagsMap.insert({V, Flags}); 13075 if (!II.second) 13076 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 13077 } 13078 13079 bool PredicatedScalarEvolution::hasNoOverflow( 13080 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 13081 const SCEV *Expr = getSCEV(V); 13082 const auto *AR = cast<SCEVAddRecExpr>(Expr); 13083 13084 Flags = SCEVWrapPredicate::clearFlags( 13085 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 13086 13087 auto II = FlagsMap.find(V); 13088 13089 if (II != FlagsMap.end()) 13090 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 13091 13092 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 13093 } 13094 13095 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 13096 const SCEV *Expr = this->getSCEV(V); 13097 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 13098 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 13099 13100 if (!New) 13101 return nullptr; 13102 13103 for (auto *P : NewPreds) 13104 Preds.add(P); 13105 13106 updateGeneration(); 13107 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 13108 return New; 13109 } 13110 13111 PredicatedScalarEvolution::PredicatedScalarEvolution( 13112 const PredicatedScalarEvolution &Init) 13113 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 13114 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 13115 for (auto I : Init.FlagsMap) 13116 FlagsMap.insert(I); 13117 } 13118 13119 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 13120 // For each block. 13121 for (auto *BB : L.getBlocks()) 13122 for (auto &I : *BB) { 13123 if (!SE.isSCEVable(I.getType())) 13124 continue; 13125 13126 auto *Expr = SE.getSCEV(&I); 13127 auto II = RewriteMap.find(Expr); 13128 13129 if (II == RewriteMap.end()) 13130 continue; 13131 13132 // Don't print things that are not interesting. 
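// (i.e. entries whose predicated form is identical to the original SCEV.)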
13133 if (II->second.second == Expr) 13134 continue; 13135 13136 OS.indent(Depth) << "[PSE]" << I << ":\n"; 13137 OS.indent(Depth + 2) << *Expr << "\n"; 13138 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 13139 } 13140 } 13141 13142 // Match the mathematical pattern A - (A / B) * B, where A and B can be 13143 // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used 13144 // for URem with constant power-of-2 second operands. 13145 // It's not always easy, as A and B can be folded (imagine A is X / 2 and B is 13146 // 4; A / B then becomes X / 8). 13147 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS, 13148 const SCEV *&RHS) { 13149 // Try to match 'zext (trunc A to iB) to iY', which is used 13150 // for URem with constant power-of-2 second operands. Make sure the size of 13151 // the operand A matches the size of the whole expression. 13152 if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr)) 13153 if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) { 13154 LHS = Trunc->getOperand(); 13155 if (LHS->getType() != Expr->getType()) 13156 LHS = getZeroExtendExpr(LHS, Expr->getType()); 13157 RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1) 13158 << getTypeSizeInBits(Trunc->getType())); 13159 return true; 13160 } 13161 const auto *Add = dyn_cast<SCEVAddExpr>(Expr); 13162 if (Add == nullptr || Add->getNumOperands() != 2) 13163 return false; 13164 13165 const SCEV *A = Add->getOperand(1); 13166 const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0)); 13167 13168 if (Mul == nullptr) 13169 return false; 13170 13171 const auto MatchURemWithDivisor = [&](const SCEV *B) { 13172 // (SomeExpr + (-(SomeExpr / B) * B)). 13173 if (Expr == getURemExpr(A, B)) { 13174 LHS = A; 13175 RHS = B; 13176 return true; 13177 } 13178 return false; 13179 }; 13180 13181 // (SomeExpr + (-1 * (SomeExpr / B) * B)). 13182 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0))) 13183 return MatchURemWithDivisor(Mul->getOperand(1)) || 13184 MatchURemWithDivisor(Mul->getOperand(2)); 13185 13186 // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)). 13187 if (Mul->getNumOperands() == 2) 13188 return MatchURemWithDivisor(Mul->getOperand(1)) || 13189 MatchURemWithDivisor(Mul->getOperand(0)) || 13190 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) || 13191 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0))); 13192 return false; 13193 } 13194 13195 const SCEV * 13196 ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) { 13197 SmallVector<BasicBlock*, 16> ExitingBlocks; 13198 L->getExitingBlocks(ExitingBlocks); 13199 13200 // Form an expression for the maximum exit count possible for this loop. We 13201 // merge the max and exact information to approximate a version of 13202 // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
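// In effect: take the umin, over all exiting blocks with a known exact or
// constant-max exit count, of those counts, and return CouldNotCompute when
// no exit provides either.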
13203 SmallVector<const SCEV*, 4> ExitCounts; 13204 for (BasicBlock *ExitingBB : ExitingBlocks) { 13205 const SCEV *ExitCount = getExitCount(L, ExitingBB); 13206 if (isa<SCEVCouldNotCompute>(ExitCount)) 13207 ExitCount = getExitCount(L, ExitingBB, 13208 ScalarEvolution::ConstantMaximum); 13209 if (!isa<SCEVCouldNotCompute>(ExitCount)) { 13210 assert(DT.dominates(ExitingBB, L->getLoopLatch()) && 13211 "We should only have known counts for exiting blocks that " 13212 "dominate latch!"); 13213 ExitCounts.push_back(ExitCount); 13214 } 13215 } 13216 if (ExitCounts.empty()) 13217 return getCouldNotCompute(); 13218 return getUMinFromMismatchedTypes(ExitCounts); 13219 } 13220 13221 /// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown 13222 /// components following the Map (Value -> SCEV)), but skips AddRecExpr because 13223 /// we cannot guarantee that the replacement is loop invariant in the loop of 13224 /// the AddRec. 13225 class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> { 13226 ValueToSCEVMapTy ⤅ 13227 13228 public: 13229 SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M) 13230 : SCEVRewriteVisitor(SE), Map(M) {} 13231 13232 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } 13233 13234 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 13235 auto I = Map.find(Expr->getValue()); 13236 if (I == Map.end()) 13237 return Expr; 13238 return I->second; 13239 } 13240 }; 13241 13242 const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) { 13243 auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS, 13244 const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) { 13245 if (!isa<SCEVUnknown>(LHS)) { 13246 std::swap(LHS, RHS); 13247 Predicate = CmpInst::getSwappedPredicate(Predicate); 13248 } 13249 13250 // For now, limit to conditions that provide information about unknown 13251 // expressions. 13252 auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS); 13253 if (!LHSUnknown) 13254 return; 13255 13256 // TODO: use information from more predicates. 13257 switch (Predicate) { 13258 case CmpInst::ICMP_ULT: { 13259 if (!containsAddRecurrence(RHS)) { 13260 const SCEV *Base = LHS; 13261 auto I = RewriteMap.find(LHSUnknown->getValue()); 13262 if (I != RewriteMap.end()) 13263 Base = I->second; 13264 13265 RewriteMap[LHSUnknown->getValue()] = 13266 getUMinExpr(Base, getMinusSCEV(RHS, getOne(RHS->getType()))); 13267 } 13268 break; 13269 } 13270 case CmpInst::ICMP_ULE: { 13271 if (!containsAddRecurrence(RHS)) { 13272 const SCEV *Base = LHS; 13273 auto I = RewriteMap.find(LHSUnknown->getValue()); 13274 if (I != RewriteMap.end()) 13275 Base = I->second; 13276 RewriteMap[LHSUnknown->getValue()] = getUMinExpr(Base, RHS); 13277 } 13278 break; 13279 } 13280 case CmpInst::ICMP_EQ: 13281 if (isa<SCEVConstant>(RHS)) 13282 RewriteMap[LHSUnknown->getValue()] = RHS; 13283 break; 13284 case CmpInst::ICMP_NE: 13285 if (isa<SCEVConstant>(RHS) && 13286 cast<SCEVConstant>(RHS)->getValue()->isNullValue()) 13287 RewriteMap[LHSUnknown->getValue()] = 13288 getUMaxExpr(LHS, getOne(RHS->getType())); 13289 break; 13290 default: 13291 break; 13292 } 13293 }; 13294 // Starting at the loop predecessor, climb up the predecessor chain, as long 13295 // as there are predecessors that can be found that have unique successors 13296 // leading to the original header. 13297 // TODO: share this logic with isLoopEntryGuardedByCond. 
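// For example (a sketch): if the loop is only entered when "%i u< %n" holds
// on the guarding branch, an occurrence of %i inside the loop can be
// strengthened to (%i umin (-1 + %n)); similarly, a dominating
// "assume(%x != 0)" strengthens %x to umax(%x, 1).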
13298 ValueToSCEVMapTy RewriteMap; 13299 for (std::pair<const BasicBlock *, const BasicBlock *> Pair( 13300 L->getLoopPredecessor(), L->getHeader()); 13301 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 13302 13303 const BranchInst *LoopEntryPredicate = 13304 dyn_cast<BranchInst>(Pair.first->getTerminator()); 13305 if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional()) 13306 continue; 13307 13308 // TODO: use information from more complex conditions, e.g. AND expressions. 13309 auto *Cmp = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition()); 13310 if (!Cmp) 13311 continue; 13312 13313 auto Predicate = Cmp->getPredicate(); 13314 if (LoopEntryPredicate->getSuccessor(1) == Pair.second) 13315 Predicate = CmpInst::getInversePredicate(Predicate); 13316 CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)), 13317 getSCEV(Cmp->getOperand(1)), RewriteMap); 13318 } 13319 13320 // Also collect information from assumptions dominating the loop. 13321 for (auto &AssumeVH : AC.assumptions()) { 13322 if (!AssumeVH) 13323 continue; 13324 auto *AssumeI = cast<CallInst>(AssumeVH); 13325 auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0)); 13326 if (!Cmp || !DT.dominates(AssumeI, L->getHeader())) 13327 continue; 13328 CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)), 13329 getSCEV(Cmp->getOperand(1)), RewriteMap); 13330 } 13331 13332 if (RewriteMap.empty()) 13333 return Expr; 13334 SCEVLoopGuardRewriter Rewriter(*this, RewriteMap); 13335 return Rewriter.visit(Expr); 13336 } 13337