//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//   Chains of recurrences -- a method to expedite the evaluation
//   of closed-form functions
//   Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//   On computational properties of chains of recurrences
//   Eugene V. Zima
//
//   Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//   Robert A. van Engelen
//
//   Efficient Symbolic Analysis for Optimizing Compilers
//   Robert A. van Engelen
//
//   Using the chains of recurrences algebra for data dependence testing and
//   induction variable substitution
//   MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by brute force");

#ifdef EXPENSIVE_CHECKS
bool llvm::VerifySCEV = true;
#else
bool llvm::VerifySCEV = false;
#endif

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

static cl::opt<bool, true> VerifySCEVOpt(
    "verify-scev", cl::Hidden, cl::location(VerifySCEV),
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<unsigned> RangeIterThreshold(
    "scev-range-iter-threshold", cl::Hidden,
    cl::desc("Threshold for switching to iteratively computing SCEV ranges"),
    cl::init(32));

static cl::opt<bool> ClassifyExpressions(
    "scalar-evolution-classify-expressions", cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every "
             "instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

static cl::opt<unsigned> MaxPhiSCCAnalysisSize(
    "scalar-evolution-max-scc-analysis-depth", cl::Hidden,
    cl::desc("Maximum amount of nodes to process while searching SCEVUnknown "
             "Phi strongly connected components"),
    cl::init(8));

static cl::opt<bool>
    EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden,
                            cl::desc("Handle <= and >= in finite loops"),
                            cl::init(true));

static cl::opt<bool> UseContextForNoWrapFlagInference(
    "scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden,
    cl::desc("Infer nuw/nsw flags using context where suitable"),
    cl::init(true));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
//                      Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    case scSequentialUMinExpr:
      OpStr = " umin_seq ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    ListSeparator LS(OpStr);
    for (const SCEV *Op : NAry->operands())
      OS << LS << *Op;
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scSequentialUMinExpr:
    return cast<SCEVSequentialMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

ArrayRef<const SCEV *> SCEV::operands() const {
  switch (getSCEVType()) {
  case scConstant:
  case scUnknown:
    return {};
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->operands();
  case scAddRecExpr:
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr:
    return cast<SCEVNAryExpr>(this)->operands();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->operands();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Replace the value pointer in case someone is still using this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCacheValue is a set of pairs of values
/// that have been previously deemed to be "equally complex" by this routine.
/// It is intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
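///
/// Returns a negative, zero, or positive value when \p LV is less complex
/// than, equally complex as, or more complex than \p RV, respectively,
/// mirroring the three-way contract of CompareSCEVComplexity below.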
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
// If the max analysis depth was reached, return std::nullopt, assuming we do
// not know if they are equivalent for sure.
static std::optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return std::nullopt;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      assert(DT.dominates(RHead, LHead) &&
             "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    [[fallthrough]];
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
  case scSequentialUMinExpr: {
    ArrayRef<const SCEV *> LOps = LHS->operands();
    ArrayRef<const SCEV *> ROps = RHS->operands();

    // Lexicographically compare n-ary-like expressions.
    unsigned LNumOps = LOps.size(), RNumOps = ROps.size();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LOps[i],
                                     ROps[i], DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return; // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countr_zero();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
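  //
  // As a concrete illustration (assumed values, not used by the code): for
  // K = 4 and W = 32, K! = 24 = 2^3 * 3, so T = 3 and OddFactorial = 3. The
  // product It*(It-1)*(It-2)*(It-3) is formed at W + T = 35 bits, shifted
  // right by T = 3 (the division by DivFactor == 8), and then multiplied by
  // the inverse of 3 modulo 2^32, which is 0xAAAAAAAB, since
  // 3 * 0xAAAAAAAB == 1 (mod 2^32).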
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(operands(), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
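  // (Non-integral pointers have no fixed, exposable bit representation, so a
  // ptrtoint of one has no value that SCEV could soundly reason about.)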
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression; we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (const auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (const auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
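  // For example (illustrative): truncating (zext %a + zext %b) back to the
  // source type of %a and %b folds each trunc(zext %x) to %x, so no new
  // truncate survives and the transform pays off; distributing over two
  // opaque operands would instead create two new truncates, so it is skipped.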
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that ID was inserted into the cache by the recursive calls
    // above. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, Op);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
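// For example (illustrative): for an 8-bit recurrence with Step == 1, the
// limit is 0 - 1 == 255 with an ULT predicate; any value that is ULT 255
// before the increment can have 1 added to it without wrapping back to 0.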
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
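  // E.g. (illustrative) if Start is (%x + %y + Step), then DiffOps becomes
  // {%x, %y} and PreStart below is (%x + %y); every operand equal to Step is
  // dropped.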
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}

static void insertFoldCacheEntry(
    const ScalarEvolution::FoldID &ID, const SCEV *S,
    DenseMap<ScalarEvolution::FoldID, const SCEV *> &FoldCache,
    DenseMap<const SCEV *, SmallVector<ScalarEvolution::FoldID, 2>>
        &FoldCacheUser) {
  auto I = FoldCache.insert({ID, S});
  if (!I.second) {
    // Remove FoldCacheUser entry for ID when replacing an existing FoldCache
    // entry.
    auto &UserIDs = FoldCacheUser[I.first->second];
    assert(count(UserIDs, ID) == 1 && "unexpected duplicates in UserIDs");
    for (unsigned I = 0; I != UserIDs.size(); ++I)
      if (UserIDs[I] == ID) {
        std::swap(UserIDs[I], UserIDs.back());
        break;
      }
    UserIDs.pop_back();
    I.first->second = S;
  }
  auto R = FoldCacheUser.insert({S, {}});
  R.first->second.push_back(ID);
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldID ID;
  ID.addInteger(scZeroExtend);
  ID.addPointer(Op);
  ID.addPointer(Ty);
  auto Iter = FoldCache.find(ID);
  if (Iter != FoldCache.end())
    return Iter->second;

  const SCEV *S = getZeroExtendExprImpl(Op, Ty, Depth);
  if (!isa<SCEVZeroExtendExpr>(S))
    insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
                                                   unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty, Depth);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow in the old, smaller type, we can zero extend all of
  // the operands (often constants). This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap()) {
        Start =
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
        Step = getZeroExtendExpr(Step, Ty, Depth + 1);
        return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
      }

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
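          // The check compares the sum evaluated in the narrow type (then
          // zero-extended) against the same sum with each operand widened
          // first; if the two agree, the narrow computation cannot have
          // wrapped. Illustrative example (not derived from this code): for
          // {0,+,1} in i8 with MaxBECount == 100, zext(0 + 1 * 100) == 100 ==
          // zext(0) + zext(1) * zext(100) in i16, so no unsigned overflow
          // occurs and the zext can be pushed into the addrec's operands.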
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                             Depth + 1);
            Step = getZeroExtendExpr(Step, Ty, Depth + 1);
            return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                             Depth + 1);
            Step = getSignExtendExpr(Step, Ty, Depth + 1);
            return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {

        auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
        if (AR->hasNoUnsignedWrap()) {
          // Same as nuw case above - duplicated here to avoid a compile time
          // issue. It's not clear that the order of checks matters, but it's
          // one of two possible causes for a change which was reverted. Be
          // conservative for the moment.
          Start =
              getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
          Step = getZeroExtendExpr(Step, Ty, Depth + 1);
          return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
        }

        // For a negative step, we can extend the operands iff doing so only
        // traverses values in the range zext([0,UINT_MAX]).
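        // E.g. (illustrative): for {100,+,-1} in i8, N evaluates to
        // 255 - (-1) == 0 in i8 arithmetic, so knowing AR >u 0 whenever the
        // backedge is taken means every decrement stays within [0, 255] and
        // the recurrence cannot self-wrap.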
1792 if (isKnownNegative(Step)) { 1793 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1794 getSignedRangeMin(Step)); 1795 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1796 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1797 // Cache knowledge of AR NW, which is propagated to this 1798 // AddRec. Negative step causes unsigned wrap, but it 1799 // still can't self-wrap. 1800 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); 1801 // Return the expression with the addrec on the outside. 1802 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1803 Depth + 1); 1804 Step = getSignExtendExpr(Step, Ty, Depth + 1); 1805 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 1806 } 1807 } 1808 } 1809 1810 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw> 1811 // if D + (C - D + Step * n) could be proven to not unsigned wrap 1812 // where D maximizes the number of trailing zeros of (C - D + Step * n) 1813 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 1814 const APInt &C = SC->getAPInt(); 1815 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 1816 if (D != 0) { 1817 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1818 const SCEV *SResidual = 1819 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 1820 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1821 return getAddExpr(SZExtD, SZExtR, 1822 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1823 Depth + 1); 1824 } 1825 } 1826 1827 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1828 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); 1829 Start = 1830 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1); 1831 Step = getZeroExtendExpr(Step, Ty, Depth + 1); 1832 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 1833 } 1834 } 1835 1836 // zext(A % B) --> zext(A) % zext(B) 1837 { 1838 const SCEV *LHS; 1839 const SCEV *RHS; 1840 if (matchURem(Op, LHS, RHS)) 1841 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), 1842 getZeroExtendExpr(RHS, Ty, Depth + 1)); 1843 } 1844 1845 // zext(A / B) --> zext(A) / zext(B). 1846 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op)) 1847 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1), 1848 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1)); 1849 1850 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1851 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1852 if (SA->hasNoUnsignedWrap()) { 1853 // If the addition does not unsign overflow then we can, by definition, 1854 // commute the zero extension with the addition operation. 1855 SmallVector<const SCEV *, 4> Ops; 1856 for (const auto *Op : SA->operands()) 1857 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1858 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1859 } 1860 1861 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...)) 1862 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap 1863 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1864 // 1865 // Often address arithmetics contain expressions like 1866 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). 1867 // This transformation is useful while proving that such expressions are 1868 // equal or differ by a small constant amount, see LoadStoreVectorizer pass. 
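    // As a worked instance of the rule above (illustrative numbers): in
    // zext(5 + 4 * %x) the non-constant part has two trailing zero bits, so
    // D == 5 mod 4 == 1, and the fold yields zext(1) + zext(4 + 4 * %x).
    // Adding D back only fills the low zero bits of the residual, which is
    // why the outer add cannot unsigned-wrap.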
1869 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1870 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1871 if (D != 0) { 1872 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1873 const SCEV *SResidual = 1874 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1875 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1876 return getAddExpr(SZExtD, SZExtR, 1877 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1878 Depth + 1); 1879 } 1880 } 1881 } 1882 1883 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1884 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1885 if (SM->hasNoUnsignedWrap()) { 1886 // If the multiply does not unsign overflow then we can, by definition, 1887 // commute the zero extension with the multiply operation. 1888 SmallVector<const SCEV *, 4> Ops; 1889 for (const auto *Op : SM->operands()) 1890 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1891 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1892 } 1893 1894 // zext(2^K * (trunc X to iN)) to iM -> 1895 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1896 // 1897 // Proof: 1898 // 1899 // zext(2^K * (trunc X to iN)) to iM 1900 // = zext((trunc X to iN) << K) to iM 1901 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1902 // (because shl removes the top K bits) 1903 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1904 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1905 // 1906 if (SM->getNumOperands() == 2) 1907 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1908 if (MulLHS->getAPInt().isPowerOf2()) 1909 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1910 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1911 MulLHS->getAPInt().logBase2(); 1912 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1913 return getMulExpr( 1914 getZeroExtendExpr(MulLHS, Ty), 1915 getZeroExtendExpr( 1916 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1917 SCEV::FlagNUW, Depth + 1); 1918 } 1919 } 1920 1921 // zext(umin(x, y)) -> umin(zext(x), zext(y)) 1922 // zext(umax(x, y)) -> umax(zext(x), zext(y)) 1923 if (isa<SCEVUMinExpr>(Op) || isa<SCEVUMaxExpr>(Op)) { 1924 auto *MinMax = cast<SCEVMinMaxExpr>(Op); 1925 SmallVector<const SCEV *, 4> Operands; 1926 for (auto *Operand : MinMax->operands()) 1927 Operands.push_back(getZeroExtendExpr(Operand, Ty)); 1928 if (isa<SCEVUMinExpr>(MinMax)) 1929 return getUMinExpr(Operands); 1930 else 1931 return getUMaxExpr(Operands); 1932 } 1933 1934 // zext(umin_seq(x, y)) -> umin_seq(zext(x), zext(y)) 1935 if (auto *MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(Op)) { 1936 assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!"); 1937 SmallVector<const SCEV *, 4> Operands; 1938 for (auto *Operand : MinMax->operands()) 1939 Operands.push_back(getZeroExtendExpr(Operand, Ty)); 1940 return getUMinExpr(Operands, /*Sequential*/ true); 1941 } 1942 1943 // The cast wasn't folded; create an explicit cast node. 1944 // Recompute the insert position, as it may have been invalidated. 
1945 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1946 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1947 Op, Ty); 1948 UniqueSCEVs.InsertNode(S, IP); 1949 registerUser(S, Op); 1950 return S; 1951 } 1952 1953 const SCEV * 1954 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1955 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1956 "This is not an extending conversion!"); 1957 assert(isSCEVable(Ty) && 1958 "This is not a conversion to a SCEVable type!"); 1959 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1960 Ty = getEffectiveSCEVType(Ty); 1961 1962 FoldID ID; 1963 ID.addInteger(scSignExtend); 1964 ID.addPointer(Op); 1965 ID.addPointer(Ty); 1966 auto Iter = FoldCache.find(ID); 1967 if (Iter != FoldCache.end()) 1968 return Iter->second; 1969 1970 const SCEV *S = getSignExtendExprImpl(Op, Ty, Depth); 1971 if (!isa<SCEVSignExtendExpr>(S)) 1972 insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser); 1973 return S; 1974 } 1975 1976 const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty, 1977 unsigned Depth) { 1978 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1979 "This is not an extending conversion!"); 1980 assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); 1981 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1982 Ty = getEffectiveSCEVType(Ty); 1983 1984 // Fold if the operand is constant. 1985 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1986 return getConstant( 1987 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1988 1989 // sext(sext(x)) --> sext(x) 1990 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1991 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1992 1993 // sext(zext(x)) --> zext(x) 1994 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1995 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1996 1997 // Before doing any expensive analysis, check to see if we've already 1998 // computed a SCEV for this Op and Ty. 1999 FoldingSetNodeID ID; 2000 ID.AddInteger(scSignExtend); 2001 ID.AddPointer(Op); 2002 ID.AddPointer(Ty); 2003 void *IP = nullptr; 2004 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2005 // Limit recursion depth. 2006 if (Depth > MaxCastDepth) { 2007 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 2008 Op, Ty); 2009 UniqueSCEVs.InsertNode(S, IP); 2010 registerUser(S, Op); 2011 return S; 2012 } 2013 2014 // sext(trunc(x)) --> sext(x) or x or trunc(x) 2015 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 2016 // It's possible the bits taken off by the truncate were all sign bits. If 2017 // so, we should be able to simplify this further. 2018 const SCEV *X = ST->getOperand(); 2019 ConstantRange CR = getSignedRange(X); 2020 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 2021 unsigned NewBits = getTypeSizeInBits(Ty); 2022 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 2023 CR.sextOrTrunc(NewBits))) 2024 return getTruncateOrSignExtend(X, Ty, Depth); 2025 } 2026 2027 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 2028 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 2029 if (SA->hasNoSignedWrap()) { 2030 // If the addition does not sign overflow then we can, by definition, 2031 // commute the sign extension with the addition operation. 
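      // E.g. (illustrative, i8 to i32): sext((%x + 1)<nsw>) ==
      // (sext(%x) + 1)<nsw>, since <nsw> guarantees the narrow sum equals
      // the mathematically exact sum.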
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }

    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not signed wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // For instance, this will bring two seemingly different expressions:
    //     1 + sext(5 + 20 * %x + 24 * %y)  and
    //         sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //     2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow in the old, smaller type, we can sign extend all of
  // the operands (often constants). This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap()) {
        Start =
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
        Step = getSignExtendExpr(Step, Ty, Depth + 1);
        return getAddRecExpr(Start, Step, L, SCEV::FlagNSW);
      }

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
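        // I.e. trunc-or-zext MaxBECount to the addrec's type and back; the
        // round trip reproduces MaxBECount exactly when no set bits were
        // dropped, in which case the narrow count is usable below.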
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                             Depth + 1);
            Step = getSignExtendExpr(Step, Ty, Depth + 1);
            return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            //    => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                             Depth + 1);
            Step = getZeroExtendExpr(Step, Ty, Depth + 1);
            return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
          }
        }
      }

      auto NewFlags = proveNoSignedWrapViaInduction(AR);
      setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      if (AR->hasNoSignedWrap()) {
        // Same as nsw case above - duplicated here to avoid a compile time
        // issue. It's not clear that the order of checks matters, but it's
        // one of two possible causes for a change which was reverted. Be
        // conservative for the moment.
        Start =
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
        Step = getSignExtendExpr(Step, Ty, Depth + 1);
        return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
      }

      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
        Start =
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
        Step = getSignExtendExpr(Step, Ty, Depth + 1);
        return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably non-negative and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // sext(smin(x, y)) -> smin(sext(x), sext(y))
  // sext(smax(x, y)) -> smax(sext(x), sext(y))
  if (isa<SCEVSMinExpr>(Op) || isa<SCEVSMaxExpr>(Op)) {
    auto *MinMax = cast<SCEVMinMaxExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    for (auto *Operand : MinMax->operands())
      Operands.push_back(getSignExtendExpr(Operand, Ty));
    if (isa<SCEVSMinExpr>(MinMax))
      return getSMinExpr(Operands);
    else
      return getSMaxExpr(Operands);
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, { Op });
  return S;
}

const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op,
                                         Type *Ty) {
  switch (Kind) {
  case scTruncate:
    return getTruncateExpr(Op, Ty);
  case scZeroExtend:
    return getZeroExtendExpr(Op, Ty);
  case scSignExtend:
    return getSignExtendExpr(Op, Ty);
  case scPtrToInt:
    return getPtrToIntExpr(Op, Ty);
  default:
    llvm_unreachable("Not a SCEV cast expression!");
  }
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
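  // (An anyext makes no promise about the new high bits, so for a negative
  // constant picking sext keeps the value, e.g. anyext of i8 -1 stays -1 in
  // the wider type; illustrative.)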
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, and update the given map. This is a helper function for
/// getAddExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             ArrayRef<const SCEV *> Ops, const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
2334 for (; i != Ops.size(); ++i) { 2335 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2336 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2337 APInt NewScale = 2338 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2339 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2340 // A multiplication of a constant with another add; recurse. 2341 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2342 Interesting |= 2343 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2344 Add->operands(), NewScale, SE); 2345 } else { 2346 // A multiplication of a constant with some other value. Update 2347 // the map. 2348 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands())); 2349 const SCEV *Key = SE.getMulExpr(MulOps); 2350 auto Pair = M.insert({Key, NewScale}); 2351 if (Pair.second) { 2352 NewOps.push_back(Pair.first->first); 2353 } else { 2354 Pair.first->second += NewScale; 2355 // The map already had an entry for this value, which may indicate 2356 // a folding opportunity. 2357 Interesting = true; 2358 } 2359 } 2360 } else { 2361 // An ordinary operand. Update the map. 2362 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2363 M.insert({Ops[i], Scale}); 2364 if (Pair.second) { 2365 NewOps.push_back(Pair.first->first); 2366 } else { 2367 Pair.first->second += Scale; 2368 // The map already had an entry for this value, which may indicate 2369 // a folding opportunity. 2370 Interesting = true; 2371 } 2372 } 2373 } 2374 2375 return Interesting; 2376 } 2377 2378 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, 2379 const SCEV *LHS, const SCEV *RHS, 2380 const Instruction *CtxI) { 2381 const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *, 2382 SCEV::NoWrapFlags, unsigned); 2383 switch (BinOp) { 2384 default: 2385 llvm_unreachable("Unsupported binary op"); 2386 case Instruction::Add: 2387 Operation = &ScalarEvolution::getAddExpr; 2388 break; 2389 case Instruction::Sub: 2390 Operation = &ScalarEvolution::getMinusSCEV; 2391 break; 2392 case Instruction::Mul: 2393 Operation = &ScalarEvolution::getMulExpr; 2394 break; 2395 } 2396 2397 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) = 2398 Signed ? &ScalarEvolution::getSignExtendExpr 2399 : &ScalarEvolution::getZeroExtendExpr; 2400 2401 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) 2402 auto *NarrowTy = cast<IntegerType>(LHS->getType()); 2403 auto *WideTy = 2404 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); 2405 2406 const SCEV *A = (this->*Extension)( 2407 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0); 2408 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0); 2409 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0); 2410 const SCEV *B = (this->*Operation)(LHSB, RHSB, SCEV::FlagAnyWrap, 0); 2411 if (A == B) 2412 return true; 2413 // Can we use context to prove the fact we need? 2414 if (!CtxI) 2415 return false; 2416 // We can prove that add(x, constant) doesn't wrap if isKnownPredicateAt can 2417 // guarantee that x <= max_int - constant at the given context. 2418 // TODO: Support other operations. 2419 if (BinOp != Instruction::Add) 2420 return false; 2421 auto *RHSC = dyn_cast<SCEVConstant>(RHS); 2422 // TODO: Lift this limitation. 2423 if (!RHSC) 2424 return false; 2425 APInt C = RHSC->getAPInt(); 2426 // TODO: Also lift this limitation. 
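  // E.g. (illustrative, i8): to show x + 10 is <nuw> at CtxI it suffices to
  // know x <=u 245 there, since 245 == 255 - 10 is the largest value whose
  // sum with 10 still fits in 8 bits.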
2427 if (Signed && C.isNegative()) 2428 return false; 2429 unsigned NumBits = C.getBitWidth(); 2430 APInt Max = 2431 Signed ? APInt::getSignedMaxValue(NumBits) : APInt::getMaxValue(NumBits); 2432 APInt Limit = Max - C; 2433 ICmpInst::Predicate Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 2434 return isKnownPredicateAt(Pred, LHS, getConstant(Limit), CtxI); 2435 } 2436 2437 std::optional<SCEV::NoWrapFlags> 2438 ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp( 2439 const OverflowingBinaryOperator *OBO) { 2440 // It cannot be done any better. 2441 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap()) 2442 return std::nullopt; 2443 2444 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap; 2445 2446 if (OBO->hasNoUnsignedWrap()) 2447 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2448 if (OBO->hasNoSignedWrap()) 2449 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2450 2451 bool Deduced = false; 2452 2453 if (OBO->getOpcode() != Instruction::Add && 2454 OBO->getOpcode() != Instruction::Sub && 2455 OBO->getOpcode() != Instruction::Mul) 2456 return std::nullopt; 2457 2458 const SCEV *LHS = getSCEV(OBO->getOperand(0)); 2459 const SCEV *RHS = getSCEV(OBO->getOperand(1)); 2460 2461 const Instruction *CtxI = 2462 UseContextForNoWrapFlagInference ? dyn_cast<Instruction>(OBO) : nullptr; 2463 if (!OBO->hasNoUnsignedWrap() && 2464 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2465 /* Signed */ false, LHS, RHS, CtxI)) { 2466 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2467 Deduced = true; 2468 } 2469 2470 if (!OBO->hasNoSignedWrap() && 2471 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2472 /* Signed */ true, LHS, RHS, CtxI)) { 2473 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2474 Deduced = true; 2475 } 2476 2477 if (Deduced) 2478 return Flags; 2479 return std::nullopt; 2480 } 2481 2482 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2483 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2484 // can't-overflow flags for the operation if possible. 2485 static SCEV::NoWrapFlags 2486 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2487 const ArrayRef<const SCEV *> Ops, 2488 SCEV::NoWrapFlags Flags) { 2489 using namespace std::placeholders; 2490 2491 using OBO = OverflowingBinaryOperator; 2492 2493 bool CanAnalyze = 2494 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2495 (void)CanAnalyze; 2496 assert(CanAnalyze && "don't call from other places!"); 2497 2498 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2499 SCEV::NoWrapFlags SignOrUnsignWrap = 2500 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2501 2502 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 
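  // (Sketch of why this holds: with every operand in [0, SINT_MAX], <nsw>
  // keeps each partial sum within [0, SINT_MAX] as well, and sums that never
  // leave the non-negative signed range cannot wrap unsigned either.)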
2503 auto IsKnownNonNegative = [&](const SCEV *S) { 2504 return SE->isKnownNonNegative(S); 2505 }; 2506 2507 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2508 Flags = 2509 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2510 2511 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2512 2513 if (SignOrUnsignWrap != SignOrUnsignMask && 2514 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2515 isa<SCEVConstant>(Ops[0])) { 2516 2517 auto Opcode = [&] { 2518 switch (Type) { 2519 case scAddExpr: 2520 return Instruction::Add; 2521 case scMulExpr: 2522 return Instruction::Mul; 2523 default: 2524 llvm_unreachable("Unexpected SCEV op."); 2525 } 2526 }(); 2527 2528 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2529 2530 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2531 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2532 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2533 Opcode, C, OBO::NoSignedWrap); 2534 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2535 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2536 } 2537 2538 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 2539 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2540 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2541 Opcode, C, OBO::NoUnsignedWrap); 2542 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2543 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2544 } 2545 } 2546 2547 // <0,+,nonnegative><nw> is also nuw 2548 // TODO: Add corresponding nsw case 2549 if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) && 2550 !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 && 2551 Ops[0]->isZero() && IsKnownNonNegative(Ops[1])) 2552 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2553 2554 // both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW 2555 if (Type == scMulExpr && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && 2556 Ops.size() == 2) { 2557 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0])) 2558 if (UDiv->getOperand(1) == Ops[1]) 2559 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2560 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1])) 2561 if (UDiv->getOperand(1) == Ops[0]) 2562 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2563 } 2564 2565 return Flags; 2566 } 2567 2568 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2569 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2570 } 2571 2572 /// Get a canonical add expression, or something simpler if possible. 2573 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2574 SCEV::NoWrapFlags OrigFlags, 2575 unsigned Depth) { 2576 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2577 "only nuw or nsw allowed"); 2578 assert(!Ops.empty() && "Cannot get empty add!"); 2579 if (Ops.size() == 1) return Ops[0]; 2580 #ifndef NDEBUG 2581 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2582 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2583 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2584 "SCEVAddExpr operand types don't match!"); 2585 unsigned NumPtrs = count_if( 2586 Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); }); 2587 assert(NumPtrs <= 1 && "add has at most one pointer operand"); 2588 #endif 2589 2590 // Sort by complexity, this groups all similar expression types together. 
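  // (After sorting, any constants sit at the front of Ops, which is what the
  // constant-folding loop below relies on when it peeks at Ops[0].)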
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
  };

  // Limit recursion depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateAddExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) {
    // Don't strengthen flags if we have no new information.
    SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
    if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
      Add->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      // X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, OrigFlags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. E.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
2665 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) 2666 return T->getOperand()->getType(); 2667 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2668 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); 2669 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) 2670 return T->getOperand()->getType(); 2671 } 2672 return nullptr; 2673 }; 2674 if (auto *SrcType = FindTruncSrcType()) { 2675 SmallVector<const SCEV *, 8> LargeOps; 2676 bool Ok = true; 2677 // Check all the operands to see if they can be represented in the 2678 // source type of the truncate. 2679 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2680 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2681 if (T->getOperand()->getType() != SrcType) { 2682 Ok = false; 2683 break; 2684 } 2685 LargeOps.push_back(T->getOperand()); 2686 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2687 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2688 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2689 SmallVector<const SCEV *, 8> LargeMulOps; 2690 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2691 if (const SCEVTruncateExpr *T = 2692 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2693 if (T->getOperand()->getType() != SrcType) { 2694 Ok = false; 2695 break; 2696 } 2697 LargeMulOps.push_back(T->getOperand()); 2698 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2699 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2700 } else { 2701 Ok = false; 2702 break; 2703 } 2704 } 2705 if (Ok) 2706 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2707 } else { 2708 Ok = false; 2709 break; 2710 } 2711 } 2712 if (Ok) { 2713 // Evaluate the expression in the larger type. 2714 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); 2715 // If it folds to something simple, use it. Otherwise, don't. 2716 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2717 return getTruncateExpr(Fold, Ty); 2718 } 2719 } 2720 2721 if (Ops.size() == 2) { 2722 // Check if we have an expression of the form ((X + C1) - C2), where C1 and 2723 // C2 can be folded in a way that allows retaining wrapping flags of (X + 2724 // C1). 2725 const SCEV *A = Ops[0]; 2726 const SCEV *B = Ops[1]; 2727 auto *AddExpr = dyn_cast<SCEVAddExpr>(B); 2728 auto *C = dyn_cast<SCEVConstant>(A); 2729 if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) { 2730 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt(); 2731 auto C2 = C->getAPInt(); 2732 SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap; 2733 2734 APInt ConstAdd = C1 + C2; 2735 auto AddFlags = AddExpr->getNoWrapFlags(); 2736 // Adding a smaller constant is NUW if the original AddExpr was NUW. 2737 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) && 2738 ConstAdd.ule(C1)) { 2739 PreservedFlags = 2740 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW); 2741 } 2742 2743 // Adding a constant with the same sign and small magnitude is NSW, if the 2744 // original AddExpr was NSW. 
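      // E.g. (illustrative): folding ((%x + 5)<nsw> + (-3)) gives
      // (%x + 2)<nsw>, since 2 has the same sign as 5 and |2| <= |5|, so the
      // new constant stays within the range the original <nsw> already proved.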
      if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) &&
          C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
          ConstAdd.abs().ule(C1.abs())) {
        PreservedFlags =
            ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
      }

      if (PreservedFlags != SCEV::FlagAnyWrap) {
        SmallVector<const SCEV *, 4> NewOps(AddExpr->operands());
        NewOps[0] = getConstant(ConstAdd);
        return getAddExpr(NewOps, PreservedFlags);
      }
    }
  }

  // Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y)
  if (Ops.size() == 2) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[0]);
    if (Mul && Mul->getNumOperands() == 2 &&
        Mul->getOperand(0)->isAllOnesValue()) {
      const SCEV *X;
      const SCEV *Y;
      if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) {
        return getMulExpr(Y, getUDivExpr(X, Y));
      }
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    // If the original flags and all inlined SCEVAddExprs are NUW, use the
    // common NUW flag for the expression after inlining. Other flags cannot
    // be preserved, because they may depend on the original order of
    // operations.
    SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      append_range(Ops, Add->operands());
      DeletedAdd = true;
      CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, CommonFlags, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile
      // to re-generate the operands list. Group the operands by constant
      // scale, to avoid multiplying by the same constant scale multiple
      // times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
2829 Ops.clear(); 2830 if (AccumulatedConstant != 0) 2831 Ops.push_back(getConstant(AccumulatedConstant)); 2832 for (auto &MulOp : MulOpLists) { 2833 if (MulOp.first == 1) { 2834 Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)); 2835 } else if (MulOp.first != 0) { 2836 Ops.push_back(getMulExpr( 2837 getConstant(MulOp.first), 2838 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2839 SCEV::FlagAnyWrap, Depth + 1)); 2840 } 2841 } 2842 if (Ops.empty()) 2843 return getZero(Ty); 2844 if (Ops.size() == 1) 2845 return Ops[0]; 2846 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2847 } 2848 } 2849 2850 // If we are adding something to a multiply expression, make sure the 2851 // something is not already an operand of the multiply. If so, merge it into 2852 // the multiply. 2853 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2854 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2855 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2856 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2857 if (isa<SCEVConstant>(MulOpSCEV)) 2858 continue; 2859 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2860 if (MulOpSCEV == Ops[AddOp]) { 2861 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2862 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2863 if (Mul->getNumOperands() != 2) { 2864 // If the multiply has more than two operands, we must get the 2865 // Y*Z term. 2866 SmallVector<const SCEV *, 4> MulOps( 2867 Mul->operands().take_front(MulOp)); 2868 append_range(MulOps, Mul->operands().drop_front(MulOp + 1)); 2869 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2870 } 2871 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2872 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2873 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2874 SCEV::FlagAnyWrap, Depth + 1); 2875 if (Ops.size() == 2) return OuterMul; 2876 if (AddOp < Idx) { 2877 Ops.erase(Ops.begin()+AddOp); 2878 Ops.erase(Ops.begin()+Idx-1); 2879 } else { 2880 Ops.erase(Ops.begin()+Idx); 2881 Ops.erase(Ops.begin()+AddOp-1); 2882 } 2883 Ops.push_back(OuterMul); 2884 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2885 } 2886 2887 // Check this multiply against other multiplies being added together. 2888 for (unsigned OtherMulIdx = Idx+1; 2889 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2890 ++OtherMulIdx) { 2891 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2892 // If MulOp occurs in OtherMul, we can fold the two multiplies 2893 // together. 
2894 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2895 OMulOp != e; ++OMulOp) 2896 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2897 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2898 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2899 if (Mul->getNumOperands() != 2) { 2900 SmallVector<const SCEV *, 4> MulOps( 2901 Mul->operands().take_front(MulOp)); 2902 append_range(MulOps, Mul->operands().drop_front(MulOp+1)); 2903 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2904 } 2905 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2906 if (OtherMul->getNumOperands() != 2) { 2907 SmallVector<const SCEV *, 4> MulOps( 2908 OtherMul->operands().take_front(OMulOp)); 2909 append_range(MulOps, OtherMul->operands().drop_front(OMulOp+1)); 2910 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2911 } 2912 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2913 const SCEV *InnerMulSum = 2914 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2915 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2916 SCEV::FlagAnyWrap, Depth + 1); 2917 if (Ops.size() == 2) return OuterMul; 2918 Ops.erase(Ops.begin()+Idx); 2919 Ops.erase(Ops.begin()+OtherMulIdx-1); 2920 Ops.push_back(OuterMul); 2921 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2922 } 2923 } 2924 } 2925 } 2926 2927 // If there are any add recurrences in the operands list, see if any other 2928 // added values are loop invariant. If so, we can fold them into the 2929 // recurrence. 2930 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2931 ++Idx; 2932 2933 // Scan over all recurrences, trying to fold loop invariants into them. 2934 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2935 // Scan all of the other operands to this add and add them to the vector if 2936 // they are loop invariant w.r.t. the recurrence. 2937 SmallVector<const SCEV *, 8> LIOps; 2938 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2939 const Loop *AddRecLoop = AddRec->getLoop(); 2940 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2941 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2942 LIOps.push_back(Ops[i]); 2943 Ops.erase(Ops.begin()+i); 2944 --i; --e; 2945 } 2946 2947 // If we found some loop invariants, fold them into the recurrence. 2948 if (!LIOps.empty()) { 2949 // Compute nowrap flags for the addition of the loop-invariant ops and 2950 // the addrec. Temporarily push it as an operand for that purpose. These 2951 // flags are valid in the scope of the addrec only. 2952 LIOps.push_back(AddRec); 2953 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); 2954 LIOps.pop_back(); 2955 2956 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2957 LIOps.push_back(AddRec->getStart()); 2958 2959 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); 2960 2961 // It is not in general safe to propagate flags valid on an add within 2962 // the addrec scope to one outside it. We must prove that the inner 2963 // scope is guaranteed to execute if the outer one does to be able to 2964 // safely propagate. We know the program is undefined if poison is 2965 // produced on the inner scoped addrec. We also know that *for this use* 2966 // the outer scoped add can't overflow (because of the flags we just 2967 // computed for the inner scoped add) without the program being undefined. 
2968 // Proving that entry to the outer scope necessitates entry to the inner
2969 // scope thus proves the program undefined if the flags would be violated
2970 // in the outer scope.
2971 SCEV::NoWrapFlags AddFlags = Flags;
2972 if (AddFlags != SCEV::FlagAnyWrap) {
2973 auto *DefI = getDefiningScopeBound(LIOps);
2974 auto *ReachI = &*AddRecLoop->getHeader()->begin();
2975 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2976 AddFlags = SCEV::FlagAnyWrap;
2977 }
2978 AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1);
2979
2980 // Build the new addrec. Propagate the NUW and NSW flags if both the
2981 // outer add and the inner addrec are guaranteed to have no overflow.
2982 // Always propagate NW.
2983 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2984 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2985
2986 // If all of the other operands were loop invariant, we are done.
2987 if (Ops.size() == 1) return NewRec;
2988
2989 // Otherwise, add the folded AddRec to the non-invariant parts.
2990 for (unsigned i = 0;; ++i)
2991 if (Ops[i] == AddRec) {
2992 Ops[i] = NewRec;
2993 break;
2994 }
2995 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2996 }
2997
2998 // Okay, if there weren't any loop invariants to be folded, check to see if
2999 // there are multiple AddRecs with the same loop induction variable being
3000 // added together. If so, we can fold them.
3001 for (unsigned OtherIdx = Idx+1;
3002 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3003 ++OtherIdx) {
3004 // We expect the AddRecExprs to be sorted in reverse dominance order,
3005 // so that the 1st found AddRecExpr is dominated by all others.
3006 assert(DT.dominates(
3007 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
3008 AddRec->getLoop()->getHeader()) &&
3009 "AddRecExprs are not sorted in reverse dominance order?");
3010 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
3011 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
3012 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
3013 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3014 ++OtherIdx) {
3015 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3016 if (OtherAddRec->getLoop() == AddRecLoop) {
3017 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
3018 i != e; ++i) {
3019 if (i >= AddRecOps.size()) {
3020 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
3021 break;
3022 }
3023 SmallVector<const SCEV *, 2> TwoOps = {
3024 AddRecOps[i], OtherAddRec->getOperand(i)};
3025 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
3026 }
3027 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3028 }
3029 }
3030 // Step size has changed, so we cannot guarantee no self-wraparound.
3031 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
3032 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3033 }
3034 }
3035
3036 // Otherwise couldn't fold anything into this recurrence. Move on to the
3037 // next one.
3038 }
3039
3040 // Okay, it looks like we really DO need an add expr. Check to see if we
3041 // already have one, otherwise create a new one.
3042 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
3043 }
3044
3045 const SCEV *
3046 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
3047 SCEV::NoWrapFlags Flags) {
3048 FoldingSetNodeID ID;
3049 ID.AddInteger(scAddExpr);
3050 for (const SCEV *Op : Ops)
3051 ID.AddPointer(Op);
3052 void *IP = nullptr;
3053 SCEVAddExpr *S =
3054 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
3055 if (!S) {
3056 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3057 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3058 S = new (SCEVAllocator)
3059 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
3060 UniqueSCEVs.InsertNode(S, IP);
3061 registerUser(S, Ops);
3062 }
3063 S->setNoWrapFlags(Flags);
3064 return S;
3065 }
3066
3067 const SCEV *
3068 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
3069 const Loop *L, SCEV::NoWrapFlags Flags) {
3070 FoldingSetNodeID ID;
3071 ID.AddInteger(scAddRecExpr);
3072 for (const SCEV *Op : Ops)
3073 ID.AddPointer(Op);
3074 ID.AddPointer(L);
3075 void *IP = nullptr;
3076 SCEVAddRecExpr *S =
3077 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
3078 if (!S) {
3079 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3080 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3081 S = new (SCEVAllocator)
3082 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
3083 UniqueSCEVs.InsertNode(S, IP);
3084 LoopUsers[L].push_back(S);
3085 registerUser(S, Ops);
3086 }
3087 setNoWrapFlags(S, Flags);
3088 return S;
3089 }
3090
3091 const SCEV *
3092 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
3093 SCEV::NoWrapFlags Flags) {
3094 FoldingSetNodeID ID;
3095 ID.AddInteger(scMulExpr);
3096 for (const SCEV *Op : Ops)
3097 ID.AddPointer(Op);
3098 void *IP = nullptr;
3099 SCEVMulExpr *S =
3100 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
3101 if (!S) {
3102 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3103 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3104 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
3105 O, Ops.size());
3106 UniqueSCEVs.InsertNode(S, IP);
3107 registerUser(S, Ops);
3108 }
3109 S->setNoWrapFlags(Flags);
3110 return S;
3111 }
3112
3113 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
3114 uint64_t k = i*j;
3115 if (j > 1 && k / j != i) Overflow = true;
3116 return k;
3117 }
3118
3119 /// Compute the result of "n choose k", the binomial coefficient. If an
3120 /// intermediate computation overflows, Overflow will be set and the returned
3121 /// value will be garbage. Overflow is not cleared on absence of overflow.
3122 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
3123 // We use the multiplicative formula:
3124 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
3125 // At each iteration, we multiply by the next term of the numerator and
3126 // divide by the next term of the denominator. This division will always
3127 // produce an integral result, and helps reduce the chance of overflow in
3128 // the intermediate computations. However, we can still overflow even when
3129 // the final result would fit.
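// Worked example (illustrative): Choose(6, 2) computes r = (1*6)/1 = 6 and
// then r = (6*5)/2 = 15, matching C(6,2) = 15; dividing at every step keeps
// the intermediate values as small as possible.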
3130
3131 if (n == 0 || n == k) return 1;
3132 if (k > n) return 0;
3133
3134 if (k > n/2)
3135 k = n-k;
3136
3137 uint64_t r = 1;
3138 for (uint64_t i = 1; i <= k; ++i) {
3139 r = umul_ov(r, n-(i-1), Overflow);
3140 r /= i;
3141 }
3142 return r;
3143 }
3144
3145 /// Determine if any of the operands in this SCEV are a constant or if
3146 /// any of the add or multiply expressions in this SCEV contain a constant.
3147 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
3148 struct FindConstantInAddMulChain {
3149 bool FoundConstant = false;
3150
3151 bool follow(const SCEV *S) {
3152 FoundConstant |= isa<SCEVConstant>(S);
3153 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
3154 }
3155
3156 bool isDone() const {
3157 return FoundConstant;
3158 }
3159 };
3160
3161 FindConstantInAddMulChain F;
3162 SCEVTraversal<FindConstantInAddMulChain> ST(F);
3163 ST.visitAll(StartExpr);
3164 return F.FoundConstant;
3165 }
3166
3167 /// Get a canonical multiply expression, or something simpler if possible.
3168 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
3169 SCEV::NoWrapFlags OrigFlags,
3170 unsigned Depth) {
3171 assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
3172 "only nuw or nsw allowed");
3173 assert(!Ops.empty() && "Cannot get empty mul!");
3174 if (Ops.size() == 1) return Ops[0];
3175 #ifndef NDEBUG
3176 Type *ETy = Ops[0]->getType();
3177 assert(!ETy->isPointerTy());
3178 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3179 assert(Ops[i]->getType() == ETy &&
3180 "SCEVMulExpr operand types don't match!");
3181 #endif
3182
3183 // Sort by complexity; this groups all similar expression types together.
3184 GroupByComplexity(Ops, &LI, DT);
3185
3186 // If there are any constants, fold them together.
3187 unsigned Idx = 0;
3188 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3189 ++Idx;
3190 assert(Idx < Ops.size());
3191 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3192 // We found two constants, fold them together!
3193 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3194 if (Ops.size() == 2) return Ops[0];
3195 Ops.erase(Ops.begin()+1); // Erase the folded element
3196 LHSC = cast<SCEVConstant>(Ops[0]);
3197 }
3198
3199 // If we have a multiply of zero, it will always be zero.
3200 if (LHSC->getValue()->isZero())
3201 return LHSC;
3202
3203 // If we are left with a constant one being multiplied, strip it off.
3204 if (LHSC->getValue()->isOne()) {
3205 Ops.erase(Ops.begin());
3206 --Idx;
3207 }
3208
3209 if (Ops.size() == 1)
3210 return Ops[0];
3211 }
3212
3213 // Delay expensive flag strengthening until necessary.
3214 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
3215 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
3216 };
3217
3218 // Limit recursion depth.
3219 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
3220 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3221
3222 if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
3223 // Don't strengthen flags if we have no new information.
3224 SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
3225 if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3226 Mul->setNoWrapFlags(ComputeFlags(Ops));
3227 return S;
3228 }
3229
3230 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3231 if (Ops.size() == 2) {
3232 // C1*(C2+V) -> C1*C2 + C1*V
3233 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3234 // If any of Add's ops are Adds or Muls with a constant, apply this
3235 // transformation as well.
3236 //
3237 // TODO: There are some cases where this transformation is not
3238 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
3239 // this transformation should be narrowed down.
3240 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) {
3241 const SCEV *LHS = getMulExpr(LHSC, Add->getOperand(0),
3242 SCEV::FlagAnyWrap, Depth + 1);
3243 const SCEV *RHS = getMulExpr(LHSC, Add->getOperand(1),
3244 SCEV::FlagAnyWrap, Depth + 1);
3245 return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1);
3246 }
3247
3248 if (Ops[0]->isAllOnesValue()) {
3249 // If we have a mul by -1 of an add, try distributing the -1 among the
3250 // add operands.
3251 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3252 SmallVector<const SCEV *, 4> NewOps;
3253 bool AnyFolded = false;
3254 for (const SCEV *AddOp : Add->operands()) {
3255 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
3256 Depth + 1);
3257 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
3258 NewOps.push_back(Mul);
3259 }
3260 if (AnyFolded)
3261 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
3262 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3263 // Negation preserves a recurrence's no self-wrap property.
3264 SmallVector<const SCEV *, 4> Operands;
3265 for (const SCEV *AddRecOp : AddRec->operands())
3266 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
3267 Depth + 1));
3268
3269 return getAddRecExpr(Operands, AddRec->getLoop(),
3270 AddRec->getNoWrapFlags(SCEV::FlagNW));
3271 }
3272 }
3273 }
3274 }
3275
3276 // Skip over the add expressions until we get to a multiply.
3277 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3278 ++Idx;
3279
3280 // If there are mul operands, inline them all into this expression.
3281 if (Idx < Ops.size()) {
3282 bool DeletedMul = false;
3283 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3284 if (Ops.size() > MulOpsInlineThreshold)
3285 break;
3286 // If we have a mul, expand the mul operands onto the end of the
3287 // operands list.
3288 Ops.erase(Ops.begin()+Idx);
3289 append_range(Ops, Mul->operands());
3290 DeletedMul = true;
3291 }
3292
3293 // If we deleted at least one mul, we added operands to the end of the
3294 // list, and they are not necessarily sorted. Recurse to re-sort and
3295 // re-simplify any operands we just acquired.
3296 if (DeletedMul)
3297 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3298 }
3299
3300 // If there are any add recurrences in the operands list, see if any other
3301 // added values are loop invariant. If so, we can fold them into the
3302 // recurrence.
3303 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
3304 ++Idx;
3305
3306 // Scan over all recurrences, trying to fold loop invariants into them.
3307 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3308 // Scan all of the other operands to this mul and add them to the vector
3309 // if they are loop invariant w.r.t. the recurrence.
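// Illustrative example of the loop-invariant fold below: with %n invariant
// in loop L, %n * {2,+,1}<L> becomes {2*%n,+,%n}<L>.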
3310 SmallVector<const SCEV *, 8> LIOps;
3311 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
3312 const Loop *AddRecLoop = AddRec->getLoop();
3313 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3314 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
3315 LIOps.push_back(Ops[i]);
3316 Ops.erase(Ops.begin()+i);
3317 --i; --e;
3318 }
3319
3320 // If we found some loop invariants, fold them into the recurrence.
3321 if (!LIOps.empty()) {
3322 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
3323 SmallVector<const SCEV *, 4> NewOps;
3324 NewOps.reserve(AddRec->getNumOperands());
3325 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
3326 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
3327 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
3328 SCEV::FlagAnyWrap, Depth + 1));
3329
3330 // Build the new addrec. Propagate the NUW and NSW flags if both the
3331 // outer mul and the inner addrec are guaranteed to have no overflow.
3332 //
3333 // No-self-wrap cannot be guaranteed after changing the step size, but
3334 // it will be inferred if either NUW or NSW is true.
3335 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec});
3336 const SCEV *NewRec = getAddRecExpr(
3337 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags));
3338
3339 // If all of the other operands were loop invariant, we are done.
3340 if (Ops.size() == 1) return NewRec;
3341
3342 // Otherwise, multiply the folded AddRec by the non-invariant parts.
3343 for (unsigned i = 0;; ++i)
3344 if (Ops[i] == AddRec) {
3345 Ops[i] = NewRec;
3346 break;
3347 }
3348 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3349 }
3350
3351 // Okay, if there weren't any loop invariants to be folded, check to see
3352 // if there are multiple AddRecs with the same loop induction variable
3353 // being multiplied together. If so, we can fold them.
3354
3355 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
3356 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
3357 // choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
3358 // ]]],+,...up to x=2n}. (For instance, {1,+,2}<L> * {3,+,4}<L> yields {3,+,18,+,16}<L>.)
3359 // Note that the arguments to choose() are always integers with values
3360 // known at compile time, never SCEV objects.
3361 //
3362 // The implementation avoids pointless extra computations when the two
3363 // addrecs are of different length (mathematically, it's equivalent to
3364 // an infinite stream of zeros on the right).
3365 bool OpsModified = false;
3366 for (unsigned OtherIdx = Idx+1;
3367 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3368 ++OtherIdx) {
3369 const SCEVAddRecExpr *OtherAddRec =
3370 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3371 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
3372 continue;
3373
3374 // Limit max number of arguments to avoid creation of unreasonably big
3375 // SCEVAddRecs with very complex operands.
3376 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3377 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3378 continue;
3379
3380 bool Overflow = false;
3381 Type *Ty = AddRec->getType();
3382 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3383 SmallVector<const SCEV*, 7> AddRecOps;
3384 for (int x = 0, xe = AddRec->getNumOperands() +
3385 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3386 SmallVector<const SCEV *, 7> SumOps;
3387 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3388 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3389 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3390 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3391 z < ze && !Overflow; ++z) {
3392 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3393 uint64_t Coeff;
3394 if (LargerThan64Bits)
3395 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3396 else
3397 Coeff = Coeff1*Coeff2;
3398 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3399 const SCEV *Term1 = AddRec->getOperand(y-z);
3400 const SCEV *Term2 = OtherAddRec->getOperand(z);
3401 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3402 SCEV::FlagAnyWrap, Depth + 1));
3403 }
3404 }
3405 if (SumOps.empty())
3406 SumOps.push_back(getZero(Ty));
3407 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3408 }
3409 if (!Overflow) {
3410 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3411 SCEV::FlagAnyWrap);
3412 if (Ops.size() == 2) return NewAddRec;
3413 Ops[Idx] = NewAddRec;
3414 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3415 OpsModified = true;
3416 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3417 if (!AddRec)
3418 break;
3419 }
3420 }
3421 if (OpsModified)
3422 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3423
3424 // Otherwise couldn't fold anything into this recurrence. Move on to the
3425 // next one.
3426 }
3427
3428 // Okay, it looks like we really DO need a mul expr. Check to see if we
3429 // already have one, otherwise create a new one.
3430 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3431 }
3432
3433 /// Represents an unsigned remainder expression based on unsigned division.
3434 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3435 const SCEV *RHS) {
3436 assert(getEffectiveSCEVType(LHS->getType()) ==
3437 getEffectiveSCEVType(RHS->getType()) &&
3438 "SCEVURemExpr operand types don't match!");
3439
3440 // Short-circuit easy cases.
3441 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3442 // If the constant is one, the result is trivial.
3443 if (RHSC->getValue()->isOne())
3444 return getZero(LHS->getType()); // X urem 1 --> 0
3445
3446 // If the constant is a power of two, fold into a zext(trunc(LHS)).
3447 if (RHSC->getAPInt().isPowerOf2()) {
3448 Type *FullTy = LHS->getType();
3449 Type *TruncTy =
3450 IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3451 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3452 }
3453 }
3454
3455 // Fall back to the expansion %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y).
3456 const SCEV *UDiv = getUDivExpr(LHS, RHS);
3457 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3458 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3459 }
3460
3461 /// Get a canonical unsigned division expression, or something simpler if
3462 /// possible.
3463 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3464 const SCEV *RHS) {
3465 assert(!LHS->getType()->isPointerTy() &&
3466 "SCEVUDivExpr operand can't be pointer!");
3467 assert(LHS->getType() == RHS->getType() &&
3468 "SCEVUDivExpr operand types don't match!");
3469
3470 FoldingSetNodeID ID;
3471 ID.AddInteger(scUDivExpr);
3472 ID.AddPointer(LHS);
3473 ID.AddPointer(RHS);
3474 void *IP = nullptr;
3475 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3476 return S;
3477
3478 // 0 udiv Y == 0
3479 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
3480 if (LHSC->getValue()->isZero())
3481 return LHS;
3482
3483 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3484 if (RHSC->getValue()->isOne())
3485 return LHS; // X udiv 1 --> X
3486 // If the denominator is zero, the result of the udiv is undefined. Don't
3487 // try to analyze it, because the resolution chosen here may differ from
3488 // the resolution chosen in other parts of the compiler.
3489 if (!RHSC->getValue()->isZero()) {
3490 // Determine if the division can be folded into the operands of
3491 // LHS.
3492 // TODO: Generalize this to non-constants by using known-bits information.
3493 Type *Ty = LHS->getType();
3494 unsigned LZ = RHSC->getAPInt().countl_zero();
3495 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3496 // For non-power-of-two values, effectively round the value up to the
3497 // nearest power of two.
3498 if (!RHSC->getAPInt().isPowerOf2())
3499 ++MaxShiftAmt;
3500 IntegerType *ExtTy =
3501 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3502 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3503 if (const SCEVConstant *Step =
3504 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3505 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3506 const APInt &StepInt = Step->getAPInt();
3507 const APInt &DivInt = RHSC->getAPInt();
3508 if (!StepInt.urem(DivInt) &&
3509 getZeroExtendExpr(AR, ExtTy) ==
3510 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3511 getZeroExtendExpr(Step, ExtTy),
3512 AR->getLoop(), SCEV::FlagAnyWrap)) {
3513 SmallVector<const SCEV *, 4> Operands;
3514 for (const SCEV *Op : AR->operands())
3515 Operands.push_back(getUDivExpr(Op, RHS));
3516 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3517 }
3518 // Get a canonical UDivExpr for a recurrence.
3519 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3520 // We can currently only fold X%N if X is constant.
3521 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3522 if (StartC && !DivInt.urem(StepInt) &&
3523 getZeroExtendExpr(AR, ExtTy) ==
3524 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3525 getZeroExtendExpr(Step, ExtTy),
3526 AR->getLoop(), SCEV::FlagAnyWrap)) {
3527 const APInt &StartInt = StartC->getAPInt();
3528 const APInt &StartRem = StartInt.urem(StepInt);
3529 if (StartRem != 0) {
3530 const SCEV *NewLHS =
3531 getAddRecExpr(getConstant(StartInt - StartRem), Step,
3532 AR->getLoop(), SCEV::FlagNW);
3533 if (LHS != NewLHS) {
3534 LHS = NewLHS;
3535
3536 // Reset the ID to include the new LHS, and check if it is
3537 // already cached.
3538 ID.clear();
3539 ID.AddInteger(scUDivExpr);
3540 ID.AddPointer(LHS);
3541 ID.AddPointer(RHS);
3542 IP = nullptr;
3543 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3544 return S;
3545 }
3546 }
3547 }
3548 }
3549 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
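// (Illustrative: (6*X)/3 folds to 2*X, since 6 udiv 3 = 2 and 2*3 == 6,
// provided the zero-extension check below shows the multiply cannot wrap.)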
3550 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3551 SmallVector<const SCEV *, 4> Operands; 3552 for (const SCEV *Op : M->operands()) 3553 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3554 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3555 // Find an operand that's safely divisible. 3556 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3557 const SCEV *Op = M->getOperand(i); 3558 const SCEV *Div = getUDivExpr(Op, RHSC); 3559 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3560 Operands = SmallVector<const SCEV *, 4>(M->operands()); 3561 Operands[i] = Div; 3562 return getMulExpr(Operands); 3563 } 3564 } 3565 } 3566 3567 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3568 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3569 if (auto *DivisorConstant = 3570 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3571 bool Overflow = false; 3572 APInt NewRHS = 3573 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3574 if (Overflow) { 3575 return getConstant(RHSC->getType(), 0, false); 3576 } 3577 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3578 } 3579 } 3580 3581 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3582 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3583 SmallVector<const SCEV *, 4> Operands; 3584 for (const SCEV *Op : A->operands()) 3585 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3586 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3587 Operands.clear(); 3588 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3589 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3590 if (isa<SCEVUDivExpr>(Op) || 3591 getMulExpr(Op, RHS) != A->getOperand(i)) 3592 break; 3593 Operands.push_back(Op); 3594 } 3595 if (Operands.size() == A->getNumOperands()) 3596 return getAddExpr(Operands); 3597 } 3598 } 3599 3600 // Fold if both operands are constant. 3601 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) 3602 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt())); 3603 } 3604 } 3605 3606 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3607 // changes). Make sure we get a new one. 3608 IP = nullptr; 3609 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3610 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3611 LHS, RHS); 3612 UniqueSCEVs.InsertNode(S, IP); 3613 registerUser(S, {LHS, RHS}); 3614 return S; 3615 } 3616 3617 APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3618 APInt A = C1->getAPInt().abs(); 3619 APInt B = C2->getAPInt().abs(); 3620 uint32_t ABW = A.getBitWidth(); 3621 uint32_t BBW = B.getBitWidth(); 3622 3623 if (ABW > BBW) 3624 B = B.zext(ABW); 3625 else if (ABW < BBW) 3626 A = A.zext(BBW); 3627 3628 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3629 } 3630 3631 /// Get a canonical unsigned division expression, or something simpler if 3632 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3633 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3634 /// it's not exact because the udiv may be clearing bits. 3635 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3636 const SCEV *RHS) { 3637 // TODO: we could try to find factors in all sorts of things, but for now we 3638 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3639 // end of this file for inspiration. 
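// Illustrative examples: an exact (4*X*Y)<nuw> /u 4 simply drops the
// constant, yielding X*Y; for (6*X)<nuw> /u 4, the common factor 2 is
// divided out first, leaving (3*X) /u 2 for the generic path.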
3640
3641 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3642 if (!Mul || !Mul->hasNoUnsignedWrap())
3643 return getUDivExpr(LHS, RHS);
3644
3645 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3646 // If the mulexpr multiplies by a constant, then that constant must be the
3647 // first element of the mulexpr.
3648 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3649 if (LHSCst == RHSCst) {
3650 SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
3651 return getMulExpr(Operands);
3652 }
3653
3654 // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3655 // that there's a factor provided by one of the other terms. We need to
3656 // check.
3657 APInt Factor = gcd(LHSCst, RHSCst);
3658 if (!Factor.isIntN(1)) {
3659 LHSCst =
3660 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3661 RHSCst =
3662 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3663 SmallVector<const SCEV *, 2> Operands;
3664 Operands.push_back(LHSCst);
3665 append_range(Operands, Mul->operands().drop_front());
3666 LHS = getMulExpr(Operands);
3667 RHS = RHSCst;
3668 Mul = dyn_cast<SCEVMulExpr>(LHS);
3669 if (!Mul)
3670 return getUDivExactExpr(LHS, RHS);
3671 }
3672 }
3673 }
3674
3675 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3676 if (Mul->getOperand(i) == RHS) {
3677 SmallVector<const SCEV *, 2> Operands;
3678 append_range(Operands, Mul->operands().take_front(i));
3679 append_range(Operands, Mul->operands().drop_front(i + 1));
3680 return getMulExpr(Operands);
3681 }
3682 }
3683
3684 return getUDivExpr(LHS, RHS);
3685 }
3686
3687 /// Get an add recurrence expression for the specified loop. Simplify the
3688 /// expression as much as possible.
3689 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3690 const Loop *L,
3691 SCEV::NoWrapFlags Flags) {
3692 SmallVector<const SCEV *, 4> Operands;
3693 Operands.push_back(Start);
3694 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3695 if (StepChrec->getLoop() == L) {
3696 append_range(Operands, StepChrec->operands());
3697 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3698 }
3699
3700 Operands.push_back(Step);
3701 return getAddRecExpr(Operands, L, Flags);
3702 }
3703
3704 /// Get an add recurrence expression for the specified loop. Simplify the
3705 /// expression as much as possible.
3706 const SCEV *
3707 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3708 const Loop *L, SCEV::NoWrapFlags Flags) {
3709 if (Operands.size() == 1) return Operands[0];
3710 #ifndef NDEBUG
3711 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3712 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3713 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3714 "SCEVAddRecExpr operand types don't match!");
3715 assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
3716 }
3717 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3718 assert(isLoopInvariant(Operands[i], L) &&
3719 "SCEVAddRecExpr operand is not loop-invariant!");
3720 #endif
3721
3722 if (Operands.back()->isZero()) {
3723 Operands.pop_back();
3724 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3725 }
3726
3727 // It's tempting to call getConstantMaxBackedgeTakenCount here and
3728 // use that information to infer NUW and NSW flags.
However, computing a
3729 // BE count requires calling getAddRecExpr, so we may not yet have a
3730 // meaningful BE count at this point (and if we don't, we'd be stuck
3731 // with a SCEVCouldNotCompute as the cached BE count).
3732
3733 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3734
3735 // Canonicalize nested AddRecs by nesting them in order of loop depth.
3736 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3737 const Loop *NestedLoop = NestedAR->getLoop();
3738 if (L->contains(NestedLoop)
3739 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3740 : (!NestedLoop->contains(L) &&
3741 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3742 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3743 Operands[0] = NestedAR->getStart();
3744 // AddRecs require their operands be loop-invariant with respect to their
3745 // loops. Don't perform this transformation if it would break this
3746 // requirement.
3747 bool AllInvariant = all_of(
3748 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3749
3750 if (AllInvariant) {
3751 // Create a recurrence for the outer loop with the same step size.
3752 //
3753 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3754 // inner recurrence has the same property.
3755 SCEV::NoWrapFlags OuterFlags =
3756 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3757
3758 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3759 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3760 return isLoopInvariant(Op, NestedLoop);
3761 });
3762
3763 if (AllInvariant) {
3764 // Ok, both add recurrences are valid after the transformation.
3765 //
3766 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3767 // the outer recurrence has the same property.
3768 SCEV::NoWrapFlags InnerFlags =
3769 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3770 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3771 }
3772 }
3773 // Reset Operands to its original state.
3774 Operands[0] = NestedAR;
3775 }
3776 }
3777
3778 // Okay, it looks like we really DO need an addrec expr. Check to see if we
3779 // already have one, otherwise create a new one.
3780 return getOrCreateAddRecExpr(Operands, L, Flags);
3781 }
3782
3783 const SCEV *
3784 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3785 const SmallVectorImpl<const SCEV *> &IndexExprs) {
3786 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3787 // getSCEV(Base)->getType() has the same address space as Base->getType()
3788 // because SCEV::getType() preserves the address space.
3789 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3790 const bool AssumeInBoundsFlags = [&]() {
3791 if (!GEP->isInBounds())
3792 return false;
3793
3794 // We'd like to propagate flags from the IR to the corresponding SCEV nodes,
3795 // but to do that, we have to ensure that said flag is valid in the entire
3796 // defined scope of the SCEV.
3797 auto *GEPI = dyn_cast<Instruction>(GEP);
3798 // TODO: non-instructions have global scope. We might be able to prove
3799 // some global scope cases.
3800 return GEPI && isSCEVExprNeverPoison(GEPI);
3801 }();
3802
3803 SCEV::NoWrapFlags OffsetWrap =
3804 AssumeInBoundsFlags ?
SCEV::FlagNSW : SCEV::FlagAnyWrap; 3805 3806 Type *CurTy = GEP->getType(); 3807 bool FirstIter = true; 3808 SmallVector<const SCEV *, 4> Offsets; 3809 for (const SCEV *IndexExpr : IndexExprs) { 3810 // Compute the (potentially symbolic) offset in bytes for this index. 3811 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3812 // For a struct, add the member offset. 3813 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3814 unsigned FieldNo = Index->getZExtValue(); 3815 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3816 Offsets.push_back(FieldOffset); 3817 3818 // Update CurTy to the type of the field at Index. 3819 CurTy = STy->getTypeAtIndex(Index); 3820 } else { 3821 // Update CurTy to its element type. 3822 if (FirstIter) { 3823 assert(isa<PointerType>(CurTy) && 3824 "The first index of a GEP indexes a pointer"); 3825 CurTy = GEP->getSourceElementType(); 3826 FirstIter = false; 3827 } else { 3828 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3829 } 3830 // For an array, add the element offset, explicitly scaled. 3831 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3832 // Getelementptr indices are signed. 3833 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3834 3835 // Multiply the index by the element size to compute the element offset. 3836 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); 3837 Offsets.push_back(LocalOffset); 3838 } 3839 } 3840 3841 // Handle degenerate case of GEP without offsets. 3842 if (Offsets.empty()) 3843 return BaseExpr; 3844 3845 // Add the offsets together, assuming nsw if inbounds. 3846 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); 3847 // Add the base address and the offset. We cannot use the nsw flag, as the 3848 // base address is unsigned. However, if we know that the offset is 3849 // non-negative, we can use nuw. 3850 SCEV::NoWrapFlags BaseWrap = AssumeInBoundsFlags && isKnownNonNegative(Offset) 3851 ? SCEV::FlagNUW : SCEV::FlagAnyWrap; 3852 auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap); 3853 assert(BaseExpr->getType() == GEPExpr->getType() && 3854 "GEP should not change type mid-flight."); 3855 return GEPExpr; 3856 } 3857 3858 SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, 3859 ArrayRef<const SCEV *> Ops) { 3860 FoldingSetNodeID ID; 3861 ID.AddInteger(SCEVType); 3862 for (const SCEV *Op : Ops) 3863 ID.AddPointer(Op); 3864 void *IP = nullptr; 3865 return UniqueSCEVs.FindNodeOrInsertPos(ID, IP); 3866 } 3867 3868 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { 3869 SCEV::NoWrapFlags Flags = IsNSW ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap; 3870 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); 3871 } 3872 3873 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, 3874 SmallVectorImpl<const SCEV *> &Ops) { 3875 assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!"); 3876 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3877 if (Ops.size() == 1) return Ops[0]; 3878 #ifndef NDEBUG 3879 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3880 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 3881 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3882 "Operand types don't match!"); 3883 assert(Ops[0]->getType()->isPointerTy() == 3884 Ops[i]->getType()->isPointerTy() && 3885 "min/max should be consistently pointerish"); 3886 } 3887 #endif 3888 3889 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3890 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3891 3892 // Sort by complexity, this groups all similar expression types together. 3893 GroupByComplexity(Ops, &LI, DT); 3894 3895 // Check if we have created the same expression before. 3896 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) { 3897 return S; 3898 } 3899 3900 // If there are any constants, fold them together. 3901 unsigned Idx = 0; 3902 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3903 ++Idx; 3904 assert(Idx < Ops.size()); 3905 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3906 if (Kind == scSMaxExpr) 3907 return APIntOps::smax(LHS, RHS); 3908 else if (Kind == scSMinExpr) 3909 return APIntOps::smin(LHS, RHS); 3910 else if (Kind == scUMaxExpr) 3911 return APIntOps::umax(LHS, RHS); 3912 else if (Kind == scUMinExpr) 3913 return APIntOps::umin(LHS, RHS); 3914 llvm_unreachable("Unknown SCEV min/max opcode"); 3915 }; 3916 3917 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3918 // We found two constants, fold them together! 3919 ConstantInt *Fold = ConstantInt::get( 3920 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3921 Ops[0] = getConstant(Fold); 3922 Ops.erase(Ops.begin()+1); // Erase the folded element 3923 if (Ops.size() == 1) return Ops[0]; 3924 LHSC = cast<SCEVConstant>(Ops[0]); 3925 } 3926 3927 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3928 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3929 3930 if (IsMax ? IsMinV : IsMaxV) { 3931 // If we are left with a constant minimum(/maximum)-int, strip it off. 3932 Ops.erase(Ops.begin()); 3933 --Idx; 3934 } else if (IsMax ? IsMaxV : IsMinV) { 3935 // If we have a max(/min) with a constant maximum(/minimum)-int, 3936 // it will always be the extremum. 3937 return LHSC; 3938 } 3939 3940 if (Ops.size() == 1) return Ops[0]; 3941 } 3942 3943 // Find the first operation of the same kind 3944 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3945 ++Idx; 3946 3947 // Check to see if one of the operands is of the same kind. If so, expand its 3948 // operands onto our operand list, and recurse to simplify. 3949 if (Idx < Ops.size()) { 3950 bool DeletedAny = false; 3951 while (Ops[Idx]->getSCEVType() == Kind) { 3952 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3953 Ops.erase(Ops.begin()+Idx); 3954 append_range(Ops, SMME->operands()); 3955 DeletedAny = true; 3956 } 3957 3958 if (DeletedAny) 3959 return getMinMaxExpr(Kind, Ops); 3960 } 3961 3962 // Okay, check to see if the same value occurs in the operand list twice. If 3963 // so, delete one. Since we sorted the list, these values are required to 3964 // be adjacent. 
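// For instance, smax(X, Y, Y) becomes smax(X, Y), and smax(X, Y) becomes X
// outright if X >=s Y can be proven.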
3965 llvm::CmpInst::Predicate GEPred = 3966 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3967 llvm::CmpInst::Predicate LEPred = 3968 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3969 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3970 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3971 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3972 if (Ops[i] == Ops[i + 1] || 3973 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3974 // X op Y op Y --> X op Y 3975 // X op Y --> X, if we know X, Y are ordered appropriately 3976 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3977 --i; 3978 --e; 3979 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3980 Ops[i + 1])) { 3981 // X op Y --> Y, if we know X, Y are ordered appropriately 3982 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3983 --i; 3984 --e; 3985 } 3986 } 3987 3988 if (Ops.size() == 1) return Ops[0]; 3989 3990 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3991 3992 // Okay, it looks like we really DO need an expr. Check to see if we 3993 // already have one, otherwise create a new one. 3994 FoldingSetNodeID ID; 3995 ID.AddInteger(Kind); 3996 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3997 ID.AddPointer(Ops[i]); 3998 void *IP = nullptr; 3999 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP); 4000 if (ExistingSCEV) 4001 return ExistingSCEV; 4002 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 4003 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 4004 SCEV *S = new (SCEVAllocator) 4005 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); 4006 4007 UniqueSCEVs.InsertNode(S, IP); 4008 registerUser(S, Ops); 4009 return S; 4010 } 4011 4012 namespace { 4013 4014 class SCEVSequentialMinMaxDeduplicatingVisitor final 4015 : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, 4016 std::optional<const SCEV *>> { 4017 using RetVal = std::optional<const SCEV *>; 4018 using Base = SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, RetVal>; 4019 4020 ScalarEvolution &SE; 4021 const SCEVTypes RootKind; // Must be a sequential min/max expression. 4022 const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind. 4023 SmallPtrSet<const SCEV *, 16> SeenOps; 4024 4025 bool canRecurseInto(SCEVTypes Kind) const { 4026 // We can only recurse into the SCEV expression of the same effective type 4027 // as the type of our root SCEV expression. 4028 return RootKind == Kind || NonSequentialRootKind == Kind; 4029 }; 4030 4031 RetVal visitAnyMinMaxExpr(const SCEV *S) { 4032 assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) && 4033 "Only for min/max expressions."); 4034 SCEVTypes Kind = S->getSCEVType(); 4035 4036 if (!canRecurseInto(Kind)) 4037 return S; 4038 4039 auto *NAry = cast<SCEVNAryExpr>(S); 4040 SmallVector<const SCEV *> NewOps; 4041 bool Changed = visit(Kind, NAry->operands(), NewOps); 4042 4043 if (!Changed) 4044 return S; 4045 if (NewOps.empty()) 4046 return std::nullopt; 4047 4048 return isa<SCEVSequentialMinMaxExpr>(S) 4049 ? SE.getSequentialMinMaxExpr(Kind, NewOps) 4050 : SE.getMinMaxExpr(Kind, NewOps); 4051 } 4052 4053 RetVal visit(const SCEV *S) { 4054 // Has the whole operand been seen already? 
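// (Dropping later duplicates is legal here: e.g. umin_seq(x, y, x) is
// equivalent to umin_seq(x, y), since the first x already determines both
// the value and the poison behavior.)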
4055 if (!SeenOps.insert(S).second) 4056 return std::nullopt; 4057 return Base::visit(S); 4058 } 4059 4060 public: 4061 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE, 4062 SCEVTypes RootKind) 4063 : SE(SE), RootKind(RootKind), 4064 NonSequentialRootKind( 4065 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType( 4066 RootKind)) {} 4067 4068 bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<const SCEV *> OrigOps, 4069 SmallVectorImpl<const SCEV *> &NewOps) { 4070 bool Changed = false; 4071 SmallVector<const SCEV *> Ops; 4072 Ops.reserve(OrigOps.size()); 4073 4074 for (const SCEV *Op : OrigOps) { 4075 RetVal NewOp = visit(Op); 4076 if (NewOp != Op) 4077 Changed = true; 4078 if (NewOp) 4079 Ops.emplace_back(*NewOp); 4080 } 4081 4082 if (Changed) 4083 NewOps = std::move(Ops); 4084 return Changed; 4085 } 4086 4087 RetVal visitConstant(const SCEVConstant *Constant) { return Constant; } 4088 4089 RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; } 4090 4091 RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; } 4092 4093 RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; } 4094 4095 RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; } 4096 4097 RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; } 4098 4099 RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; } 4100 4101 RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; } 4102 4103 RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } 4104 4105 RetVal visitSMaxExpr(const SCEVSMaxExpr *Expr) { 4106 return visitAnyMinMaxExpr(Expr); 4107 } 4108 4109 RetVal visitUMaxExpr(const SCEVUMaxExpr *Expr) { 4110 return visitAnyMinMaxExpr(Expr); 4111 } 4112 4113 RetVal visitSMinExpr(const SCEVSMinExpr *Expr) { 4114 return visitAnyMinMaxExpr(Expr); 4115 } 4116 4117 RetVal visitUMinExpr(const SCEVUMinExpr *Expr) { 4118 return visitAnyMinMaxExpr(Expr); 4119 } 4120 4121 RetVal visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) { 4122 return visitAnyMinMaxExpr(Expr); 4123 } 4124 4125 RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; } 4126 4127 RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; } 4128 }; 4129 4130 } // namespace 4131 4132 static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind) { 4133 switch (Kind) { 4134 case scConstant: 4135 case scTruncate: 4136 case scZeroExtend: 4137 case scSignExtend: 4138 case scPtrToInt: 4139 case scAddExpr: 4140 case scMulExpr: 4141 case scUDivExpr: 4142 case scAddRecExpr: 4143 case scUMaxExpr: 4144 case scSMaxExpr: 4145 case scUMinExpr: 4146 case scSMinExpr: 4147 case scUnknown: 4148 // If any operand is poison, the whole expression is poison. 4149 return true; 4150 case scSequentialUMinExpr: 4151 // FIXME: if the *first* operand is poison, the whole expression is poison. 4152 return false; // Pessimistically, say that it does not propagate poison. 4153 case scCouldNotCompute: 4154 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 4155 } 4156 llvm_unreachable("Unknown SCEV kind!"); 4157 } 4158 4159 /// Return true if V is poison given that AssumedPoison is already poison. 4160 static bool impliesPoison(const SCEV *AssumedPoison, const SCEV *S) { 4161 // The only way poison may be introduced in a SCEV expression is from a 4162 // poison SCEVUnknown (ConstantExprs are also represented as SCEVUnknown, 4163 // not SCEVConstant). 
Notably, nowrap flags in SCEV nodes can *not*
4164 // introduce poison -- they encode guaranteed, non-speculated knowledge.
4165 //
4166 // Additionally, all SCEV nodes propagate poison from inputs to outputs,
4167 // with the notable exception of umin_seq, where only poison from the first
4168 // operand is (unconditionally) propagated.
4169 struct SCEVPoisonCollector {
4170 bool LookThroughSeq;
4171 SmallPtrSet<const SCEV *, 4> MaybePoison;
4172 SCEVPoisonCollector(bool LookThroughSeq) : LookThroughSeq(LookThroughSeq) {}
4173
4174 bool follow(const SCEV *S) {
4175 if (!scevUnconditionallyPropagatesPoisonFromOperands(S->getSCEVType())) {
4176 switch (S->getSCEVType()) {
4177 case scConstant:
4178 case scTruncate:
4179 case scZeroExtend:
4180 case scSignExtend:
4181 case scPtrToInt:
4182 case scAddExpr:
4183 case scMulExpr:
4184 case scUDivExpr:
4185 case scAddRecExpr:
4186 case scUMaxExpr:
4187 case scSMaxExpr:
4188 case scUMinExpr:
4189 case scSMinExpr:
4190 case scUnknown:
4191 llvm_unreachable("These all unconditionally propagate poison.");
4192 case scSequentialUMinExpr:
4193 // TODO: We can always follow the first operand,
4194 // but the SCEVTraversal API doesn't support this.
4195 if (!LookThroughSeq)
4196 return false;
4197 break;
4198 case scCouldNotCompute:
4199 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
4200 }
4201 }
4202
4203 if (auto *SU = dyn_cast<SCEVUnknown>(S)) {
4204 if (!isGuaranteedNotToBePoison(SU->getValue()))
4205 MaybePoison.insert(S);
4206 }
4207 return true;
4208 }
4209 bool isDone() const { return false; }
4210 };
4211
4212 // First collect all SCEVs that might cause AssumedPoison to be poison.
4213 // We need to look through umin_seq here, because we want to find all SCEVs
4214 // that *might* result in poison, not only those that are *required* to.
4215 SCEVPoisonCollector PC1(/* LookThroughSeq */ true);
4216 visitAll(AssumedPoison, PC1);
4217
4218 // If AssumedPoison is never poison, the assumption is false, and the
4219 // implication is vacuously true. Don't bother walking the other SCEV.
4220 if (PC1.MaybePoison.empty())
4221 return true;
4222
4223 // Collect all SCEVs in S that, if poison, *will* result in S being poison
4224 // as well. We cannot look through umin_seq here, as its argument only *may*
4225 // make the result poison.
4226 SCEVPoisonCollector PC2(/* LookThroughSeq */ false);
4227 visitAll(S, PC2);
4228
4229 // Make sure that no matter which SCEV in PC1.MaybePoison is actually poison,
4230 // it will also make S poison by being part of PC2.MaybePoison.
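// Illustrative example: impliesPoison(%x + %y, %x) is false, since %x + %y
// may be poison due to %y alone; impliesPoison(%x, %x + %y) is true.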
4231 return all_of(PC1.MaybePoison, 4232 [&](const SCEV *S) { return PC2.MaybePoison.contains(S); }); 4233 } 4234 4235 const SCEV * 4236 ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind, 4237 SmallVectorImpl<const SCEV *> &Ops) { 4238 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) && 4239 "Not a SCEVSequentialMinMaxExpr!"); 4240 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 4241 if (Ops.size() == 1) 4242 return Ops[0]; 4243 #ifndef NDEBUG 4244 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 4245 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 4246 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 4247 "Operand types don't match!"); 4248 assert(Ops[0]->getType()->isPointerTy() == 4249 Ops[i]->getType()->isPointerTy() && 4250 "min/max should be consistently pointerish"); 4251 } 4252 #endif 4253 4254 // Note that SCEVSequentialMinMaxExpr is *NOT* commutative, 4255 // so we can *NOT* do any kind of sorting of the expressions! 4256 4257 // Check if we have created the same expression before. 4258 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) 4259 return S; 4260 4261 // FIXME: there are *some* simplifications that we can do here. 4262 4263 // Keep only the first instance of an operand. 4264 { 4265 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind); 4266 bool Changed = Deduplicator.visit(Kind, Ops, Ops); 4267 if (Changed) 4268 return getSequentialMinMaxExpr(Kind, Ops); 4269 } 4270 4271 // Check to see if one of the operands is of the same kind. If so, expand its 4272 // operands onto our operand list, and recurse to simplify. 4273 { 4274 unsigned Idx = 0; 4275 bool DeletedAny = false; 4276 while (Idx < Ops.size()) { 4277 if (Ops[Idx]->getSCEVType() != Kind) { 4278 ++Idx; 4279 continue; 4280 } 4281 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]); 4282 Ops.erase(Ops.begin() + Idx); 4283 Ops.insert(Ops.begin() + Idx, SMME->operands().begin(), 4284 SMME->operands().end()); 4285 DeletedAny = true; 4286 } 4287 4288 if (DeletedAny) 4289 return getSequentialMinMaxExpr(Kind, Ops); 4290 } 4291 4292 const SCEV *SaturationPoint; 4293 ICmpInst::Predicate Pred; 4294 switch (Kind) { 4295 case scSequentialUMinExpr: 4296 SaturationPoint = getZero(Ops[0]->getType()); 4297 Pred = ICmpInst::ICMP_ULE; 4298 break; 4299 default: 4300 llvm_unreachable("Not a sequential min/max type."); 4301 } 4302 4303 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 4304 // We can replace %x umin_seq %y with %x umin %y if either: 4305 // * %y being poison implies %x is also poison. 4306 // * %x cannot be the saturating value (e.g. zero for umin). 4307 if (::impliesPoison(Ops[i], Ops[i - 1]) || 4308 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1], 4309 SaturationPoint)) { 4310 SmallVector<const SCEV *> SeqOps = {Ops[i - 1], Ops[i]}; 4311 Ops[i - 1] = getMinMaxExpr( 4312 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind), 4313 SeqOps); 4314 Ops.erase(Ops.begin() + i); 4315 return getSequentialMinMaxExpr(Kind, Ops); 4316 } 4317 // Fold %x umin_seq %y to %x if %x ule %y. 4318 // TODO: We might be able to prove the predicate for a later operand. 4319 if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) { 4320 Ops.erase(Ops.begin() + i); 4321 return getSequentialMinMaxExpr(Kind, Ops); 4322 } 4323 } 4324 4325 // Okay, it looks like we really DO need an expr. Check to see if we 4326 // already have one, otherwise create a new one. 
4327 FoldingSetNodeID ID;
4328 ID.AddInteger(Kind);
4329 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
4330 ID.AddPointer(Ops[i]);
4331 void *IP = nullptr;
4332 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
4333 if (ExistingSCEV)
4334 return ExistingSCEV;
4335
4336 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
4337 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
4338 SCEV *S = new (SCEVAllocator)
4339 SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
4340
4341 UniqueSCEVs.InsertNode(S, IP);
4342 registerUser(S, Ops);
4343 return S;
4344 }
4345
4346 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
4347 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4348 return getSMaxExpr(Ops);
4349 }
4350
4351 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
4352 return getMinMaxExpr(scSMaxExpr, Ops);
4353 }
4354
4355 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
4356 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4357 return getUMaxExpr(Ops);
4358 }
4359
4360 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
4361 return getMinMaxExpr(scUMaxExpr, Ops);
4362 }
4363
4364 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
4365 const SCEV *RHS) {
4366 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4367 return getSMinExpr(Ops);
4368 }
4369
4370 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
4371 return getMinMaxExpr(scSMinExpr, Ops);
4372 }
4373
4374 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS,
4375 bool Sequential) {
4376 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4377 return getUMinExpr(Ops, Sequential);
4378 }
4379
4380 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops,
4381 bool Sequential) {
4382 return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops)
4383 : getMinMaxExpr(scUMinExpr, Ops);
4384 }
4385
4386 const SCEV *
4387 ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
4388 ScalableVectorType *ScalableTy) {
4389 Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
4390 Constant *One = ConstantInt::get(IntTy, 1);
4391 Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
4392 // Note that the expression we created is the final expression; we don't
4393 // want to simplify it any further. Also, if we call a normal getSCEV(),
4394 // we'll end up in an endless recursion. So just create an SCEVUnknown.
4395 return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
4396 }
4397
4398 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
4399 if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
4400 return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
4401 // We can bypass creating a target-independent constant expression and then
4402 // folding it back into a ConstantInt. This is just a compile-time
4403 // optimization.
4404 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
4405 }
4406
4407 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
4408 if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
4409 return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
4410 // We can bypass creating a target-independent constant expression and then
4411 // folding it back into a ConstantInt. This is just a compile-time
4412 // optimization.
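// (Illustrative, assuming a typical data layout: an i36 has a store size of
// 5 bytes but an alloc size of 8, so this can differ from getSizeOfExpr.)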
4413 return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
4414 }
4415
4416 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
4417 StructType *STy,
4418 unsigned FieldNo) {
4419 // We can bypass creating a target-independent constant expression and then
4420 // folding it back into a ConstantInt. This is just a compile-time
4421 // optimization.
4422 return getConstant(
4423 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
4424 }
4425
4426 const SCEV *ScalarEvolution::getUnknown(Value *V) {
4427 // Don't attempt to do anything other than create a SCEVUnknown object
4428 // here. createSCEV only calls getUnknown after checking for all other
4429 // interesting possibilities, and any other code that calls getUnknown
4430 // is doing so in order to hide a value from SCEV canonicalization.
4431
4432 FoldingSetNodeID ID;
4433 ID.AddInteger(scUnknown);
4434 ID.AddPointer(V);
4435 void *IP = nullptr;
4436 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
4437 assert(cast<SCEVUnknown>(S)->getValue() == V &&
4438 "Stale SCEVUnknown in uniquing map!");
4439 return S;
4440 }
4441 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
4442 FirstUnknown);
4443 FirstUnknown = cast<SCEVUnknown>(S);
4444 UniqueSCEVs.InsertNode(S, IP);
4445 return S;
4446 }
4447
4448 //===----------------------------------------------------------------------===//
4449 // Basic SCEV Analysis and PHI Idiom Recognition Code
4450 //
4451
4452 /// Test if values of the given type are analyzable within the SCEV
4453 /// framework. This primarily includes integer types, and it can optionally
4454 /// include pointer types if the ScalarEvolution class has access to
4455 /// target-specific information.
4456 bool ScalarEvolution::isSCEVable(Type *Ty) const {
4457 // Integers and pointers are always SCEVable.
4458 return Ty->isIntOrPtrTy();
4459 }
4460
4461 /// Return the size in bits of the specified type, for which isSCEVable must
4462 /// return true.
4463 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
4464 assert(isSCEVable(Ty) && "Type is not SCEVable!");
4465 if (Ty->isPointerTy())
4466 return getDataLayout().getIndexTypeSizeInBits(Ty);
4467 return getDataLayout().getTypeSizeInBits(Ty);
4468 }
4469
4470 /// Return a type with the same bitwidth as the given type and which represents
4471 /// how SCEV will treat the given type, for which isSCEVable must return
4472 /// true. For pointer types, this is the pointer index sized integer type.
4473 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
4474 assert(isSCEVable(Ty) && "Type is not SCEVable!");
4475
4476 if (Ty->isIntegerTy())
4477 return Ty;
4478
4479 // The only other supported type is pointer.
4480 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
4481 return getDataLayout().getIndexType(Ty);
4482 }
4483
4484 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
4485 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
4486 }
4487
4488 bool ScalarEvolution::instructionCouldExistWitthOperands(const SCEV *A,
4489 const SCEV *B) {
4490 // For a valid use point to exist, the defining scope of one operand
4491 // must dominate the other.
4492 bool PreciseA, PreciseB;
4493 auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
4494 auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
4495 if (!PreciseA || !PreciseB)
4496 // Can't tell.
    return false;
  return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
         DT.dominates(ScopeB, ScopeA);
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec =
      SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Return the set of Values associated with \p S. \p S can be represented
/// by any value in the returned set.
ArrayRef<Value *> ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return std::nullopt;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (Value *V : SI->second)
      assert(ValueExprMap.count(V));
  }
#endif
  return SI->second.getArrayRef();
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately. eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    auto EVIt = ExprValueMap.find(I->second);
    bool Removed = EVIt->second.remove(V);
    (void)Removed;
    assert(Removed && "Value not in ExprValueMap?");
    ValueExprMap.erase(I);
  }
}

void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
  // A recursive query may have already computed the SCEV. It should be
  // equivalent, but may not necessarily be exactly the same, e.g. due to lazily
  // inferred nowrap flags.
  auto It = ValueExprMap.find_as(V);
  if (It == ValueExprMap.end()) {
    ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    ExprValueMap[S].insert(V);
  }
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
4570 const SCEV *ScalarEvolution::getSCEV(Value *V) { 4571 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 4572 4573 if (const SCEV *S = getExistingSCEV(V)) 4574 return S; 4575 return createSCEVIter(V); 4576 } 4577 4578 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 4579 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 4580 4581 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 4582 if (I != ValueExprMap.end()) { 4583 const SCEV *S = I->second; 4584 assert(checkValidity(S) && 4585 "existing SCEV has not been properly invalidated"); 4586 return S; 4587 } 4588 return nullptr; 4589 } 4590 4591 /// Return a SCEV corresponding to -V = -1*V 4592 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 4593 SCEV::NoWrapFlags Flags) { 4594 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4595 return getConstant( 4596 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 4597 4598 Type *Ty = V->getType(); 4599 Ty = getEffectiveSCEVType(Ty); 4600 return getMulExpr(V, getMinusOne(Ty), Flags); 4601 } 4602 4603 /// If Expr computes ~A, return A else return nullptr 4604 static const SCEV *MatchNotExpr(const SCEV *Expr) { 4605 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 4606 if (!Add || Add->getNumOperands() != 2 || 4607 !Add->getOperand(0)->isAllOnesValue()) 4608 return nullptr; 4609 4610 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 4611 if (!AddRHS || AddRHS->getNumOperands() != 2 || 4612 !AddRHS->getOperand(0)->isAllOnesValue()) 4613 return nullptr; 4614 4615 return AddRHS->getOperand(1); 4616 } 4617 4618 /// Return a SCEV corresponding to ~V = -1-V 4619 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 4620 assert(!V->getType()->isPointerTy() && "Can't negate pointer"); 4621 4622 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4623 return getConstant( 4624 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 4625 4626 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 4627 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 4628 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 4629 SmallVector<const SCEV *, 2> MatchedOperands; 4630 for (const SCEV *Operand : MME->operands()) { 4631 const SCEV *Matched = MatchNotExpr(Operand); 4632 if (!Matched) 4633 return (const SCEV *)nullptr; 4634 MatchedOperands.push_back(Matched); 4635 } 4636 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), 4637 MatchedOperands); 4638 }; 4639 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 4640 return Replaced; 4641 } 4642 4643 Type *Ty = V->getType(); 4644 Ty = getEffectiveSCEVType(Ty); 4645 return getMinusSCEV(getMinusOne(Ty), V); 4646 } 4647 4648 const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) { 4649 assert(P->getType()->isPointerTy()); 4650 4651 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) { 4652 // The base of an AddRec is the first operand. 4653 SmallVector<const SCEV *> Ops{AddRec->operands()}; 4654 Ops[0] = removePointerBase(Ops[0]); 4655 // Don't try to transfer nowrap flags for now. We could in some cases 4656 // (for example, if pointer operand of the AddRec is a SCEVUnknown). 4657 return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap); 4658 } 4659 if (auto *Add = dyn_cast<SCEVAddExpr>(P)) { 4660 // The base of an Add is the pointer operand. 
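  // For example, stripping the base from (4 + %p + (8 * %i)), where %p is
  // the unique pointer operand, yields (4 + (8 * %i)); the recursive call
  // below also handles a pointer operand that is itself an Add or AddRec.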
4661 SmallVector<const SCEV *> Ops{Add->operands()}; 4662 const SCEV **PtrOp = nullptr; 4663 for (const SCEV *&AddOp : Ops) { 4664 if (AddOp->getType()->isPointerTy()) { 4665 assert(!PtrOp && "Cannot have multiple pointer ops"); 4666 PtrOp = &AddOp; 4667 } 4668 } 4669 *PtrOp = removePointerBase(*PtrOp); 4670 // Don't try to transfer nowrap flags for now. We could in some cases 4671 // (for example, if the pointer operand of the Add is a SCEVUnknown). 4672 return getAddExpr(Ops); 4673 } 4674 // Any other expression must be a pointer base. 4675 return getZero(P->getType()); 4676 } 4677 4678 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 4679 SCEV::NoWrapFlags Flags, 4680 unsigned Depth) { 4681 // Fast path: X - X --> 0. 4682 if (LHS == RHS) 4683 return getZero(LHS->getType()); 4684 4685 // If we subtract two pointers with different pointer bases, bail. 4686 // Eventually, we're going to add an assertion to getMulExpr that we 4687 // can't multiply by a pointer. 4688 if (RHS->getType()->isPointerTy()) { 4689 if (!LHS->getType()->isPointerTy() || 4690 getPointerBase(LHS) != getPointerBase(RHS)) 4691 return getCouldNotCompute(); 4692 LHS = removePointerBase(LHS); 4693 RHS = removePointerBase(RHS); 4694 } 4695 4696 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 4697 // makes it so that we cannot make much use of NUW. 4698 auto AddFlags = SCEV::FlagAnyWrap; 4699 const bool RHSIsNotMinSigned = 4700 !getSignedRangeMin(RHS).isMinSignedValue(); 4701 if (hasFlags(Flags, SCEV::FlagNSW)) { 4702 // Let M be the minimum representable signed value. Then (-1)*RHS 4703 // signed-wraps if and only if RHS is M. That can happen even for 4704 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 4705 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 4706 // (-1)*RHS, we need to prove that RHS != M. 4707 // 4708 // If LHS is non-negative and we know that LHS - RHS does not 4709 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 4710 // either by proving that RHS > M or that LHS >= 0. 4711 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 4712 AddFlags = SCEV::FlagNSW; 4713 } 4714 } 4715 4716 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 4717 // RHS is NSW and LHS >= 0. 4718 // 4719 // The difficulty here is that the NSW flag may have been proven 4720 // relative to a loop that is to be found in a recurrence in LHS and 4721 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 4722 // larger scope than intended. 4723 auto NegFlags = RHSIsNotMinSigned ? 
                                      SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS,
                                                        bool Sequential) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMinFromMismatchedTypes(Ops, Sequential);
}

const SCEV *
ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
                                            bool Sequential) {
  assert(!Ops.empty() && "At least one operand is required!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (const auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();
  assert(MaxType && "Failed to find maximum type!");

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (const auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps, Sequential);
}

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  while (true) {
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
      V = AddRec->getStart();
    } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
      const SCEV *PtrOp = nullptr;
      for (const SCEV *AddOp : Add->operands()) {
        if (AddOp->getType()->isPointerTy()) {
          assert(!PtrOp && "Cannot have multiple pointer ops");
          PtrOp = AddOp;
        }
      }
      assert(PtrOp && "Must have pointer op");
      V = PtrOp;
    } else // Not something we can look further into.
      return V;
  }
}

/// Push users of the given Instruction onto the given Worklist.
static void PushDefUseChildren(Instruction *I,
                               SmallVectorImpl<Instruction *> &Worklist,
                               SmallPtrSetImpl<Instruction *> &Visited) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users()) {
    auto *UserInsn = cast<Instruction>(U);
    if (Visited.insert(UserInsn).second)
      Worklist.push_back(UserInsn);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression if its loop is L. If its loop is not L and IgnoreOtherLoops is
/// true, use the AddRec itself; otherwise the rewrite cannot be done. If the
/// SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be done.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
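    // For example, under loop L the addrec {%start,+,1}<L> rewrites to
    // %start, while an addrec of some other loop is returned unchanged and
    // merely recorded via SeenOtherLoops.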
4909 if (Expr->getLoop() == L) 4910 return Expr->getStart(); 4911 SeenOtherLoops = true; 4912 return Expr; 4913 } 4914 4915 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4916 4917 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4918 4919 private: 4920 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 4921 : SCEVRewriteVisitor(SE), L(L) {} 4922 4923 const Loop *L; 4924 bool SeenLoopVariantSCEVUnknown = false; 4925 bool SeenOtherLoops = false; 4926 }; 4927 4928 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post 4929 /// increment expression in case its Loop is L. If it is not L then 4930 /// use AddRec itself. 4931 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 4932 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { 4933 public: 4934 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { 4935 SCEVPostIncRewriter Rewriter(L, SE); 4936 const SCEV *Result = Rewriter.visit(S); 4937 return Rewriter.hasSeenLoopVariantSCEVUnknown() 4938 ? SE.getCouldNotCompute() 4939 : Result; 4940 } 4941 4942 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4943 if (!SE.isLoopInvariant(Expr, L)) 4944 SeenLoopVariantSCEVUnknown = true; 4945 return Expr; 4946 } 4947 4948 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4949 // Only re-write AddRecExprs for this loop. 4950 if (Expr->getLoop() == L) 4951 return Expr->getPostIncExpr(SE); 4952 SeenOtherLoops = true; 4953 return Expr; 4954 } 4955 4956 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4957 4958 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4959 4960 private: 4961 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4962 : SCEVRewriteVisitor(SE), L(L) {} 4963 4964 const Loop *L; 4965 bool SeenLoopVariantSCEVUnknown = false; 4966 bool SeenOtherLoops = false; 4967 }; 4968 4969 /// This class evaluates the compare condition by matching it against the 4970 /// condition of loop latch. If there is a match we assume a true value 4971 /// for the condition while building SCEV nodes. 4972 class SCEVBackedgeConditionFolder 4973 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4974 public: 4975 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4976 ScalarEvolution &SE) { 4977 bool IsPosBECond = false; 4978 Value *BECond = nullptr; 4979 if (BasicBlock *Latch = L->getLoopLatch()) { 4980 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4981 if (BI && BI->isConditional()) { 4982 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4983 "Both outgoing branches should not target same header!"); 4984 BECond = BI->getCondition(); 4985 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4986 } else { 4987 return S; 4988 } 4989 } 4990 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4991 return Rewriter.visit(S); 4992 } 4993 4994 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4995 const SCEV *Result = Expr; 4996 bool InvariantF = SE.isLoopInvariant(Expr, L); 4997 4998 if (!InvariantF) { 4999 Instruction *I = cast<Instruction>(Expr->getValue()); 5000 switch (I->getOpcode()) { 5001 case Instruction::Select: { 5002 SelectInst *SI = cast<SelectInst>(I); 5003 std::optional<const SCEV *> Res = 5004 compareWithBackedgeCondition(SI->getCondition()); 5005 if (Res) { 5006 bool IsOne = cast<SCEVConstant>(*Res)->getValue()->isOne(); 5007 Result = SE.getSCEV(IsOne ? 
SI->getTrueValue() : SI->getFalseValue()); 5008 } 5009 break; 5010 } 5011 default: { 5012 std::optional<const SCEV *> Res = compareWithBackedgeCondition(I); 5013 if (Res) 5014 Result = *Res; 5015 break; 5016 } 5017 } 5018 } 5019 return Result; 5020 } 5021 5022 private: 5023 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 5024 bool IsPosBECond, ScalarEvolution &SE) 5025 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 5026 IsPositiveBECond(IsPosBECond) {} 5027 5028 std::optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 5029 5030 const Loop *L; 5031 /// Loop back condition. 5032 Value *BackedgeCond = nullptr; 5033 /// Set to true if loop back is on positive branch condition. 5034 bool IsPositiveBECond; 5035 }; 5036 5037 std::optional<const SCEV *> 5038 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 5039 5040 // If value matches the backedge condition for loop latch, 5041 // then return a constant evolution node based on loopback 5042 // branch taken. 5043 if (BackedgeCond == IC) 5044 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) 5045 : SE.getZero(Type::getInt1Ty(SE.getContext())); 5046 return std::nullopt; 5047 } 5048 5049 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 5050 public: 5051 static const SCEV *rewrite(const SCEV *S, const Loop *L, 5052 ScalarEvolution &SE) { 5053 SCEVShiftRewriter Rewriter(L, SE); 5054 const SCEV *Result = Rewriter.visit(S); 5055 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 5056 } 5057 5058 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 5059 // Only allow AddRecExprs for this loop. 5060 if (!SE.isLoopInvariant(Expr, L)) 5061 Valid = false; 5062 return Expr; 5063 } 5064 5065 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 5066 if (Expr->getLoop() == L && Expr->isAffine()) 5067 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 5068 Valid = false; 5069 return Expr; 5070 } 5071 5072 bool isValid() { return Valid; } 5073 5074 private: 5075 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 5076 : SCEVRewriteVisitor(SE), L(L) {} 5077 5078 const Loop *L; 5079 bool Valid = true; 5080 }; 5081 5082 } // end anonymous namespace 5083 5084 SCEV::NoWrapFlags 5085 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 5086 if (!AR->isAffine()) 5087 return SCEV::FlagAnyWrap; 5088 5089 using OBO = OverflowingBinaryOperator; 5090 5091 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 5092 5093 if (!AR->hasNoSignedWrap()) { 5094 ConstantRange AddRecRange = getSignedRange(AR); 5095 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 5096 5097 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 5098 Instruction::Add, IncRange, OBO::NoSignedWrap); 5099 if (NSWRegion.contains(AddRecRange)) 5100 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 5101 } 5102 5103 if (!AR->hasNoUnsignedWrap()) { 5104 ConstantRange AddRecRange = getUnsignedRange(AR); 5105 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 5106 5107 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 5108 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 5109 if (NUWRegion.contains(AddRecRange)) 5110 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 5111 } 5112 5113 return Result; 5114 } 5115 5116 SCEV::NoWrapFlags 5117 ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) { 5118 SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); 5119 5120 if 
(AR->hasNoSignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  // This function can be expensive, only try to prove NSW once per AddRec.
  if (!SignedWrapViaInductionTried.insert(AR).second)
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      getSignedOverflowLimitForStep(Step, &Pred, this);
  if (OverflowLimit &&
      (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
       isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
    Result = setFlags(Result, SCEV::FlagNSW);
  }
  return Result;
}

SCEV::NoWrapFlags
ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoUnsignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  // This function can be expensive, only try to prove NUW once per AddRec.
  if (!UnsignedWrapViaInductionTried.insert(AR).second)
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  unsigned BitWidth = getTypeSizeInBits(AR->getType());
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  if (isKnownPositive(Step)) {
    const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                getUnsignedRangeMax(Step));
    if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
        isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
      Result = setFlags(Result, SCEV::FlagNUW);
    }
  }

  return Result;
}

namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c std::nullopt on failure.
static std::optional<BinaryOp> MatchBinaryOp(Value *V, const DataLayout &DL,
                                             AssumptionCache &AC,
                                             const DominatorTree &DT,
                                             const Instruction *CxtI) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return std::nullopt;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Or: {
    // LLVM loves to convert `add` of operands with no common bits
    // into an `or`. But SCEV really doesn't deal with `or` that well,
    // so try extra hard to recognize this `or` as an `add`.
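    // For example, if %x is known to be a multiple of 4, then (or %x, 3)
    // sets only bits that are known to be clear in %x, so it is equivalent
    // to (add nuw nsw %x, 3).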
    if (haveNoCommonBitsSet(Op->getOperand(0), Op->getOperand(1), DL, &AC, CxtI,
                            &DT, /*UseInstrInfo=*/true))
      return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1),
                      /*IsNSW=*/true, /*IsNUW=*/true);
    return BinaryOp(Op);
  }

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    // For i1 values, binary `xor` is the same as `add` (mod 2).
    if (V->getType()->isIntegerTy(1))
      return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // CI are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  // Recognize the intrinsic loop.decrement.reg; since it has exactly the same
  // semantics as a Sub, return a binary sub expression.
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
      return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));

  return std::nullopt;
}

/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}

static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}

// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which corresponds to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
//
// TODOs:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
  SmallVector<const SCEVPredicate *, 3> Predicates;

  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
  // return an AddRec expression under some predicate.

  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  assert(L && "Expecting an integer loop header phi");

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
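  // E.g., for PN = phi [ %init, %preheader ], [ %be, %latch ] (names are
  // illustrative), the loop below finds StartValueV = %init and
  // BEValueV = %be; if two in-loop predecessors disagree on the incoming
  // value, the phi is not analyzable here.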
5479 Value *BEValueV = nullptr, *StartValueV = nullptr; 5480 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 5481 Value *V = PN->getIncomingValue(i); 5482 if (L->contains(PN->getIncomingBlock(i))) { 5483 if (!BEValueV) { 5484 BEValueV = V; 5485 } else if (BEValueV != V) { 5486 BEValueV = nullptr; 5487 break; 5488 } 5489 } else if (!StartValueV) { 5490 StartValueV = V; 5491 } else if (StartValueV != V) { 5492 StartValueV = nullptr; 5493 break; 5494 } 5495 } 5496 if (!BEValueV || !StartValueV) 5497 return std::nullopt; 5498 5499 const SCEV *BEValue = getSCEV(BEValueV); 5500 5501 // If the value coming around the backedge is an add with the symbolic 5502 // value we just inserted, possibly with casts that we can ignore under 5503 // an appropriate runtime guard, then we found a simple induction variable! 5504 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 5505 if (!Add) 5506 return std::nullopt; 5507 5508 // If there is a single occurrence of the symbolic value, possibly 5509 // casted, replace it with a recurrence. 5510 unsigned FoundIndex = Add->getNumOperands(); 5511 Type *TruncTy = nullptr; 5512 bool Signed; 5513 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5514 if ((TruncTy = 5515 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 5516 if (FoundIndex == e) { 5517 FoundIndex = i; 5518 break; 5519 } 5520 5521 if (FoundIndex == Add->getNumOperands()) 5522 return std::nullopt; 5523 5524 // Create an add with everything but the specified operand. 5525 SmallVector<const SCEV *, 8> Ops; 5526 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5527 if (i != FoundIndex) 5528 Ops.push_back(Add->getOperand(i)); 5529 const SCEV *Accum = getAddExpr(Ops); 5530 5531 // The runtime checks will not be valid if the step amount is 5532 // varying inside the loop. 5533 if (!isLoopInvariant(Accum, L)) 5534 return std::nullopt; 5535 5536 // *** Part2: Create the predicates 5537 5538 // Analysis was successful: we have a phi-with-cast pattern for which we 5539 // can return an AddRec expression under the following predicates: 5540 // 5541 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 5542 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
5543 // P2: An Equal predicate that guarantees that 5544 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 5545 // P3: An Equal predicate that guarantees that 5546 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 5547 // 5548 // As we next prove, the above predicates guarantee that: 5549 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 5550 // 5551 // 5552 // More formally, we want to prove that: 5553 // Expr(i+1) = Start + (i+1) * Accum 5554 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 5555 // 5556 // Given that: 5557 // 1) Expr(0) = Start 5558 // 2) Expr(1) = Start + Accum 5559 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 5560 // 3) Induction hypothesis (step i): 5561 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 5562 // 5563 // Proof: 5564 // Expr(i+1) = 5565 // = Start + (i+1)*Accum 5566 // = (Start + i*Accum) + Accum 5567 // = Expr(i) + Accum 5568 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 5569 // :: from step i 5570 // 5571 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 5572 // 5573 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 5574 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 5575 // + Accum :: from P3 5576 // 5577 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 5578 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 5579 // 5580 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 5581 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 5582 // 5583 // By induction, the same applies to all iterations 1<=i<n: 5584 // 5585 5586 // Create a truncated addrec for which we will add a no overflow check (P1). 5587 const SCEV *StartVal = getSCEV(StartValueV); 5588 const SCEV *PHISCEV = 5589 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 5590 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 5591 5592 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 5593 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 5594 // will be constant. 5595 // 5596 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 5597 // add P1. 5598 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 5599 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 5600 Signed ? SCEVWrapPredicate::IncrementNSSW 5601 : SCEVWrapPredicate::IncrementNUSW; 5602 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 5603 Predicates.push_back(AddRecPred); 5604 } 5605 5606 // Create the Equal Predicates P2,P3: 5607 5608 // It is possible that the predicates P2 and/or P3 are computable at 5609 // compile time due to StartVal and/or Accum being constants. 5610 // If either one is, then we can check that now and escape if either P2 5611 // or P3 is false. 5612 5613 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 5614 // for each of StartVal and Accum 5615 auto getExtendedExpr = [&](const SCEV *Expr, 5616 bool CreateSignExtend) -> const SCEV * { 5617 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 5618 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 5619 const SCEV *ExtendedExpr = 5620 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return std::nullopt;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW)
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return std::nullopt;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return std::nullopt;

  // Check to see if we already analyzed this PHI.
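  // In this cache, a rewrite mapping SymbolicPHI back to itself (with an
  // empty predicate list) encodes an earlier failed attempt; see the
  // failure path below.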
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return std::nullopt;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return std::nullopt;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds->implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds->implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, getDataLayout(), AC, DT, PN);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
  insertValueToMap(PN, PHISCEV);

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
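  // (For PHISCEV = {%start,+,%step}, the post-inc expression constructed
  // below is {%start + %step,+,%step}; building it with Flags records those
  // flags on the uniqued SCEV node so that later queries can observe them.)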
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) {
    assert(isLoopInvariant(Accum, L) &&
           "Accum is defined outside L, but is not invariant?");
    if (isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
  }

  return PHISCEV;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  insertValueToMap(PN, SymbolicName);

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
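      // For example, a loop-invariant step %n is fine, and so is a step of
      // {0,+,1}<L>, which makes the phi a quadratic recurrence in L; a step
      // that is an addrec of some unrelated loop is rejected.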
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, getDataLayout(), AC, DT, PN)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetMemoizedResults(SymbolicName);
        insertValueToMap(PN, PHISCEV);

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetMemoizedResults(SymbolicName);
        insertValueToMap(PN, Shifted);
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant:
      case scPtrToInt:
      case scTruncate:
      case scZeroExtend:
      case scSignExtend:
      case scAddExpr:
      case scMulExpr:
      case scUMaxExpr:
      case scSMaxExpr:
      case scUMinExpr:
      case scSMinExpr:
      case scSequentialUMinExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are in the loop BB is in, or in some
        // outer loop.  This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable.  We can relax this in the future; for instance an add
        // recurrence in a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("Unknown SCEV kind!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern.  Return true on a successful
// match.
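// That is, for
//   br i1 %c, label %left, label %right
// with both edges meeting at
//   %merge = phi [ %x, %left ], [ %y, %right ]
// this recovers C = %c, LHS = %x, RHS = %y (or the swapped assignment,
// depending on which edge feeds which phi operand).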
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}

const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    const Loop *L = LI.getLoopFor(PN->getParent());

    // We don't want to break LCSSA, even in a SCEV expression tree.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
        return nullptr;

    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
        IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}

const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  if (Value *V = simplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
    return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind,
                            SCEVTypes RootKind) {
  struct FindClosure {
    const SCEV *OperandToFind;
    const SCEVTypes RootKind; // Must be a sequential min/max expression.
    const SCEVTypes NonSequentialRootKind; // Non-seq variant of RootKind.

    bool Found = false;

    bool canRecurseInto(SCEVTypes Kind) const {
      // We can only recurse into the SCEV expression of the same effective
      // type as the type of our root SCEV expression, and into
      // zero-extensions.
      return RootKind == Kind || NonSequentialRootKind == Kind ||
             scZeroExtend == Kind;
    };

    FindClosure(const SCEV *OperandToFind, SCEVTypes RootKind)
        : OperandToFind(OperandToFind), RootKind(RootKind),
          NonSequentialRootKind(
              SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
                  RootKind)) {}

    bool follow(const SCEV *S) {
      Found = S == OperandToFind;

      return !isDone() && canRecurseInto(S->getSCEVType());
    }

    bool isDone() const { return Found; }
  };

  FindClosure FC(OperandToFind, RootKind);
  visitAll(Root, FC);
  return FC.Found;
}

std::optional<const SCEV *>
ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty,
                                                              ICmpInst *Cond,
                                                              Value *TrueVal,
                                                              Value *FalseVal) {
  // Try to match some simple smax or umax patterns.
  auto *ICI = Cond;

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a > b ? a+x : b+x  ->  max(a, b)+x
    // a > b ? b+x : a+x  ->  min(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(Ty)) {
      bool Signed = ICI->isSigned();
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LS = getSCEV(LHS);
      const SCEV *RS = getSCEV(RHS);
      if (LA->getType()->isPointerTy()) {
        // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA.
        // Need to make sure we can't produce weird expressions involving
        // negated pointers.
        if (LA == LS && RA == RS)
          return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS);
        if (LA == RS && RA == LS)
          return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS);
      }
      auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * {
        if (Op->getType()->isPointerTy()) {
          Op = getLosslessPtrToIntExpr(Op);
          if (isa<SCEVCouldNotCompute>(Op))
            return Op;
        }
        if (Signed)
          Op = getNoopOrSignExtend(Op, Ty);
        else
          Op = getNoopOrZeroExtend(Op, Ty);
        return Op;
      };
      LS = CoerceOperand(LS);
      RS = CoerceOperand(RS);
      if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS))
        break;
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS),
                          LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS),
                          LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // x != 0 ? x+y : C+y  ->  x == 0 ? C+y : x+y
    std::swap(TrueVal, FalseVal);
    [[fallthrough]];
  case ICmpInst::ICMP_EQ:
    // x == 0 ? C+y : x+y  ->  umax(x, C)+y   iff C u<= 1
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(Ty) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *X = getNoopOrZeroExtend(getSCEV(LHS), Ty);
      const SCEV *TrueValExpr = getSCEV(TrueVal);    // C+y
      const SCEV *FalseValExpr = getSCEV(FalseVal);  // x+y
      const SCEV *Y = getMinusSCEV(FalseValExpr, X); // y = (x+y)-x
      const SCEV *C = getMinusSCEV(TrueValExpr, Y);  // C = (C+y)-y
      if (isa<SCEVConstant>(C) && cast<SCEVConstant>(C)->getAPInt().ule(1))
        return getAddExpr(getUMaxExpr(X, C), Y);
    }
    // x == 0 ? 0 : umin    (..., x, ...)  ->  umin_seq(x, umin    (...))
    // x == 0 ? 0 : umin_seq(..., x, ...)  ->  umin_seq(x, umin_seq(...))
    // x == 0 ? 0 : umin    (..., umin_seq(..., x, ...), ...)
    //                   ->  umin_seq(x, umin (..., umin_seq(...), ...))
    if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero() &&
        isa<ConstantInt>(TrueVal) && cast<ConstantInt>(TrueVal)->isZero()) {
      const SCEV *X = getSCEV(LHS);
      while (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(X))
        X = ZExt->getOperand();
      if (getTypeSizeInBits(X->getType()) <= getTypeSizeInBits(Ty)) {
        const SCEV *FalseValExpr = getSCEV(FalseVal);
        if (SCEVMinMaxExprContains(FalseValExpr, X, scSequentialUMinExpr))
          return getUMinExpr(getNoopOrZeroExtend(X, Ty), FalseValExpr,
                             /*Sequential=*/true);
      }
    }
    break;
  default:
    break;
  }

  return std::nullopt;
}

static std::optional<const SCEV *>
createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr,
                              const SCEV *TrueExpr, const SCEV *FalseExpr) {
  assert(CondExpr->getType()->isIntegerTy(1) &&
         TrueExpr->getType() == FalseExpr->getType() &&
         TrueExpr->getType()->isIntegerTy(1) &&
         "Unexpected operands of a select.");

  // i1 cond ? i1 x : i1 C  -->  C + (i1  cond ? (i1 x - i1 C) : i1 0)
  //                        -->  C + (umin_seq  cond, x - C)
  //
  // i1 cond ? i1 C : i1 x  -->  C + (i1  cond ? i1 0 : (i1 x - i1 C))
  //                        -->  C + (i1 ~cond ? (i1 x - i1 C) : i1 0)
  //                        -->  C + (umin_seq ~cond, x - C)

  // FIXME: while we cannot legally model the case where both hands are fully
  // variable, strictly we only require that the *difference* be constant.
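  //
  // Sanity check (illustrative, not from the original source): in i1
  // arithmetic, addition and subtraction are both XOR, and umin_seq(a, b)
  // picks b when a is true and 0 otherwise. So C + umin_seq(cond, x - C)
  // is C ^ (cond ? (x ^ C) : 0): for cond == true it yields C ^ x ^ C == x,
  // and for cond == false it yields C, matching "cond ? x : C".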
  if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr))
    return std::nullopt;

  const SCEV *X, *C;
  if (isa<SCEVConstant>(TrueExpr)) {
    CondExpr = SE->getNotSCEV(CondExpr);
    X = FalseExpr;
    C = TrueExpr;
  } else {
    X = TrueExpr;
    C = FalseExpr;
  }
  return SE->getAddExpr(C, SE->getUMinExpr(CondExpr, SE->getMinusSCEV(X, C),
                                           /*Sequential=*/true));
}

static std::optional<const SCEV *>
createNodeForSelectViaUMinSeq(ScalarEvolution *SE, Value *Cond, Value *TrueVal,
                              Value *FalseVal) {
  if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal))
    return std::nullopt;

  const auto *SECond = SE->getSCEV(Cond);
  const auto *SETrue = SE->getSCEV(TrueVal);
  const auto *SEFalse = SE->getSCEV(FalseVal);
  return createNodeForSelectViaUMinSeq(SE, SECond, SETrue, SEFalse);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
    Value *V, Value *Cond, Value *TrueVal, Value *FalseVal) {
  assert(Cond->getType()->isIntegerTy(1) && "Select condition is not an i1?");
  assert(TrueVal->getType() == FalseVal->getType() &&
         V->getType() == TrueVal->getType() &&
         "Types of select hands and of the result must match.");

  // For now, only deal with i1-typed `select`s.
  if (!V->getType()->isIntegerTy(1))
    return getUnknown(V);

  if (std::optional<const SCEV *> S =
          createNodeForSelectViaUMinSeq(this, Cond, TrueVal, FalseVal))
    return *S;

  return getUnknown(V);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
      if (std::optional<const SCEV *> S =
              createNodeForSelectOrPHIInstWithICmpInstCond(I->getType(), ICI,
                                                           TrueVal, FalseVal))
        return *S;
    }
  }

  return createNodeForSelectOrPHIViaUMinSeq(V, Cond, TrueVal, FalseVal);
}

/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  assert(GEP->getSourceElementType()->isSized() &&
         "GEP source element type must be sized");

  SmallVector<const SCEV *, 4> IndexExprs;
  for (Value *Index : GEP->indices())
    IndexExprs.push_back(getSCEV(Index));
  return getGEPExpr(GEP, IndexExprs);
}

uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  switch (S->getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(S)->getAPInt().countr_zero();
  case scTruncate: {
    const SCEVTruncateExpr *T = cast<SCEVTruncateExpr>(S);
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *E = cast<SCEVZeroExtendExpr>(S);
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *E = cast<SCEVSignExtendExpr>(S);
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }
  case scMulExpr: {
    const SCEVMulExpr *M = cast<SCEVMulExpr>(S);
    // The result is the sum of all the operands' results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes =
          std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
    return SumOpRes;
  }
  case scUDivExpr:
    return 0;
  case scPtrToInt:
  case scAddExpr:
  case scAddRecExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    // The result is the min of all the operands' results.
    ArrayRef<const SCEV *> Ops = S->operands();
    uint32_t MinOpRes = GetMinTrailingZeros(Ops[0]);
    for (unsigned I = 1, E = Ops.size(); MinOpRes && I != E; ++I)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(Ops[I]));
    return MinOpRes;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(S);
    // For a SCEVUnknown, ask ValueTracking.
    KnownBits Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
    return Known.countMinTrailingZeros();
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  auto I = MinTrailingZerosCache.find(S);
  if (I != MinTrailingZerosCache.end())
    return I->second;

  uint32_t Result = GetMinTrailingZerosImpl(S);
  auto InsertPair = MinTrailingZerosCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}

/// Helper method to assign a range to V from metadata present in the IR.
static std::optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return std::nullopt;
}

void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
                                     SCEV::NoWrapFlags Flags) {
  if (AddRec->getNoWrapFlags(Flags) != Flags) {
    AddRec->setNoWrapFlags(Flags);
    UnsignedRanges.erase(AddRec);
    SignedRanges.erase(AddRec);
  }
}

ConstantRange ScalarEvolution::
getRangeForUnknownRecurrence(const SCEVUnknown *U) {
  const DataLayout &DL = getDataLayout();

  unsigned BitWidth = getTypeSizeInBits(U->getType());
  const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);

  // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
  // use information about the trip count to improve our available range. Note
  // that the trip count independent cases are already handled by known bits.
  // WARNING: The definition of recurrence used here is subtly different from
  // the one used by AddRec (and thus most of this file).
  // Step is allowed to be arbitrarily loop varying here, where AddRec allows
  // only loop-invariant steps and other addrecs in the same loop (for
  // non-affine addrecs). The code below intentionally handles the case where
  // the step is not loop invariant.
  auto *P = dyn_cast<PHINode>(U->getValue());
  if (!P)
    return FullSet;

  // Make sure that no Phi input comes from an unreachable block. Otherwise,
  // even the values that are not available in these blocks may come from them,
  // and this leads to a false-positive recurrence test.
  for (auto *Pred : predecessors(P->getParent()))
    if (!DT.isReachableFromEntry(Pred))
      return FullSet;

  BinaryOperator *BO;
  Value *Start, *Step;
  if (!matchSimpleRecurrence(P, BO, Start, Step))
    return FullSet;

  // If we found a recurrence in reachable code, we must be in a loop. Note
  // that BO might be in some subloop of L, and that's completely okay.
  auto *L = LI.getLoopFor(P->getParent());
  assert(L && L->getHeader() == P->getParent());
  if (!L->contains(BO->getParent()))
    // NOTE: This bailout should be an assert instead. However, asserting
    // the condition here exposes a case where LoopFusion is querying SCEV
    // with malformed loop information in the midst of the transform.
    // There doesn't appear to be an obvious fix, so for the moment bail
    // out until the caller issue can be fixed. PR49566 tracks the bug.
    return FullSet;

  // TODO: Extend to other opcodes such as mul and div.
  switch (BO->getOpcode()) {
  default:
    return FullSet;
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
    break;
  }

  if (BO->getOperand(0) != P)
    // TODO: Handle the power function forms some day.
    return FullSet;

  unsigned TC = getSmallConstantMaxTripCount(L);
  if (!TC || TC >= BitWidth)
    return FullSet;

  auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT);
  auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT);
  assert(KnownStart.getBitWidth() == BitWidth &&
         KnownStep.getBitWidth() == BitWidth);

  // Compute total shift amount, being careful of overflow and bitwidths.
  auto MaxShiftAmt = KnownStep.getMaxValue();
  APInt TCAP(BitWidth, TC - 1);
  bool Overflow = false;
  auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
  if (Overflow)
    return FullSet;

  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("filtered out above");
  case Instruction::AShr: {
    // For each ashr, three cases:
    //   shift = 0  => unchanged value
    //   saturation => 0 or -1
    //   other      => a value closer to zero (of the same sign)
    // Thus, the end value is closer to zero than the start.
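    //
    // Worked example (illustrative, not from the original source): for an i8
    // start known to be exactly 100 and TotalShift == 2, the values taken
    // after 0, 1 or 2 shifts are 100, 50 and 25, all inside
    // [KnownEnd.getMinValue(), KnownStart.getMaxValue() + 1) == [25, 101).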
    auto KnownEnd = KnownBits::ashr(KnownStart,
                                    KnownBits::makeConstant(TotalShift));
    if (KnownStart.isNonNegative())
      // Analogous to lshr (simply not yet canonicalized)
      return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
                                        KnownStart.getMaxValue() + 1);
    if (KnownStart.isNegative())
      // End >=u Start && End <=s Start
      return ConstantRange::getNonEmpty(KnownStart.getMinValue(),
                                        KnownEnd.getMaxValue() + 1);
    break;
  }
  case Instruction::LShr: {
    // For each lshr, three cases:
    //   shift = 0  => unchanged value
    //   saturation => 0
    //   other      => a smaller positive number
    // Thus, the low end of the unsigned range is the last value produced.
    auto KnownEnd = KnownBits::lshr(KnownStart,
                                    KnownBits::makeConstant(TotalShift));
    return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
                                      KnownStart.getMaxValue() + 1);
  }
  case Instruction::Shl: {
    // Iff no bits are shifted out, value increases on every shift.
    auto KnownEnd = KnownBits::shl(KnownStart,
                                   KnownBits::makeConstant(TotalShift));
    if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
      return ConstantRange(KnownStart.getMinValue(),
                           KnownEnd.getMaxValue() + 1);
    break;
  }
  }
  return FullSet;
}

const ConstantRange &
ScalarEvolution::getRangeRefIter(const SCEV *S,
                                 ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  SmallVector<const SCEV *> WorkList;
  SmallPtrSet<const SCEV *, 8> Seen;

  // Add Expr to the worklist, if Expr is either an N-ary expression or a
  // SCEVUnknown PHI node.
  auto AddToWorklist = [&WorkList, &Seen, &Cache](const SCEV *Expr) {
    if (!Seen.insert(Expr).second)
      return;
    if (Cache.find(Expr) != Cache.end())
      return;
    switch (Expr->getSCEVType()) {
    case scUnknown:
      if (!isa<PHINode>(cast<SCEVUnknown>(Expr)->getValue()))
        break;
      [[fallthrough]];
    case scConstant:
    case scTruncate:
    case scZeroExtend:
    case scSignExtend:
    case scPtrToInt:
    case scAddExpr:
    case scMulExpr:
    case scUDivExpr:
    case scAddRecExpr:
    case scUMaxExpr:
    case scSMaxExpr:
    case scUMinExpr:
    case scSMinExpr:
    case scSequentialUMinExpr:
      WorkList.push_back(Expr);
      break;
    case scCouldNotCompute:
      llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    }
  };
  AddToWorklist(S);

  // Build worklist by queuing operands of N-ary expressions and phi nodes.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    const SCEV *P = WorkList[I];
    auto *UnknownS = dyn_cast<SCEVUnknown>(P);
    // If it is not a `SCEVUnknown`, just recurse into operands.
    if (!UnknownS) {
      for (const SCEV *Op : P->operands())
        AddToWorklist(Op);
      continue;
    }
    // `SCEVUnknown`s require special treatment.
    if (const PHINode *P = dyn_cast<PHINode>(UnknownS->getValue())) {
      if (!PendingPhiRangesIter.insert(P).second)
        continue;
      for (auto &Op : reverse(P->operands()))
        AddToWorklist(getSCEV(Op));
    }
  }

  if (!WorkList.empty()) {
    // Use getRangeRef to compute ranges for items in the worklist in reverse
    // order. This will force ranges for earlier operands to be computed before
    // their users in most cases.
    for (const SCEV *P :
         reverse(make_range(WorkList.begin() + 1, WorkList.end()))) {
      getRangeRef(P, SignHint);

      if (auto *UnknownS = dyn_cast<SCEVUnknown>(P))
        if (const PHINode *P = dyn_cast<PHINode>(UnknownS->getValue()))
          PendingPhiRangesIter.erase(P);
    }
  }

  return getRangeRef(S, SignHint, 0);
}

/// Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &ScalarEvolution::getRangeRef(
    const SCEV *S, ScalarEvolution::RangeSignHint SignHint, unsigned Depth) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  ConstantRange::PreferredRangeType RangeType =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? ConstantRange::Unsigned
                                                       : ConstantRange::Signed;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  // Switch to iteratively computing the range for S, if it is part of a deeply
  // nested expression.
  if (Depth > RangeIterThreshold)
    return getRangeRefIter(S, SignHint);

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  using OBO = OverflowingBinaryOperator;

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
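  //
  // Worked example (illustrative, not from the original source): for an i8
  // expression with TZ == 2, the value is a multiple of 4, so the unsigned
  // case below computes [0, 0b11111100 + 1) == [0, 253), whose largest
  // member 252 is the largest i8 multiple of 4.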
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }

  switch (S->getSCEVType()) {
  case scConstant:
    llvm_unreachable("Already handled above.");
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(S);
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint, Depth + 1);
    return setRange(
        Trunc, SignHint,
        ConservativeResult.intersectWith(X.truncate(BitWidth), RangeType));
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(S);
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint, Depth + 1);
    return setRange(
        ZExt, SignHint,
        ConservativeResult.intersectWith(X.zeroExtend(BitWidth), RangeType));
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(S);
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint, Depth + 1);
    return setRange(
        SExt, SignHint,
        ConservativeResult.intersectWith(X.signExtend(BitWidth), RangeType));
  }
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(S);
    ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint, Depth + 1);
    return setRange(PtrToInt, SignHint, X);
  }
  case scAddExpr: {
    const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint, Depth + 1);
    unsigned WrapType = OBO::AnyWrap;
    if (Add->hasNoSignedWrap())
      WrapType |= OBO::NoSignedWrap;
    if (Add->hasNoUnsignedWrap())
      WrapType |= OBO::NoUnsignedWrap;
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint, Depth + 1),
                          WrapType, RangeType);
    return setRange(Add, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }
  case scMulExpr: {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(S);
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint, Depth + 1);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint, Depth + 1));
    return setRange(Mul, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint, Depth + 1);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint, Depth + 1);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(S);
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
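    //
    // For example (illustrative, not from the original source): {5,+,1}<nuw>
    // in i8 intersects the result with the half-open range [5, 0), which
    // denotes the values 5..255.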
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isZero())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands except the initial
    // value have the same sign or are zero, the value won't ever be:
    // 1: smaller than the initial value if the operands are non-negative,
    // 2: bigger than the initial value if the operands are non-positive.
    // In both cases, the value cannot cross the signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
                                       APInt::getSignedMinValue(BitWidth)),
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(APInt::getSignedMinValue(BitWidth),
                                       getSignedRangeMax(AddRec->getStart()) +
                                           1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromAffine, RangeType);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
      }

      // Now try symbolic BE count and more powerful methods.
      if (UseExpensiveRangeSharpening) {
        const SCEV *SymbolicMaxBECount =
            getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
        if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
            getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
            AddRec->hasNoSelfWrap()) {
          auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
              AddRec, SymbolicMaxBECount, BitWidth, SignHint);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
        }
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    Intrinsic::ID ID;
    switch (S->getSCEVType()) {
    case scUMaxExpr:
      ID = Intrinsic::umax;
      break;
    case scSMaxExpr:
      ID = Intrinsic::smax;
      break;
    case scUMinExpr:
    case scSequentialUMinExpr:
      ID = Intrinsic::umin;
      break;
    case scSMinExpr:
      ID = Intrinsic::smin;
      break;
    default:
      llvm_unreachable("Unknown SCEVMinMaxExpr/SCEVSequentialMinMaxExpr.");
    }

    const auto *NAry = cast<SCEVNAryExpr>(S);
    ConstantRange X = getRangeRef(NAry->getOperand(0), SignHint, Depth + 1);
    for (unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
      X = X.intrinsic(
          ID, {X, getRangeRef(NAry->getOperand(i), SignHint, Depth + 1)});
    return setRange(S, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(S);

    // Check if the IR explicitly contains !range metadata.
    std::optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange)
      ConservativeResult =
          ConservativeResult.intersectWith(*MDRange, RangeType);

    // Use facts about recurrences in the underlying IR. Note that add
    // recurrences are AddRecExprs and thus don't hit this path. This
    // primarily handles shift recurrences.
    auto CR = getRangeForUnknownRecurrence(U);
    ConservativeResult = ConservativeResult.intersectWith(CR);

    // See if ValueTracking can give us a useful range.
    const DataLayout &DL = getDataLayout();
    KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
    if (Known.getBitWidth() != BitWidth)
      Known = Known.zextOrTrunc(BitWidth);

    // ValueTracking may be able to compute a tighter result for the number of
    // sign bits than for the value of those sign bits.
    unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
    if (U->getType()->isPointerTy()) {
      // If the pointer size is larger than the index size type, this can cause
      // NS to be larger than BitWidth. So compensate for this.
      unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
      int ptrIdxDiff = ptrSize - BitWidth;
      if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
        NS -= ptrIdxDiff;
    }

    if (NS > 1) {
      // If we know any of the sign bits, we know all of the sign bits.
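      //
      // For example (illustrative, not from the original source): in i8 with
      // NS == 4, bits 7..4 are all copies of the sign bit, so if any one of
      // them is known zero (resp. known one), all four of them are.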
      if (!Known.Zero.getHiBits(NS).isZero())
        Known.Zero.setHighBits(NS);
      if (!Known.One.getHiBits(NS).isZero())
        Known.One.setHighBits(NS);
    }

    if (Known.getMinValue() != Known.getMaxValue() + 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
          RangeType);
    if (NS > 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                        APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
          RangeType);

    // The range of a Phi is a subset of the union of the ranges of its
    // inputs.
    if (PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not run over cycled Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);

        for (const auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint, Depth + 1);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void)Erased;
      }
    }

    // vscale can't be equal to zero
    if (const auto *II = dyn_cast<IntrinsicInst>(U->getValue()))
      if (II->getIntrinsicID() == Intrinsic::vscale) {
        ConstantRange Disallowed = APInt::getZero(BitWidth);
        ConservativeResult = ConservativeResult.difference(Disallowed);
      }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take. Initially, the expression has a
// value from StartRange and then is changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold true due to the well-defined wrap-around behavior
    // of APInt.
    Step = Step.abs();

  // Check if Offset is more than the full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
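  //
  // Worked example (illustrative, not from the original source): in i8 with
  // Step == 3 and MaxBECount == 100, we have 255 udiv 3 == 85 < 100, so the
  // total change of 3 * 100 == 300 does not fit in 8 bits and we must
  // conservatively return the full set.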
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. Checks above guarantee no
  // overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise.
  // The maximum value of the final range will match the maximal value of
  // StartRange if the expression is decreasing and will be increased by
  // Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap around). This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer nw from some exit for which
  // we do not know the max exit count (or any other side reasoning).
  // TODO: Turn into assert at some point.
  if (getTypeSizeInBits(MaxBECount->getType()) >
      getTypeSizeInBits(AddRec->getType()))
    return ConstantRange::getFull(BitWidth);
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take Start and End values and
  // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
  // the iteration. They either lie inside the range [Min(Start, End),
  // Max(Start, End)] or outside it:
  //
  // Case 1:   RangeMin    ...    Start V1 ... VN End    ...    RangeMax;
  // Case 2:   RangeMin Vk ... V1 Start    ...    End Vn ... Vk + 1 RangeMax;
  //
  // The no-self-wrap flag guarantees that the intermediate values cannot be
  // BOTH outside and inside the range [Min(Start, End), Max(Start, End)].
  // Using that knowledge, let's try to prove that we are dealing with Case 1.
  // It is so if Start <= End and the step is positive, or Start >= End and
  // the step is negative.
  const SCEV *Start = AddRec->getStart();
  ConstantRange StartRange = getRangeRef(Start, SignHint);
  ConstantRange EndRange = getRangeRef(End, SignHint);
  ConstantRange RangeBetween = StartRange.unionWith(EndRange);
  // If they already cover the full iteration space, we will know nothing
  // useful even if we prove what we want to prove.
  if (RangeBetween.isFullSet())
    return RangeBetween;
  // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
  bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
                               : RangeBetween.isWrappedSet();
  if (IsWrappedSet)
    return ConstantRange::getFull(BitWidth);

  if (isKnownPositive(Step) &&
      isKnownPredicateViaConstantRanges(LEPred, Start, End))
    return RangeBetween;
  if (isKnownNegative(Step) &&
      isKnownPredicateViaConstantRanges(GEPred, Start, End))
    return RangeBetween;
  return ConstantRange::getFull(BitWidth);
}

ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  //    RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  // == RangeOf({A,+,P}) union RangeOf({B,+,Q})

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      std::optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp)
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases
    // where that will help over what getRange already does, though.
    return ConstantRange::getFull(BitWidth);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try
  // to construct arbitrary general SCEV expressions here.
  // This function is called from deep in the call stack, and calling getSCEV
  // (on a sext instruction, say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);

  return TrueRange.unionWith(FalseRange);
}

SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V))
    return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}

const Instruction *
ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) {
  if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
    return &*AddRec->getLoop()->getHeader()->begin();
  if (auto *U = dyn_cast<SCEVUnknown>(S))
    if (auto *I = dyn_cast<Instruction>(U->getValue()))
      return I;
  return nullptr;
}

const Instruction *
ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
                                       bool &Precise) {
  Precise = true;
  // Do a bounded search of the def relation of the requested SCEVs.
  SmallSet<const SCEV *, 16> Visited;
  SmallVector<const SCEV *> Worklist;
  auto pushOp = [&](const SCEV *S) {
    if (!Visited.insert(S).second)
      return;
    // Threshold of 30 here is arbitrary.
    if (Visited.size() > 30) {
      Precise = false;
      return;
    }
    Worklist.push_back(S);
  };

  for (const auto *S : Ops)
    pushOp(S);

  const Instruction *Bound = nullptr;
  while (!Worklist.empty()) {
    auto *S = Worklist.pop_back_val();
    if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
      if (!Bound || DT.dominates(Bound, DefI))
        Bound = DefI;
    } else {
      for (const auto *Op : S->operands())
        pushOp(Op);
    }
  }
  return Bound ? Bound : &*F.getEntryBlock().begin();
}

const Instruction *
ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
  bool Discard;
  return getDefiningScopeBound(Ops, Discard);
}

bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A,
                                                        const Instruction *B) {
  if (A->getParent() == B->getParent() &&
      isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
                                                 B->getIterator()))
    return true;

  auto *BLoop = LI.getLoopFor(B->getParent());
  if (BLoop && BLoop->getHeader() == B->getParent() &&
      BLoop->getLoopPreheader() == A->getParent() &&
      isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
                                                 A->getParent()->end()) &&
      isGuaranteedToTransferExecutionToSuccessor(B->getParent()->begin(),
                                                 B->getIterator()))
    return true;
  return false;
}

bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find an
  // upper bound on the defining scope for the SCEV, and prove that I is
  // executed every time we enter that scope. When the bounding scope is a
  // loop (the common case), this is equivalent to proving I executes on every
  // iteration of that loop.
  SmallVector<const SCEV *> SCEVOps;
  for (const Use &Op : I->operands()) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (isSCEVable(Op->getType()))
      SCEVOps.push_back(getSCEV(Op));
  }
  auto *DefI = getDefiningScopeBound(SCEVOps);
  return isGuaranteedToTransferExecutionTo(DefI, I);
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (let's call the iteration in which \p
  // I first became poison as K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects.
  //    In this case, executing the backedge an infinite
  //    number of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (const Use &U : Poison->uses()) {
      const User *PoisonUser = U.getUser();
      if (propagatesPoison(U)) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayThrow() || I->mayWriteToMemory();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
  // A mustprogress loop without side effects must be finite.
  // TODO: The check used here is very conservative. It's only *specific*
  // side effects which are well defined in infinite loops.
  return isFinite(L) || (isMustProgress(L) && loopHasNoSideEffects(L));
}

const SCEV *ScalarEvolution::createSCEVIter(Value *V) {
  // Worklist item with a Value and a bool indicating whether all operands have
  // been visited already.
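  //
  // Illustrative trace (not from the original source): for V = add %a, %b
  // the stack starts as [(V,true), (V,false)]. Popping (V,false) collects
  // the operands and pushes (V,true), then (%a,false) and (%b,false); the
  // operand entries are popped (creating their SCEVs) before (V,true) is
  // popped, so createSCEV(V) runs with all of its operands already cached.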
  using PointerTy = PointerIntPair<Value *, 1, bool>;
  SmallVector<PointerTy> Stack;

  Stack.emplace_back(V, true);
  Stack.emplace_back(V, false);
  while (!Stack.empty()) {
    auto E = Stack.pop_back_val();
    Value *CurV = E.getPointer();

    if (getExistingSCEV(CurV))
      continue;

    SmallVector<Value *> Ops;
    const SCEV *CreatedSCEV = nullptr;
    // If all operands have been visited already, create the SCEV.
    if (E.getInt()) {
      CreatedSCEV = createSCEV(CurV);
    } else {
      // Otherwise get the operands we need to create SCEVs for before
      // creating the SCEV for CurV. If the SCEV for CurV can be constructed
      // trivially, just use it.
      CreatedSCEV = getOperandsToCreate(CurV, Ops);
    }

    if (CreatedSCEV) {
      insertValueToMap(CurV, CreatedSCEV);
    } else {
      // Queue CurV for SCEV creation, followed by its operands which need to
      // be constructed first.
      Stack.emplace_back(CurV, true);
      for (Value *Op : Ops)
        Stack.emplace_back(Op, false);
    }
  }

  return getExistingSCEV(V);
}

const SCEV *
ScalarEvolution::getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT.isReachableFromEntry(I->getParent()))
      return getUnknown(PoisonValue::get(V->getType()));
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->isInterposable()) {
      Ops.push_back(GA->getAliasee());
      return nullptr;
    }
    return getUnknown(V);
  } else if (!isa<ConstantExpr>(V))
    return getUnknown(V);

  Operator *U = cast<Operator>(V);
  if (auto BO =
          MatchBinaryOp(U, getDataLayout(), AC, DT, dyn_cast<Instruction>(V))) {
    bool IsConstArg = isa<ConstantInt>(BO->RHS);
    switch (BO->Opcode) {
    case Instruction::Add:
    case Instruction::Mul: {
      // For additions and multiplications, traverse add/mul chains for which
      // we can potentially create a single SCEV, to reduce the number of
      // get{Add,Mul}Expr calls.
      do {
        if (BO->Op) {
          if (BO->Op != V && getExistingSCEV(BO->Op)) {
            Ops.push_back(BO->Op);
            break;
          }
        }
        Ops.push_back(BO->RHS);
        auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT,
                                   dyn_cast<Instruction>(V));
        if (!NewBO ||
            (U->getOpcode() == Instruction::Add &&
             (NewBO->Opcode != Instruction::Add &&
              NewBO->Opcode != Instruction::Sub)) ||
            (U->getOpcode() == Instruction::Mul &&
             NewBO->Opcode != Instruction::Mul)) {
          Ops.push_back(BO->LHS);
          break;
        }
        // CreateSCEV calls getNoWrapFlagsFromUB, which under certain
        // conditions requires a SCEV for the LHS.
7529 if (NewBO->Op && (NewBO->IsNSW || NewBO->IsNUW)) { 7530 auto *I = dyn_cast<Instruction>(NewBO->Op); 7531 if (I && programUndefinedIfPoison(I)) { 7532 Ops.push_back(BO->LHS); 7533 break; 7534 } 7535 } 7536 BO = NewBO; 7537 } while (true); 7538 return nullptr; 7539 } 7540 case Instruction::Sub: 7541 case Instruction::UDiv: 7542 case Instruction::URem: 7543 break; 7544 case Instruction::AShr: 7545 case Instruction::Shl: 7546 case Instruction::Xor: 7547 if (!IsConstArg) 7548 return nullptr; 7549 break; 7550 case Instruction::And: 7551 case Instruction::Or: 7552 if (!IsConstArg && BO->LHS->getType()->isIntegerTy(1)) 7553 return nullptr; 7554 break; 7555 case Instruction::LShr: 7556 return getUnknown(V); 7557 default: 7558 llvm_unreachable("Unhandled binop"); 7559 break; 7560 } 7561 7562 Ops.push_back(BO->LHS); 7563 Ops.push_back(BO->RHS); 7564 return nullptr; 7565 } 7566 7567 switch (U->getOpcode()) { 7568 case Instruction::Trunc: 7569 case Instruction::ZExt: 7570 case Instruction::SExt: 7571 case Instruction::PtrToInt: 7572 Ops.push_back(U->getOperand(0)); 7573 return nullptr; 7574 7575 case Instruction::BitCast: 7576 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) { 7577 Ops.push_back(U->getOperand(0)); 7578 return nullptr; 7579 } 7580 return getUnknown(V); 7581 7582 case Instruction::SDiv: 7583 case Instruction::SRem: 7584 Ops.push_back(U->getOperand(0)); 7585 Ops.push_back(U->getOperand(1)); 7586 return nullptr; 7587 7588 case Instruction::GetElementPtr: 7589 assert(cast<GEPOperator>(U)->getSourceElementType()->isSized() && 7590 "GEP source element type must be sized"); 7591 for (Value *Index : U->operands()) 7592 Ops.push_back(Index); 7593 return nullptr; 7594 7595 case Instruction::IntToPtr: 7596 return getUnknown(V); 7597 7598 case Instruction::PHI: 7599 // Keep constructing SCEVs' for phis recursively for now. 7600 return nullptr; 7601 7602 case Instruction::Select: { 7603 // Check if U is a select that can be simplified to a SCEVUnknown. 
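    // E.g. (hypothetical) %r = select i1 (icmp sgt i64 %x, %y), i32 %a, i32 %b
    // compares values wider than the select's result type; no useful SCEV can
    // be formed for it, so it is treated as opaque.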
7604 auto CanSimplifyToUnknown = [this, U]() { 7605 if (U->getType()->isIntegerTy(1) || isa<ConstantInt>(U->getOperand(0))) 7606 return false; 7607 7608 auto *ICI = dyn_cast<ICmpInst>(U->getOperand(0)); 7609 if (!ICI) 7610 return false; 7611 Value *LHS = ICI->getOperand(0); 7612 Value *RHS = ICI->getOperand(1); 7613 if (ICI->getPredicate() == CmpInst::ICMP_EQ || 7614 ICI->getPredicate() == CmpInst::ICMP_NE) { 7615 if (!(isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero())) 7616 return true; 7617 } else if (getTypeSizeInBits(LHS->getType()) > 7618 getTypeSizeInBits(U->getType())) 7619 return true; 7620 return false; 7621 }; 7622 if (CanSimplifyToUnknown()) 7623 return getUnknown(U); 7624 7625 for (Value *Inc : U->operands()) 7626 Ops.push_back(Inc); 7627 return nullptr; 7628 break; 7629 } 7630 case Instruction::Call: 7631 case Instruction::Invoke: 7632 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) { 7633 Ops.push_back(RV); 7634 return nullptr; 7635 } 7636 7637 if (auto *II = dyn_cast<IntrinsicInst>(U)) { 7638 switch (II->getIntrinsicID()) { 7639 case Intrinsic::abs: 7640 Ops.push_back(II->getArgOperand(0)); 7641 return nullptr; 7642 case Intrinsic::umax: 7643 case Intrinsic::umin: 7644 case Intrinsic::smax: 7645 case Intrinsic::smin: 7646 case Intrinsic::usub_sat: 7647 case Intrinsic::uadd_sat: 7648 Ops.push_back(II->getArgOperand(0)); 7649 Ops.push_back(II->getArgOperand(1)); 7650 return nullptr; 7651 case Intrinsic::start_loop_iterations: 7652 case Intrinsic::annotation: 7653 case Intrinsic::ptr_annotation: 7654 Ops.push_back(II->getArgOperand(0)); 7655 return nullptr; 7656 default: 7657 break; 7658 } 7659 } 7660 break; 7661 } 7662 7663 return nullptr; 7664 } 7665 7666 const SCEV *ScalarEvolution::createSCEV(Value *V) { 7667 if (!isSCEVable(V->getType())) 7668 return getUnknown(V); 7669 7670 if (Instruction *I = dyn_cast<Instruction>(V)) { 7671 // Don't attempt to analyze instructions in blocks that aren't 7672 // reachable. Such instructions don't matter, and they aren't required 7673 // to obey basic rules for definitions dominating uses which this 7674 // analysis depends on. 7675 if (!DT.isReachableFromEntry(I->getParent())) 7676 return getUnknown(PoisonValue::get(V->getType())); 7677 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 7678 return getConstant(CI); 7679 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 7680 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 7681 else if (!isa<ConstantExpr>(V)) 7682 return getUnknown(V); 7683 7684 const SCEV *LHS; 7685 const SCEV *RHS; 7686 7687 Operator *U = cast<Operator>(V); 7688 if (auto BO = 7689 MatchBinaryOp(U, getDataLayout(), AC, DT, dyn_cast<Instruction>(V))) { 7690 switch (BO->Opcode) { 7691 case Instruction::Add: { 7692 // The simple thing to do would be to just call getSCEV on both operands 7693 // and call getAddExpr with the result. However if we're looking at a 7694 // bunch of things all added together, this can be quite inefficient, 7695 // because it leads to N-1 getAddExpr calls for N ultimate operands. 7696 // Instead, gather up all the operands and make a single getAddExpr call. 7697 // LLVM IR canonical form means we need only traverse the left operands. 
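      // E.g. (hypothetical) for %t0 = add i32 %a, %b; %t1 = add i32 %t0, %c;
      // %t2 = add i32 %t1, %d, walking the left operands gathers
      // {%d, %c, %b, %a} and issues one getAddExpr call instead of three
      // nested ones.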
7698 SmallVector<const SCEV *, 4> AddOps; 7699 do { 7700 if (BO->Op) { 7701 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 7702 AddOps.push_back(OpSCEV); 7703 break; 7704 } 7705 7706 // If a NUW or NSW flag can be applied to the SCEV for this 7707 // addition, then compute the SCEV for this addition by itself 7708 // with a separate call to getAddExpr. We need to do that 7709 // instead of pushing the operands of the addition onto AddOps, 7710 // since the flags are only known to apply to this particular 7711 // addition - they may not apply to other additions that can be 7712 // formed with operands from AddOps. 7713 const SCEV *RHS = getSCEV(BO->RHS); 7714 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 7715 if (Flags != SCEV::FlagAnyWrap) { 7716 const SCEV *LHS = getSCEV(BO->LHS); 7717 if (BO->Opcode == Instruction::Sub) 7718 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 7719 else 7720 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 7721 break; 7722 } 7723 } 7724 7725 if (BO->Opcode == Instruction::Sub) 7726 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 7727 else 7728 AddOps.push_back(getSCEV(BO->RHS)); 7729 7730 auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT, 7731 dyn_cast<Instruction>(V)); 7732 if (!NewBO || (NewBO->Opcode != Instruction::Add && 7733 NewBO->Opcode != Instruction::Sub)) { 7734 AddOps.push_back(getSCEV(BO->LHS)); 7735 break; 7736 } 7737 BO = NewBO; 7738 } while (true); 7739 7740 return getAddExpr(AddOps); 7741 } 7742 7743 case Instruction::Mul: { 7744 SmallVector<const SCEV *, 4> MulOps; 7745 do { 7746 if (BO->Op) { 7747 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 7748 MulOps.push_back(OpSCEV); 7749 break; 7750 } 7751 7752 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 7753 if (Flags != SCEV::FlagAnyWrap) { 7754 LHS = getSCEV(BO->LHS); 7755 RHS = getSCEV(BO->RHS); 7756 MulOps.push_back(getMulExpr(LHS, RHS, Flags)); 7757 break; 7758 } 7759 } 7760 7761 MulOps.push_back(getSCEV(BO->RHS)); 7762 auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT, 7763 dyn_cast<Instruction>(V)); 7764 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 7765 MulOps.push_back(getSCEV(BO->LHS)); 7766 break; 7767 } 7768 BO = NewBO; 7769 } while (true); 7770 7771 return getMulExpr(MulOps); 7772 } 7773 case Instruction::UDiv: 7774 LHS = getSCEV(BO->LHS); 7775 RHS = getSCEV(BO->RHS); 7776 return getUDivExpr(LHS, RHS); 7777 case Instruction::URem: 7778 LHS = getSCEV(BO->LHS); 7779 RHS = getSCEV(BO->RHS); 7780 return getURemExpr(LHS, RHS); 7781 case Instruction::Sub: { 7782 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 7783 if (BO->Op) 7784 Flags = getNoWrapFlagsFromUB(BO->Op); 7785 LHS = getSCEV(BO->LHS); 7786 RHS = getSCEV(BO->RHS); 7787 return getMinusSCEV(LHS, RHS, Flags); 7788 } 7789 case Instruction::And: 7790 // For an expression like x&255 that merely masks off the high bits, 7791 // use zext(trunc(x)) as the SCEV expression. 7792 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 7793 if (CI->isZero()) 7794 return getSCEV(BO->RHS); 7795 if (CI->isMinusOne()) 7796 return getSCEV(BO->LHS); 7797 const APInt &A = CI->getValue(); 7798 7799 // Instcombine's ShrinkDemandedConstant may strip bits out of 7800 // constants, obscuring what would otherwise be a low-bits mask. 7801 // Use computeKnownBits to compute what ShrinkDemandedConstant 7802 // knew about to reconstruct a low-bits mask value. 
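      // E.g. (hypothetical) for %r = and i32 %x, 510 (mask 0x1FE, so LZ = 23
      // and TZ = 1), the code below models %r as
      // (2 * (zext i8 (trunc i32 (%x /u 2) to i8) to i32)).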
7803 unsigned LZ = A.countl_zero(); 7804 unsigned TZ = A.countr_zero(); 7805 unsigned BitWidth = A.getBitWidth(); 7806 KnownBits Known(BitWidth); 7807 computeKnownBits(BO->LHS, Known, getDataLayout(), 7808 0, &AC, nullptr, &DT); 7809 7810 APInt EffectiveMask = 7811 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 7812 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 7813 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 7814 const SCEV *LHS = getSCEV(BO->LHS); 7815 const SCEV *ShiftedLHS = nullptr; 7816 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 7817 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 7818 // For an expression like (x * 8) & 8, simplify the multiply. 7819 unsigned MulZeros = OpC->getAPInt().countr_zero(); 7820 unsigned GCD = std::min(MulZeros, TZ); 7821 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 7822 SmallVector<const SCEV*, 4> MulOps; 7823 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 7824 append_range(MulOps, LHSMul->operands().drop_front()); 7825 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 7826 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 7827 } 7828 } 7829 if (!ShiftedLHS) 7830 ShiftedLHS = getUDivExpr(LHS, MulCount); 7831 return getMulExpr( 7832 getZeroExtendExpr( 7833 getTruncateExpr(ShiftedLHS, 7834 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 7835 BO->LHS->getType()), 7836 MulCount); 7837 } 7838 } 7839 // Binary `and` is a bit-wise `umin`. 7840 if (BO->LHS->getType()->isIntegerTy(1)) { 7841 LHS = getSCEV(BO->LHS); 7842 RHS = getSCEV(BO->RHS); 7843 return getUMinExpr(LHS, RHS); 7844 } 7845 break; 7846 7847 case Instruction::Or: 7848 // Binary `or` is a bit-wise `umax`. 7849 if (BO->LHS->getType()->isIntegerTy(1)) { 7850 LHS = getSCEV(BO->LHS); 7851 RHS = getSCEV(BO->RHS); 7852 return getUMaxExpr(LHS, RHS); 7853 } 7854 break; 7855 7856 case Instruction::Xor: 7857 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 7858 // If the RHS of xor is -1, then this is a not operation. 7859 if (CI->isMinusOne()) 7860 return getNotSCEV(getSCEV(BO->LHS)); 7861 7862 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 7863 // This is a variant of the check for xor with -1, and it handles 7864 // the case where instcombine has trimmed non-demanded bits out 7865 // of an xor with -1. 7866 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 7867 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 7868 if (LBO->getOpcode() == Instruction::And && 7869 LCI->getValue() == CI->getValue()) 7870 if (const SCEVZeroExtendExpr *Z = 7871 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 7872 Type *UTy = BO->LHS->getType(); 7873 const SCEV *Z0 = Z->getOperand(); 7874 Type *Z0Ty = Z0->getType(); 7875 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 7876 7877 // If C is a low-bits mask, the zero extend is serving to 7878 // mask off the high bits. Complement the operand and 7879 // re-apply the zext. 7880 if (CI->getValue().isMask(Z0TySize)) 7881 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 7882 7883 // If C is a single bit, it may be in the sign-bit position 7884 // before the zero-extend. In this case, represent the xor 7885 // using an add, which is equivalent, and re-apply the zext. 
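            // E.g. (hypothetical) if the xor's LHS models as
            // (zext i8 %s to i32) and C = 128, the xor flips what was the i8
            // sign bit; modulo 256 that equals adding 128, so the result is
            // (zext i8 (128 + %s) to i32).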
7886 APInt Trunc = CI->getValue().trunc(Z0TySize); 7887 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 7888 Trunc.isSignMask()) 7889 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 7890 UTy); 7891 } 7892 } 7893 break; 7894 7895 case Instruction::Shl: 7896 // Turn shift left of a constant amount into a multiply. 7897 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 7898 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 7899 7900 // If the shift count is not less than the bitwidth, the result of 7901 // the shift is undefined. Don't try to analyze it, because the 7902 // resolution chosen here may differ from the resolution chosen in 7903 // other parts of the compiler. 7904 if (SA->getValue().uge(BitWidth)) 7905 break; 7906 7907 // We can safely preserve the nuw flag in all cases. It's also safe to 7908 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation 7909 // requires special handling. It can be preserved as long as we're not 7910 // left shifting by bitwidth - 1. 7911 auto Flags = SCEV::FlagAnyWrap; 7912 if (BO->Op) { 7913 auto MulFlags = getNoWrapFlagsFromUB(BO->Op); 7914 if ((MulFlags & SCEV::FlagNSW) && 7915 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1))) 7916 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW); 7917 if (MulFlags & SCEV::FlagNUW) 7918 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW); 7919 } 7920 7921 ConstantInt *X = ConstantInt::get( 7922 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 7923 return getMulExpr(getSCEV(BO->LHS), getConstant(X), Flags); 7924 } 7925 break; 7926 7927 case Instruction::AShr: { 7928 // AShr X, C, where C is a constant. 7929 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 7930 if (!CI) 7931 break; 7932 7933 Type *OuterTy = BO->LHS->getType(); 7934 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 7935 // If the shift count is not less than the bitwidth, the result of 7936 // the shift is undefined. Don't try to analyze it, because the 7937 // resolution chosen here may differ from the resolution chosen in 7938 // other parts of the compiler. 7939 if (CI->getValue().uge(BitWidth)) 7940 break; 7941 7942 if (CI->isZero()) 7943 return getSCEV(BO->LHS); // shift by zero --> noop 7944 7945 uint64_t AShrAmt = CI->getZExtValue(); 7946 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 7947 7948 Operator *L = dyn_cast<Operator>(BO->LHS); 7949 if (L && L->getOpcode() == Instruction::Shl) { 7950 // X = Shl A, n 7951 // Y = AShr X, m 7952 // Both n and m are constant. 7953 7954 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 7955 if (L->getOperand(1) == BO->RHS) 7956 // For a two-shift sext-inreg, i.e. n = m, 7957 // use sext(trunc(x)) as the SCEV expression. 7958 return getSignExtendExpr( 7959 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy); 7960 7961 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1)); 7962 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) { 7963 uint64_t ShlAmt = ShlAmtCI->getZExtValue(); 7964 if (ShlAmt > AShrAmt) { 7965 // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV 7966 // expression. We already checked that ShlAmt < BitWidth, so 7967 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as 7968 // ShlAmt - AShrAmt < Amt. 
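            // E.g. (hypothetical) on i32 with n = 8 and m = 3,
            // (ashr (shl %x, 8), 3) is modeled as
            // (sext i29 (32 * (trunc i32 %x to i29)) to i32),
            // since 1 << (8 - 3) = 32.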
7969 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt, 7970 ShlAmt - AShrAmt); 7971 return getSignExtendExpr( 7972 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy), 7973 getConstant(Mul)), OuterTy); 7974 } 7975 } 7976 } 7977 break; 7978 } 7979 } 7980 } 7981 7982 switch (U->getOpcode()) { 7983 case Instruction::Trunc: 7984 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 7985 7986 case Instruction::ZExt: 7987 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 7988 7989 case Instruction::SExt: 7990 if (auto BO = MatchBinaryOp(U->getOperand(0), getDataLayout(), AC, DT, 7991 dyn_cast<Instruction>(V))) { 7992 // The NSW flag of a subtract does not always survive the conversion to 7993 // A + (-1)*B. By pushing sign extension onto its operands we are much 7994 // more likely to preserve NSW and allow later AddRec optimisations. 7995 // 7996 // NOTE: This is effectively duplicating this logic from getSignExtend: 7997 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 7998 // but by that point the NSW information has potentially been lost. 7999 if (BO->Opcode == Instruction::Sub && BO->IsNSW) { 8000 Type *Ty = U->getType(); 8001 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); 8002 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); 8003 return getMinusSCEV(V1, V2, SCEV::FlagNSW); 8004 } 8005 } 8006 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 8007 8008 case Instruction::BitCast: 8009 // BitCasts are no-op casts so we just eliminate the cast. 8010 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 8011 return getSCEV(U->getOperand(0)); 8012 break; 8013 8014 case Instruction::PtrToInt: { 8015 // Pointer to integer cast is straight-forward, so do model it. 8016 const SCEV *Op = getSCEV(U->getOperand(0)); 8017 Type *DstIntTy = U->getType(); 8018 // But only if effective SCEV (integer) type is wide enough to represent 8019 // all possible pointer values. 8020 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy); 8021 if (isa<SCEVCouldNotCompute>(IntOp)) 8022 return getUnknown(V); 8023 return IntOp; 8024 } 8025 case Instruction::IntToPtr: 8026 // Just don't deal with inttoptr casts. 8027 return getUnknown(V); 8028 8029 case Instruction::SDiv: 8030 // If both operands are non-negative, this is just an udiv. 8031 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 8032 isKnownNonNegative(getSCEV(U->getOperand(1)))) 8033 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 8034 break; 8035 8036 case Instruction::SRem: 8037 // If both operands are non-negative, this is just an urem. 
8038     if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
8039         isKnownNonNegative(getSCEV(U->getOperand(1))))
8040       return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
8041     break;
8042
8043   case Instruction::GetElementPtr:
8044     return createNodeForGEP(cast<GEPOperator>(U));
8045
8046   case Instruction::PHI:
8047     return createNodeForPHI(cast<PHINode>(U));
8048
8049   case Instruction::Select:
8050     return createNodeForSelectOrPHI(U, U->getOperand(0), U->getOperand(1),
8051                                     U->getOperand(2));
8052
8053   case Instruction::Call:
8054   case Instruction::Invoke:
8055     if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
8056       return getSCEV(RV);
8057
8058     if (auto *II = dyn_cast<IntrinsicInst>(U)) {
8059       switch (II->getIntrinsicID()) {
8060       case Intrinsic::abs:
8061         return getAbsExpr(
8062             getSCEV(II->getArgOperand(0)),
8063             /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
8064       case Intrinsic::umax:
8065         LHS = getSCEV(II->getArgOperand(0));
8066         RHS = getSCEV(II->getArgOperand(1));
8067         return getUMaxExpr(LHS, RHS);
8068       case Intrinsic::umin:
8069         LHS = getSCEV(II->getArgOperand(0));
8070         RHS = getSCEV(II->getArgOperand(1));
8071         return getUMinExpr(LHS, RHS);
8072       case Intrinsic::smax:
8073         LHS = getSCEV(II->getArgOperand(0));
8074         RHS = getSCEV(II->getArgOperand(1));
8075         return getSMaxExpr(LHS, RHS);
8076       case Intrinsic::smin:
8077         LHS = getSCEV(II->getArgOperand(0));
8078         RHS = getSCEV(II->getArgOperand(1));
8079         return getSMinExpr(LHS, RHS);
8080       case Intrinsic::usub_sat: {
8081         const SCEV *X = getSCEV(II->getArgOperand(0));
8082         const SCEV *Y = getSCEV(II->getArgOperand(1));
8083         const SCEV *ClampedY = getUMinExpr(X, Y);
8084         return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
8085       }
8086       case Intrinsic::uadd_sat: {
8087         const SCEV *X = getSCEV(II->getArgOperand(0));
8088         const SCEV *Y = getSCEV(II->getArgOperand(1));
8089         const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
8090         return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
8091       }
8092       case Intrinsic::start_loop_iterations:
8093       case Intrinsic::annotation:
8094       case Intrinsic::ptr_annotation:
8095         // A start_loop_iterations, llvm.annotation, or llvm.ptr.annotation is
8096         // just equivalent to its first operand for SCEV purposes.
8097         return getSCEV(II->getArgOperand(0));
8098       default:
8099         break;
8100       }
8101     }
8102     break;
8103   }
8104
8105   return getUnknown(V);
8106 }
8107
8108 //===----------------------------------------------------------------------===//
8109 //                   Iteration Count Computation Code
8110 //
8111
8112 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount,
8113                                                        bool Extend) {
8114   if (isa<SCEVCouldNotCompute>(ExitCount))
8115     return getCouldNotCompute();
8116
8117   auto *ExitCountType = ExitCount->getType();
8118   assert(ExitCountType->isIntegerTy());
8119
8120   if (!Extend)
8121     return getAddExpr(ExitCount, getOne(ExitCountType));
8122
8123   auto *WiderType = Type::getIntNTy(ExitCountType->getContext(),
8124                                     1 + ExitCountType->getScalarSizeInBits());
8125   return getAddExpr(getNoopOrZeroExtend(ExitCount, WiderType),
8126                     getOne(WiderType));
8127 }
8128
8129 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
8130   if (!ExitCount)
8131     return 0;
8132
8133   ConstantInt *ExitConst = ExitCount->getValue();
8134
8135   // Guard against huge trip counts.
8136   if (ExitConst->getValue().getActiveBits() > 32)
8137     return 0;
8138
8139   // In case of integer overflow, this returns 0, which is correct.
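  // E.g. an exit count of 7 yields a trip count of 8, while an exit count of
  // 2^32 - 1 wraps the addition below to 0, which reads as "unknown".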
8140   return ((unsigned)ExitConst->getZExtValue()) + 1;
8141 }
8142
8143 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
8144   auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
8145   return getConstantTripCount(ExitCount);
8146 }
8147
8148 unsigned
8149 ScalarEvolution::getSmallConstantTripCount(const Loop *L,
8150                                            const BasicBlock *ExitingBlock) {
8151   assert(ExitingBlock && "Must pass a non-null exiting block!");
8152   assert(L->isLoopExiting(ExitingBlock) &&
8153          "Exiting block must actually branch out of the loop!");
8154   const SCEVConstant *ExitCount =
8155       dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
8156   return getConstantTripCount(ExitCount);
8157 }
8158
8159 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
8160   const auto *MaxExitCount =
8161       dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
8162   return getConstantTripCount(MaxExitCount);
8163 }
8164
8165 const SCEV *ScalarEvolution::getConstantMaxTripCountFromArray(const Loop *L) {
8166   // We cannot infer a trip count from an array access in an irregular loop.
8167   // FIXME: It's hard to infer a loop bound from an array used in a nested loop.
8168   if (!L->isLoopSimplifyForm() || !L->isInnermost())
8169     return getCouldNotCompute();
8170
8171   // FIXME: To keep the common case simple, we only analyze loops that have
8172   // one exiting block, and that block must be the latch. This makes it
8173   // easier to handle loops whose memory accesses are executed in every
8174   // iteration.
8175   const BasicBlock *LoopLatch = L->getLoopLatch();
8176   assert(LoopLatch && "See the definition of a loop in simplified form.");
8177   if (L->getExitingBlock() != LoopLatch)
8178     return getCouldNotCompute();
8179
8180   const DataLayout &DL = getDataLayout();
8181   SmallVector<const SCEV *> InferCountColl;
8182   for (auto *BB : L->getBlocks()) {
8183     // At this point we know the loop is in simplified form with a single
8184     // exiting block. Only infer from memory operations that must execute on
8185     // every iteration, i.e. blocks whose maximum execution count matches the
8186     // latch's maximum execution count.
8187     // If MemAccessBB does not dominate the latch, skip it.
8188     //            Entry
8189     //              │
8190     //        ┌─────▼─────┐
8191     //        │Loop Header◄─────┐
8192     //        └──┬──────┬─┘     │
8193     //           │      │       │
8194     //  ┌────────▼──┐ ┌─▼─────┐ │
8195     //  │MemAccessBB│ │OtherBB│ │
8196     //  └────────┬──┘ └─┬─────┘ │
8197     //           │      │       │
8198     //         ┌─▼──────▼─┐     │
8199     //         │Loop Latch├─────┘
8200     //         └────┬─────┘
8201     //              ▼
8202     //             Exit
8203     if (!DT.dominates(BB, LoopLatch))
8204       continue;
8205
8206     for (Instruction &Inst : *BB) {
8207       // Find memory operation instructions.
8208       auto *GEP = getLoadStorePointerOperand(&Inst);
8209       if (!GEP)
8210         continue;
8211
8212       auto *ElemSize = dyn_cast<SCEVConstant>(getElementSize(&Inst));
8213       // Do not infer from a scalar type, e.g. "ElemSize = sizeof()".
8214       if (!ElemSize)
8215         continue;
8216
8217       // Use an existing polynomial recurrence on the trip count.
8218       auto *AddRec = dyn_cast<SCEVAddRecExpr>(getSCEV(GEP));
8219       if (!AddRec)
8220         continue;
8221       auto *ArrBase = dyn_cast<SCEVUnknown>(getPointerBase(AddRec));
8222       auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*this));
8223       if (!ArrBase || !Step)
8224         continue;
8225       assert(isLoopInvariant(ArrBase, L) && "See addrec definition");
8226
8227       // Only handle { %array + step }.
8228       // FIXME: { (SCEVAddRecExpr) + step } cannot be analysed here.
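      // E.g. (hypothetical) for %a = alloca [100 x i32] accessed through the
      // recurrence {%a,+,4}, the checks below give MemSize = 400 and Step = 4,
      // so MaxExeCount = 100 and the inferred trip count is 101 (entering the
      // header one last time without touching the array is still allowed).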
8229       if (AddRec->getStart() != ArrBase)
8230         continue;
8231
8232       // Skip memory access patterns that have gaps,
8233       // that repeatedly touch the same location,
8234       // or whose GEP index wraps around.
8235       if (Step->getAPInt().getActiveBits() > 32 ||
8236           Step->getAPInt().getZExtValue() !=
8237               ElemSize->getAPInt().getZExtValue() ||
8238           Step->isZero() || Step->getAPInt().isNegative())
8239         continue;
8240
8241       // Only infer from stack arrays with a statically known size.
8242       // Make sure the alloca instruction is not executed inside the loop.
8243       AllocaInst *AllocateInst = dyn_cast<AllocaInst>(ArrBase->getValue());
8244       if (!AllocateInst || L->contains(AllocateInst->getParent()))
8245         continue;
8246
8247       // Make sure we only handle a plain array allocation.
8248       auto *Ty = dyn_cast<ArrayType>(AllocateInst->getAllocatedType());
8249       auto *ArrSize = dyn_cast<ConstantInt>(AllocateInst->getArraySize());
8250       if (!Ty || !ArrSize || !ArrSize->isOne())
8251         continue;
8252
8253       // FIXME: Since GEP indices are silently zext'd to the indexing type,
8254       // a narrow GEP index may wrap around rather than increase strictly;
8255       // we should ensure that the step increases strictly with
8256       // each loop iteration.
8257       // Now we can infer a maximum execution count of MemSize / Step.
8258       const SCEV *MemSize =
8259           getConstant(Step->getType(), DL.getTypeAllocSize(Ty));
8260       auto *MaxExeCount =
8261           dyn_cast<SCEVConstant>(getUDivCeilSCEV(MemSize, Step));
8262       if (!MaxExeCount || MaxExeCount->getAPInt().getActiveBits() > 32)
8263         continue;
8264
8265       // Once the loop has executed the maximum number of times, accessing
8266       // bytes outside the statically allocated size would be immediate UB,
8267       // but entering the loop header one more time without touching the
8268       // array is still allowed.
8269       auto *InferCount = dyn_cast<SCEVConstant>(
8270           getAddExpr(MaxExeCount, getOne(MaxExeCount->getType())));
8271       // Discard trip counts that do not fit in 32 bits.
8272       if (!InferCount || InferCount->getAPInt().getActiveBits() > 32)
8273         continue;
8274
8275       InferCountColl.push_back(InferCount);
8276     }
8277   }
8278
8279   if (InferCountColl.size() == 0)
8280     return getCouldNotCompute();
8281
8282   return getUMinFromMismatchedTypes(InferCountColl);
8283 }
8284
8285 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
8286   SmallVector<BasicBlock *, 8> ExitingBlocks;
8287   L->getExitingBlocks(ExitingBlocks);
8288
8289   std::optional<unsigned> Res;
8290   for (auto *ExitingBB : ExitingBlocks) {
8291     unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
8292     if (!Res)
8293       Res = Multiple;
8294     Res = (unsigned)std::gcd(*Res, Multiple);
8295   }
8296   return Res.value_or(1);
8297 }
8298
8299 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
8300                                                        const SCEV *ExitCount) {
8301   if (ExitCount == getCouldNotCompute())
8302     return 1;
8303
8304   // Get the trip count.
8305   const SCEV *TCExpr = getTripCountFromExitCount(ExitCount);
8306
8307   const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
8308   if (!TC)
8309     // Attempt to factor more general cases. Returns the greatest power of
8310     // two divisor. If overflow happens, the trip count expression is still
8311     // divisible by the greatest power of 2 divisor returned.
8312     return 1U << std::min((uint32_t)31,
8313                           GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));
8314
8315   ConstantInt *Result = TC->getValue();
8316
8317   // Guard against huge trip counts (this requires checking
8318   // for zero to handle the case where the trip count == -1 and the
8319   // addition wraps).
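  // E.g. a backedge-taken count of -1 makes TCExpr wrap to 0, whose active
  // bit count is 0, so we return 1 rather than claim a bogus multiple.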
8320 if (!Result || Result->getValue().getActiveBits() > 32 || 8321 Result->getValue().getActiveBits() == 0) 8322 return 1; 8323 8324 return (unsigned)Result->getZExtValue(); 8325 } 8326 8327 /// Returns the largest constant divisor of the trip count of this loop as a 8328 /// normal unsigned value, if possible. This means that the actual trip count is 8329 /// always a multiple of the returned value (don't forget the trip count could 8330 /// very well be zero as well!). 8331 /// 8332 /// Returns 1 if the trip count is unknown or not guaranteed to be the 8333 /// multiple of a constant (which is also the case if the trip count is simply 8334 /// constant, use getSmallConstantTripCount for that case), Will also return 1 8335 /// if the trip count is very large (>= 2^32). 8336 /// 8337 /// As explained in the comments for getSmallConstantTripCount, this assumes 8338 /// that control exits the loop via ExitingBlock. 8339 unsigned 8340 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, 8341 const BasicBlock *ExitingBlock) { 8342 assert(ExitingBlock && "Must pass a non-null exiting block!"); 8343 assert(L->isLoopExiting(ExitingBlock) && 8344 "Exiting block must actually branch out of the loop!"); 8345 const SCEV *ExitCount = getExitCount(L, ExitingBlock); 8346 return getSmallConstantTripMultiple(L, ExitCount); 8347 } 8348 8349 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 8350 const BasicBlock *ExitingBlock, 8351 ExitCountKind Kind) { 8352 switch (Kind) { 8353 case Exact: 8354 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 8355 case SymbolicMaximum: 8356 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock, this); 8357 case ConstantMaximum: 8358 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this); 8359 }; 8360 llvm_unreachable("Invalid ExitCountKind!"); 8361 } 8362 8363 const SCEV * 8364 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 8365 SmallVector<const SCEVPredicate *, 4> &Preds) { 8366 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds); 8367 } 8368 8369 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L, 8370 ExitCountKind Kind) { 8371 switch (Kind) { 8372 case Exact: 8373 return getBackedgeTakenInfo(L).getExact(L, this); 8374 case ConstantMaximum: 8375 return getBackedgeTakenInfo(L).getConstantMax(this); 8376 case SymbolicMaximum: 8377 return getBackedgeTakenInfo(L).getSymbolicMax(L, this); 8378 }; 8379 llvm_unreachable("Invalid ExitCountKind!"); 8380 } 8381 8382 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 8383 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this); 8384 } 8385 8386 /// Push PHI nodes in the header of the given loop onto the given Worklist. 8387 static void PushLoopPHIs(const Loop *L, 8388 SmallVectorImpl<Instruction *> &Worklist, 8389 SmallPtrSetImpl<Instruction *> &Visited) { 8390 BasicBlock *Header = L->getHeader(); 8391 8392 // Push all Loop-header PHIs onto the Worklist stack. 
8393   for (PHINode &PN : Header->phis())
8394     if (Visited.insert(&PN).second)
8395       Worklist.push_back(&PN);
8396 }
8397
8398 const ScalarEvolution::BackedgeTakenInfo &
8399 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
8400   auto &BTI = getBackedgeTakenInfo(L);
8401   if (BTI.hasFullInfo())
8402     return BTI;
8403
8404   auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
8405
8406   if (!Pair.second)
8407     return Pair.first->second;
8408
8409   BackedgeTakenInfo Result =
8410       computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
8411
8412   return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8413 }
8414
8415 ScalarEvolution::BackedgeTakenInfo &
8416 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
8417   // Initially insert an invalid entry for this loop. If the insertion
8418   // succeeds, proceed to actually compute a backedge-taken count and
8419   // update the value. The temporary CouldNotCompute value tells SCEV
8420   // code elsewhere that it shouldn't attempt to request a new
8421   // backedge-taken count, which could result in infinite recursion.
8422   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
8423       BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
8424   if (!Pair.second)
8425     return Pair.first->second;
8426
8427   // computeBackedgeTakenCount may allocate memory for its result. Inserting it
8428   // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
8429   // must be cleared in this scope.
8430   BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
8431
8432   // In release builds the statistics are unused; these casts avoid warnings.
8433   (void)NumTripCountsComputed;
8434   (void)NumTripCountsNotComputed;
8435 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
8436   const SCEV *BEExact = Result.getExact(L, this);
8437   if (BEExact != getCouldNotCompute()) {
8438     assert(isLoopInvariant(BEExact, L) &&
8439            isLoopInvariant(Result.getConstantMax(this), L) &&
8440            "Computed backedge-taken count isn't loop invariant for loop!");
8441     ++NumTripCountsComputed;
8442   } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
8443              isa<PHINode>(L->getHeader()->begin())) {
8444     // Only count loops that have phi nodes as not being computable.
8445     ++NumTripCountsNotComputed;
8446   }
8447 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
8448
8449   // Now that we know more about the trip count for this loop, forget any
8450   // existing SCEV values for PHI nodes in this loop since they are only
8451   // conservative estimates made without the benefit of trip count
8452   // information. This invalidation is not necessary for correctness, and is
8453   // only done to produce more precise results.
8454   if (Result.hasAnyInfo()) {
8455     // Invalidate any expression using an addrec in this loop.
8456     SmallVector<const SCEV *, 8> ToForget;
8457     auto LoopUsersIt = LoopUsers.find(L);
8458     if (LoopUsersIt != LoopUsers.end())
8459       append_range(ToForget, LoopUsersIt->second);
8460     forgetMemoizedResults(ToForget);
8461
8462     // Invalidate constant-evolved loop header phis.
8463     for (PHINode &PN : L->getHeader()->phis())
8464       ConstantEvolutionLoopExitValue.erase(&PN);
8465   }
8466
8467   // Re-lookup the insert position, since the call to
8468   // computeBackedgeTakenCount above could result in a
8469   // recursive call to getBackedgeTakenInfo (on a different
8470   // loop), which would invalidate the iterator computed
8471   // earlier.
8472 return BackedgeTakenCounts.find(L)->second = std::move(Result); 8473 } 8474 8475 void ScalarEvolution::forgetAllLoops() { 8476 // This method is intended to forget all info about loops. It should 8477 // invalidate caches as if the following happened: 8478 // - The trip counts of all loops have changed arbitrarily 8479 // - Every llvm::Value has been updated in place to produce a different 8480 // result. 8481 BackedgeTakenCounts.clear(); 8482 PredicatedBackedgeTakenCounts.clear(); 8483 BECountUsers.clear(); 8484 LoopPropertiesCache.clear(); 8485 ConstantEvolutionLoopExitValue.clear(); 8486 ValueExprMap.clear(); 8487 ValuesAtScopes.clear(); 8488 ValuesAtScopesUsers.clear(); 8489 LoopDispositions.clear(); 8490 BlockDispositions.clear(); 8491 UnsignedRanges.clear(); 8492 SignedRanges.clear(); 8493 ExprValueMap.clear(); 8494 HasRecMap.clear(); 8495 MinTrailingZerosCache.clear(); 8496 PredicatedSCEVRewrites.clear(); 8497 FoldCache.clear(); 8498 FoldCacheUser.clear(); 8499 } 8500 8501 void ScalarEvolution::forgetLoop(const Loop *L) { 8502 SmallVector<const Loop *, 16> LoopWorklist(1, L); 8503 SmallVector<Instruction *, 32> Worklist; 8504 SmallPtrSet<Instruction *, 16> Visited; 8505 SmallVector<const SCEV *, 16> ToForget; 8506 8507 // Iterate over all the loops and sub-loops to drop SCEV information. 8508 while (!LoopWorklist.empty()) { 8509 auto *CurrL = LoopWorklist.pop_back_val(); 8510 8511 // Drop any stored trip count value. 8512 forgetBackedgeTakenCounts(CurrL, /* Predicated */ false); 8513 forgetBackedgeTakenCounts(CurrL, /* Predicated */ true); 8514 8515 // Drop information about predicated SCEV rewrites for this loop. 8516 for (auto I = PredicatedSCEVRewrites.begin(); 8517 I != PredicatedSCEVRewrites.end();) { 8518 std::pair<const SCEV *, const Loop *> Entry = I->first; 8519 if (Entry.second == CurrL) 8520 PredicatedSCEVRewrites.erase(I++); 8521 else 8522 ++I; 8523 } 8524 8525 auto LoopUsersItr = LoopUsers.find(CurrL); 8526 if (LoopUsersItr != LoopUsers.end()) { 8527 ToForget.insert(ToForget.end(), LoopUsersItr->second.begin(), 8528 LoopUsersItr->second.end()); 8529 } 8530 8531 // Drop information about expressions based on loop-header PHIs. 8532 PushLoopPHIs(CurrL, Worklist, Visited); 8533 8534 while (!Worklist.empty()) { 8535 Instruction *I = Worklist.pop_back_val(); 8536 8537 ValueExprMapType::iterator It = 8538 ValueExprMap.find_as(static_cast<Value *>(I)); 8539 if (It != ValueExprMap.end()) { 8540 eraseValueFromMap(It->first); 8541 ToForget.push_back(It->second); 8542 if (PHINode *PN = dyn_cast<PHINode>(I)) 8543 ConstantEvolutionLoopExitValue.erase(PN); 8544 } 8545 8546 PushDefUseChildren(I, Worklist, Visited); 8547 } 8548 8549 LoopPropertiesCache.erase(CurrL); 8550 // Forget all contained loops too, to avoid dangling entries in the 8551 // ValuesAtScopes map. 8552 LoopWorklist.append(CurrL->begin(), CurrL->end()); 8553 } 8554 forgetMemoizedResults(ToForget); 8555 } 8556 8557 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 8558 forgetLoop(L->getOutermostLoop()); 8559 } 8560 8561 void ScalarEvolution::forgetValue(Value *V) { 8562 Instruction *I = dyn_cast<Instruction>(V); 8563 if (!I) return; 8564 8565 // Drop information about expressions based on loop-header PHIs. 
8566 SmallVector<Instruction *, 16> Worklist; 8567 SmallPtrSet<Instruction *, 8> Visited; 8568 SmallVector<const SCEV *, 8> ToForget; 8569 Worklist.push_back(I); 8570 Visited.insert(I); 8571 8572 while (!Worklist.empty()) { 8573 I = Worklist.pop_back_val(); 8574 ValueExprMapType::iterator It = 8575 ValueExprMap.find_as(static_cast<Value *>(I)); 8576 if (It != ValueExprMap.end()) { 8577 eraseValueFromMap(It->first); 8578 ToForget.push_back(It->second); 8579 if (PHINode *PN = dyn_cast<PHINode>(I)) 8580 ConstantEvolutionLoopExitValue.erase(PN); 8581 } 8582 8583 PushDefUseChildren(I, Worklist, Visited); 8584 } 8585 forgetMemoizedResults(ToForget); 8586 } 8587 8588 void ScalarEvolution::forgetLoopDispositions() { LoopDispositions.clear(); } 8589 8590 void ScalarEvolution::forgetBlockAndLoopDispositions(Value *V) { 8591 // Unless a specific value is passed to invalidation, completely clear both 8592 // caches. 8593 if (!V) { 8594 BlockDispositions.clear(); 8595 LoopDispositions.clear(); 8596 return; 8597 } 8598 8599 if (!isSCEVable(V->getType())) 8600 return; 8601 8602 const SCEV *S = getExistingSCEV(V); 8603 if (!S) 8604 return; 8605 8606 // Invalidate the block and loop dispositions cached for S. Dispositions of 8607 // S's users may change if S's disposition changes (i.e. a user may change to 8608 // loop-invariant, if S changes to loop invariant), so also invalidate 8609 // dispositions of S's users recursively. 8610 SmallVector<const SCEV *, 8> Worklist = {S}; 8611 SmallPtrSet<const SCEV *, 8> Seen = {S}; 8612 while (!Worklist.empty()) { 8613 const SCEV *Curr = Worklist.pop_back_val(); 8614 bool LoopDispoRemoved = LoopDispositions.erase(Curr); 8615 bool BlockDispoRemoved = BlockDispositions.erase(Curr); 8616 if (!LoopDispoRemoved && !BlockDispoRemoved) 8617 continue; 8618 auto Users = SCEVUsers.find(Curr); 8619 if (Users != SCEVUsers.end()) 8620 for (const auto *User : Users->second) 8621 if (Seen.insert(User).second) 8622 Worklist.push_back(User); 8623 } 8624 } 8625 8626 /// Get the exact loop backedge taken count considering all loop exits. A 8627 /// computable result can only be returned for loops with all exiting blocks 8628 /// dominating the latch. howFarToZero assumes that the limit of each loop test 8629 /// is never skipped. This is a valid assumption as long as the loop exits via 8630 /// that test. For precise results, it is the caller's responsibility to specify 8631 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 8632 const SCEV * 8633 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 8634 SmallVector<const SCEVPredicate *, 4> *Preds) const { 8635 // If any exits were not computable, the loop is not computable. 8636 if (!isComplete() || ExitNotTaken.empty()) 8637 return SE->getCouldNotCompute(); 8638 8639 const BasicBlock *Latch = L->getLoopLatch(); 8640 // All exiting blocks we have collected must dominate the only backedge. 8641 if (!Latch) 8642 return SE->getCouldNotCompute(); 8643 8644 // All exiting blocks we have gathered dominate loop's latch, so exact trip 8645 // count is simply a minimum out of all these calculated exit counts. 
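  // E.g. (hypothetical) with two dominating exits taken after %n and %m
  // iterations, the exact backedge-taken count computed below is
  // umin_seq(%n, %m); the sequential min keeps a poisoned %m from leaking
  // into the result when %n is zero.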
8646   SmallVector<const SCEV *, 2> Ops;
8647   for (const auto &ENT : ExitNotTaken) {
8648     const SCEV *BECount = ENT.ExactNotTaken;
8649     assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
8650     assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
8651            "We should only have known counts for exiting blocks that dominate "
8652            "latch!");
8653
8654     Ops.push_back(BECount);
8655
8656     if (Preds)
8657       for (const auto *P : ENT.Predicates)
8658         Preds->push_back(P);
8659
8660     assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8661            "Predicate should be always true!");
8662   }
8663
8664   // If an earlier exit exits on the first iteration (exit count zero), then
8665   // a later poison exit count should not propagate into the result. These are
8666   // exactly the semantics provided by umin_seq.
8667   return SE->getUMinFromMismatchedTypes(Ops, /* Sequential */ true);
8668 }
8669
8670 /// Get the exact not taken count for this loop exit.
8671 const SCEV *
8672 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
8673                                              ScalarEvolution *SE) const {
8674   for (const auto &ENT : ExitNotTaken)
8675     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
8676       return ENT.ExactNotTaken;
8677
8678   return SE->getCouldNotCompute();
8679 }
8680
8681 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8682     const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
8683   for (const auto &ENT : ExitNotTaken)
8684     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
8685       return ENT.ConstantMaxNotTaken;
8686
8687   return SE->getCouldNotCompute();
8688 }
8689
8690 const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8691     const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
8692   for (const auto &ENT : ExitNotTaken)
8693     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
8694       return ENT.SymbolicMaxNotTaken;
8695
8696   return SE->getCouldNotCompute();
8697 }
8698
8699 /// getConstantMax - Get the constant max backedge taken count for the loop.
8700 const SCEV * 8701 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { 8702 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 8703 return !ENT.hasAlwaysTruePredicate(); 8704 }; 8705 8706 if (!getConstantMax() || any_of(ExitNotTaken, PredicateNotAlwaysTrue)) 8707 return SE->getCouldNotCompute(); 8708 8709 assert((isa<SCEVCouldNotCompute>(getConstantMax()) || 8710 isa<SCEVConstant>(getConstantMax())) && 8711 "No point in having a non-constant max backedge taken count!"); 8712 return getConstantMax(); 8713 } 8714 8715 const SCEV * 8716 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, 8717 ScalarEvolution *SE) { 8718 if (!SymbolicMax) 8719 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); 8720 return SymbolicMax; 8721 } 8722 8723 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( 8724 ScalarEvolution *SE) const { 8725 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 8726 return !ENT.hasAlwaysTruePredicate(); 8727 }; 8728 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 8729 } 8730 8731 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 8732 : ExitLimit(E, E, E, false, std::nullopt) {} 8733 8734 ScalarEvolution::ExitLimit::ExitLimit( 8735 const SCEV *E, const SCEV *ConstantMaxNotTaken, 8736 const SCEV *SymbolicMaxNotTaken, bool MaxOrZero, 8737 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 8738 : ExactNotTaken(E), ConstantMaxNotTaken(ConstantMaxNotTaken), 8739 SymbolicMaxNotTaken(SymbolicMaxNotTaken), MaxOrZero(MaxOrZero) { 8740 // If we prove the max count is zero, so is the symbolic bound. This happens 8741 // in practice due to differences in a) how context sensitive we've chosen 8742 // to be and b) how we reason about bounds implied by UB. 
8743 if (ConstantMaxNotTaken->isZero()) { 8744 this->ExactNotTaken = E = ConstantMaxNotTaken; 8745 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken; 8746 } 8747 8748 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 8749 !isa<SCEVCouldNotCompute>(ConstantMaxNotTaken)) && 8750 "Exact is not allowed to be less precise than Constant Max"); 8751 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 8752 !isa<SCEVCouldNotCompute>(SymbolicMaxNotTaken)) && 8753 "Exact is not allowed to be less precise than Symbolic Max"); 8754 assert((isa<SCEVCouldNotCompute>(SymbolicMaxNotTaken) || 8755 !isa<SCEVCouldNotCompute>(ConstantMaxNotTaken)) && 8756 "Symbolic Max is not allowed to be less precise than Constant Max"); 8757 assert((isa<SCEVCouldNotCompute>(ConstantMaxNotTaken) || 8758 isa<SCEVConstant>(ConstantMaxNotTaken)) && 8759 "No point in having a non-constant max backedge taken count!"); 8760 for (const auto *PredSet : PredSetList) 8761 for (const auto *P : *PredSet) 8762 addPredicate(P); 8763 assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) && 8764 "Backedge count should be int"); 8765 assert((isa<SCEVCouldNotCompute>(ConstantMaxNotTaken) || 8766 !ConstantMaxNotTaken->getType()->isPointerTy()) && 8767 "Max backedge count should be int"); 8768 } 8769 8770 ScalarEvolution::ExitLimit::ExitLimit( 8771 const SCEV *E, const SCEV *ConstantMaxNotTaken, 8772 const SCEV *SymbolicMaxNotTaken, bool MaxOrZero, 8773 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 8774 : ExitLimit(E, ConstantMaxNotTaken, SymbolicMaxNotTaken, MaxOrZero, 8775 { &PredSet }) {} 8776 8777 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 8778 /// computable exit into a persistent ExitNotTakenInfo array. 8779 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 8780 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts, 8781 bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero) 8782 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) { 8783 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 8784 8785 ExitNotTaken.reserve(ExitCounts.size()); 8786 std::transform(ExitCounts.begin(), ExitCounts.end(), 8787 std::back_inserter(ExitNotTaken), 8788 [&](const EdgeExitInfo &EEI) { 8789 BasicBlock *ExitBB = EEI.first; 8790 const ExitLimit &EL = EEI.second; 8791 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, 8792 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken, 8793 EL.Predicates); 8794 }); 8795 assert((isa<SCEVCouldNotCompute>(ConstantMax) || 8796 isa<SCEVConstant>(ConstantMax)) && 8797 "No point in having a non-constant max backedge taken count!"); 8798 } 8799 8800 /// Compute the number of times the backedge of the specified loop will execute. 8801 ScalarEvolution::BackedgeTakenInfo 8802 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 8803 bool AllowPredicates) { 8804 SmallVector<BasicBlock *, 8> ExitingBlocks; 8805 L->getExitingBlocks(ExitingBlocks); 8806 8807 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 8808 8809 SmallVector<EdgeExitInfo, 4> ExitCounts; 8810 bool CouldComputeBECount = true; 8811 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 8812 const SCEV *MustExitMaxBECount = nullptr; 8813 const SCEV *MayExitMaxBECount = nullptr; 8814 bool MustExitMaxOrZero = false; 8815 8816 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 8817 // and compute maxBECount. 8818 // Do a union of all the predicates here. 
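  // E.g. (hypothetical) if one exit dominating the latch is taken after at
  // most 10 iterations and a second exit is not computable, the loop's
  // constant max below is still 10 even though the exact count is
  // CouldNotCompute.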
8819 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 8820 BasicBlock *ExitBB = ExitingBlocks[i]; 8821 8822 // We canonicalize untaken exits to br (constant), ignore them so that 8823 // proving an exit untaken doesn't negatively impact our ability to reason 8824 // about the loop as whole. 8825 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator())) 8826 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) { 8827 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 8828 if (ExitIfTrue == CI->isZero()) 8829 continue; 8830 } 8831 8832 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 8833 8834 assert((AllowPredicates || EL.Predicates.empty()) && 8835 "Predicated exit limit when predicates are not allowed!"); 8836 8837 // 1. For each exit that can be computed, add an entry to ExitCounts. 8838 // CouldComputeBECount is true only if all exits can be computed. 8839 if (EL.ExactNotTaken == getCouldNotCompute()) 8840 // We couldn't compute an exact value for this exit, so 8841 // we won't be able to compute an exact value for the loop. 8842 CouldComputeBECount = false; 8843 // Remember exit count if either exact or symbolic is known. Because 8844 // Exact always implies symbolic, only check symbolic. 8845 if (EL.SymbolicMaxNotTaken != getCouldNotCompute()) 8846 ExitCounts.emplace_back(ExitBB, EL); 8847 else 8848 assert(EL.ExactNotTaken == getCouldNotCompute() && 8849 "Exact is known but symbolic isn't?"); 8850 8851 // 2. Derive the loop's MaxBECount from each exit's max number of 8852 // non-exiting iterations. Partition the loop exits into two kinds: 8853 // LoopMustExits and LoopMayExits. 8854 // 8855 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 8856 // is a LoopMayExit. If any computable LoopMustExit is found, then 8857 // MaxBECount is the minimum EL.ConstantMaxNotTaken of computable 8858 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 8859 // EL.ConstantMaxNotTaken, where CouldNotCompute is considered greater than 8860 // any 8861 // computable EL.ConstantMaxNotTaken. 8862 if (EL.ConstantMaxNotTaken != getCouldNotCompute() && Latch && 8863 DT.dominates(ExitBB, Latch)) { 8864 if (!MustExitMaxBECount) { 8865 MustExitMaxBECount = EL.ConstantMaxNotTaken; 8866 MustExitMaxOrZero = EL.MaxOrZero; 8867 } else { 8868 MustExitMaxBECount = getUMinFromMismatchedTypes(MustExitMaxBECount, 8869 EL.ConstantMaxNotTaken); 8870 } 8871 } else if (MayExitMaxBECount != getCouldNotCompute()) { 8872 if (!MayExitMaxBECount || EL.ConstantMaxNotTaken == getCouldNotCompute()) 8873 MayExitMaxBECount = EL.ConstantMaxNotTaken; 8874 else { 8875 MayExitMaxBECount = getUMaxFromMismatchedTypes(MayExitMaxBECount, 8876 EL.ConstantMaxNotTaken); 8877 } 8878 } 8879 } 8880 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 8881 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 8882 // The loop backedge will be taken the maximum or zero times if there's 8883 // a single exit that must be taken the maximum or zero times. 8884 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 8885 8886 // Remember which SCEVs are used in exit limits for invalidation purposes. 8887 // We only care about non-constant SCEVs here, so we can ignore 8888 // EL.ConstantMaxNotTaken 8889 // and MaxBECount, which must be SCEVConstant. 
8890 for (const auto &Pair : ExitCounts) { 8891 if (!isa<SCEVConstant>(Pair.second.ExactNotTaken)) 8892 BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates}); 8893 if (!isa<SCEVConstant>(Pair.second.SymbolicMaxNotTaken)) 8894 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert( 8895 {L, AllowPredicates}); 8896 } 8897 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 8898 MaxBECount, MaxOrZero); 8899 } 8900 8901 ScalarEvolution::ExitLimit 8902 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 8903 bool AllowPredicates) { 8904 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); 8905 // If our exiting block does not dominate the latch, then its connection with 8906 // loop's exit limit may be far from trivial. 8907 const BasicBlock *Latch = L->getLoopLatch(); 8908 if (!Latch || !DT.dominates(ExitingBlock, Latch)) 8909 return getCouldNotCompute(); 8910 8911 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 8912 Instruction *Term = ExitingBlock->getTerminator(); 8913 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 8914 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 8915 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 8916 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && 8917 "It should have one successor in loop and one exit block!"); 8918 // Proceed to the next level to examine the exit condition expression. 8919 return computeExitLimitFromCond( 8920 L, BI->getCondition(), ExitIfTrue, 8921 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 8922 } 8923 8924 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { 8925 // For switch, make sure that there is a single exit from the loop. 8926 BasicBlock *Exit = nullptr; 8927 for (auto *SBB : successors(ExitingBlock)) 8928 if (!L->contains(SBB)) { 8929 if (Exit) // Multiple exit successors. 
8930 return getCouldNotCompute(); 8931 Exit = SBB; 8932 } 8933 assert(Exit && "Exiting block must have at least one exit"); 8934 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 8935 /*ControlsExit=*/IsOnlyExit); 8936 } 8937 8938 return getCouldNotCompute(); 8939 } 8940 8941 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 8942 const Loop *L, Value *ExitCond, bool ExitIfTrue, 8943 bool ControlsExit, bool AllowPredicates) { 8944 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 8945 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 8946 ControlsExit, AllowPredicates); 8947 } 8948 8949 std::optional<ScalarEvolution::ExitLimit> 8950 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 8951 bool ExitIfTrue, bool ControlsExit, 8952 bool AllowPredicates) { 8953 (void)this->L; 8954 (void)this->ExitIfTrue; 8955 (void)this->AllowPredicates; 8956 8957 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 8958 this->AllowPredicates == AllowPredicates && 8959 "Variance in assumed invariant key components!"); 8960 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 8961 if (Itr == TripCountMap.end()) 8962 return std::nullopt; 8963 return Itr->second; 8964 } 8965 8966 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 8967 bool ExitIfTrue, 8968 bool ControlsExit, 8969 bool AllowPredicates, 8970 const ExitLimit &EL) { 8971 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 8972 this->AllowPredicates == AllowPredicates && 8973 "Variance in assumed invariant key components!"); 8974 8975 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 8976 assert(InsertResult.second && "Expected successful insertion!"); 8977 (void)InsertResult; 8978 (void)ExitIfTrue; 8979 } 8980 8981 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 8982 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8983 bool ControlsExit, bool AllowPredicates) { 8984 8985 if (auto MaybeEL = 8986 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 8987 return *MaybeEL; 8988 8989 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 8990 ControlsExit, AllowPredicates); 8991 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 8992 return EL; 8993 } 8994 8995 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 8996 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8997 bool ControlsExit, bool AllowPredicates) { 8998 // Handle BinOp conditions (And, Or). 8999 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( 9000 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 9001 return *LimitFromBinOp; 9002 9003 // With an icmp, it may be feasible to compute an exact backedge-taken count. 9004 // Proceed to the next level to examine the icmp. 9005 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 9006 ExitLimit EL = 9007 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 9008 if (EL.hasFullInfo() || !AllowPredicates) 9009 return EL; 9010 9011 // Try again, but use SCEV predicates this time. 9012 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 9013 /*AllowPredicates=*/true); 9014 } 9015 9016 // Check for a constant condition. 
  // These are normally stripped out by SimplifyCFG, but ScalarEvolution may
  // be used by a pass which wishes to preserve the CFG and is temporarily
  // leaving constant conditions in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If we're exiting based on the overflow flag of an x.with.overflow intrinsic
  // with a constant step, we can form an equivalent icmp predicate and figure
  // out how many iterations will be taken before we exit.
  const WithOverflowInst *WO;
  const APInt *C;
  if (match(ExitCond, m_ExtractValue<1>(m_WithOverflowInst(WO))) &&
      match(WO->getRHS(), m_APInt(C))) {
    ConstantRange NWR =
        ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C,
                                             WO->getNoWrapKind());
    CmpInst::Predicate Pred;
    APInt NewRHSC, Offset;
    NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
    if (!ExitIfTrue)
      Pred = ICmpInst::getInversePredicate(Pred);
    auto *LHS = getSCEV(WO->getLHS());
    if (Offset != 0)
      LHS = getAddExpr(LHS, getConstant(Offset));
    auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC),
                                       ControlsExit, AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}

std::optional<ScalarEvolution::ExitLimit>
ScalarEvolution::computeExitLimitFromCondFromBinOp(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  Value *Op0, *Op1;
  bool IsAnd = false;
  if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
    IsAnd = true;
  else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
    IsAnd = false;
  else
    return std::nullopt;

  // EitherMayExit is true in these two cases:
  //   br (and Op0 Op1), loop, exit
  //   br (or  Op0 Op1), exit, loop
  bool EitherMayExit = IsAnd ^ ExitIfTrue;
  ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
                                                 ControlsExit && !EitherMayExit,
                                                 AllowPredicates);
  ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
                                                 ControlsExit && !EitherMayExit,
                                                 AllowPredicates);

  // Be robust against unsimplified IR for the form "op i1 X, NeutralElement".
  const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
  if (isa<ConstantInt>(Op1))
    return Op1 == NeutralElement ? EL0 : EL1;
  if (isa<ConstantInt>(Op0))
    return Op0 == NeutralElement ? EL1 : EL0;

  const SCEV *BECount = getCouldNotCompute();
  const SCEV *ConstantMaxBECount = getCouldNotCompute();
  const SCEV *SymbolicMaxBECount = getCouldNotCompute();
  if (EitherMayExit) {
    bool UseSequentialUMin = !isa<BinaryOperator>(ExitCond);
    // For the loop to keep executing, both conditions must permit another
    // iteration. Choose the less conservative count.
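    // Illustrative note (not tied to any particular input): given
    //   br i1 (and i1 %c0, %c1), label %loop, label %exit
    // the loop keeps running only while both %c0 and %c1 hold, so the first
    // condition to fail ends the loop and the exact count below is the
    // unsigned minimum of the two per-condition counts. The sequential
    // umin_seq form is requested above for the select form of and/or, where
    // the second condition may be poison when the first one has already
    // failed, and must not propagate poison into the result.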
    if (EL0.ExactNotTaken != getCouldNotCompute() &&
        EL1.ExactNotTaken != getCouldNotCompute()) {
      BECount = getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken,
                                           UseSequentialUMin);
    }
    if (EL0.ConstantMaxNotTaken == getCouldNotCompute())
      ConstantMaxBECount = EL1.ConstantMaxNotTaken;
    else if (EL1.ConstantMaxNotTaken == getCouldNotCompute())
      ConstantMaxBECount = EL0.ConstantMaxNotTaken;
    else
      ConstantMaxBECount = getUMinFromMismatchedTypes(EL0.ConstantMaxNotTaken,
                                                      EL1.ConstantMaxNotTaken);
    if (EL0.SymbolicMaxNotTaken == getCouldNotCompute())
      SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
    else if (EL1.SymbolicMaxNotTaken == getCouldNotCompute())
      SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
    else
      SymbolicMaxBECount = getUMinFromMismatchedTypes(
          EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
  } else {
    // Both conditions must trigger the exit at the same time for the loop to
    // exit. For now, be conservative.
    if (EL0.ExactNotTaken == EL1.ExactNotTaken)
      BECount = EL0.ExactNotTaken;
  }

  // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
  // to be more aggressive when computing BECount than when computing
  // ConstantMaxBECount. In these cases it is possible for EL0.ExactNotTaken
  // and EL1.ExactNotTaken to match, but for EL0.ConstantMaxNotTaken and
  // EL1.ConstantMaxNotTaken not to.
  if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    ConstantMaxBECount = getConstant(getUnsignedRangeMax(BECount));
  if (isa<SCEVCouldNotCompute>(SymbolicMaxBECount))
    SymbolicMaxBECount =
        isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
  return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, false,
                   {&EL0.Predicates, &EL1.Predicates});
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          bool ExitIfTrue,
                                          bool ControlsExit,
                                          bool AllowPredicates) {
  // If the condition was exit on true, convert the condition to exit on false.
  ICmpInst::Predicate Pred;
  if (!ExitIfTrue)
    Pred = ExitCond->getPredicate();
  else
    Pred = ExitCond->getInversePredicate();
  const ICmpInst::Predicate OriginalPred = Pred;

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsExit,
                                          AllowPredicates);
  if (EL.hasAnyInfo()) return EL;

  auto *ExhaustiveCount =
      computeExitCountExhaustively(L, ExitCond, ExitIfTrue);

  if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
    return ExhaustiveCount;

  return computeShiftCompareExitLimit(ExitCond->getOperand(0),
                                      ExitCond->getOperand(1), L, OriginalPred);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS,
                                          bool ControlsExit,
                                          bool AllowPredicates) {

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
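  // For example (illustrative), a test written as "n > i" is handled by the
  // swap below as "i < n", so the loop-variant operand ends up on the LHS and
  // only one operand order needs to be analyzed further.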
9180 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 9181 // If there is a loop-invariant, force it into the RHS. 9182 std::swap(LHS, RHS); 9183 Pred = ICmpInst::getSwappedPredicate(Pred); 9184 } 9185 9186 bool ControllingFiniteLoop = 9187 ControlsExit && loopHasNoAbnormalExits(L) && loopIsFiniteByAssumption(L); 9188 // Simplify the operands before analyzing them. 9189 (void)SimplifyICmpOperands(Pred, LHS, RHS, /*Depth=*/0, 9190 (EnableFiniteLoopControl ? ControllingFiniteLoop 9191 : false)); 9192 9193 // If we have a comparison of a chrec against a constant, try to use value 9194 // ranges to answer this query. 9195 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 9196 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 9197 if (AddRec->getLoop() == L) { 9198 // Form the constant range. 9199 ConstantRange CompRange = 9200 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 9201 9202 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 9203 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 9204 } 9205 9206 // If this loop must exit based on this condition (or execute undefined 9207 // behaviour), and we can prove the test sequence produced must repeat 9208 // the same values on self-wrap of the IV, then we can infer that IV 9209 // doesn't self wrap because if it did, we'd have an infinite (undefined) 9210 // loop. 9211 if (ControllingFiniteLoop && isLoopInvariant(RHS, L)) { 9212 // TODO: We can peel off any functions which are invertible *in L*. Loop 9213 // invariant terms are effectively constants for our purposes here. 9214 auto *InnerLHS = LHS; 9215 if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) 9216 InnerLHS = ZExt->getOperand(); 9217 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS)) { 9218 auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)); 9219 if (!AR->hasNoSelfWrap() && AR->getLoop() == L && AR->isAffine() && 9220 StrideC && StrideC->getAPInt().isPowerOf2()) { 9221 auto Flags = AR->getNoWrapFlags(); 9222 Flags = setFlags(Flags, SCEV::FlagNW); 9223 SmallVector<const SCEV*> Operands{AR->operands()}; 9224 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); 9225 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags); 9226 } 9227 } 9228 } 9229 9230 switch (Pred) { 9231 case ICmpInst::ICMP_NE: { // while (X != Y) 9232 // Convert to: while (X-Y != 0) 9233 if (LHS->getType()->isPointerTy()) { 9234 LHS = getLosslessPtrToIntExpr(LHS); 9235 if (isa<SCEVCouldNotCompute>(LHS)) 9236 return LHS; 9237 } 9238 if (RHS->getType()->isPointerTy()) { 9239 RHS = getLosslessPtrToIntExpr(RHS); 9240 if (isa<SCEVCouldNotCompute>(RHS)) 9241 return RHS; 9242 } 9243 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 9244 AllowPredicates); 9245 if (EL.hasAnyInfo()) return EL; 9246 break; 9247 } 9248 case ICmpInst::ICMP_EQ: { // while (X == Y) 9249 // Convert to: while (X-Y == 0) 9250 if (LHS->getType()->isPointerTy()) { 9251 LHS = getLosslessPtrToIntExpr(LHS); 9252 if (isa<SCEVCouldNotCompute>(LHS)) 9253 return LHS; 9254 } 9255 if (RHS->getType()->isPointerTy()) { 9256 RHS = getLosslessPtrToIntExpr(RHS); 9257 if (isa<SCEVCouldNotCompute>(RHS)) 9258 return RHS; 9259 } 9260 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 9261 if (EL.hasAnyInfo()) return EL; 9262 break; 9263 } 9264 case ICmpInst::ICMP_SLT: 9265 case ICmpInst::ICMP_ULT: { // while (X < Y) 9266 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 9267 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, 
                                    ControlsExit, AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: {                    // while (X > Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SGT;
    ExitLimit EL =
        howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
                            AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  default:
    break;
  }

  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
                                                      SwitchInst *Switch,
                                                      BasicBlock *ExitingBlock,
                                                      bool ControlsExit) {
  assert(!L->contains(ExitingBlock) && "Not an exiting block!");

  // Give up if the exit is the default dest of a switch.
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence", where the compared value is either %iv
  // or %iv.shifted in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match.  Return the corresponding PHI node
  // (%iv above) in PNOut and the opcode of the shift operation in OpCodeOut.
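  // For example (illustrative): if the compared value is %iv.shifted, the
  // shift is first "peeled off" by MatchPositiveShift and matching continues
  // on %iv itself; the match then only succeeds if the backedge value of %iv
  // is the same *kind* of shift as the one that was peeled off.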
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    std::optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so.  Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value.  We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations.  If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
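    // A worked example with illustrative values: on i8, {255,lshr,1} takes
    // the values 255, 127, 63, 31, 15, 7, 3, 1, 0 and then stays at 0, i.e.
    // it stabilizes after 8 = bitwidth(K) shifts.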
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, UpperBound, false);
  }

  return getCouldNotCompute();
}

/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I) || isa<ExtractValueInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
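  // (For example, with a header PHI %phi, the chain "%x = add i32 %phi, 1;
  // %y = mul i32 %x, 3" evolves from the single PHI %phi and %phi is
  // returned, whereas an expression mixing two different header PHIs was
  // rejected above.)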
9533 return PHI; 9534 } 9535 9536 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 9537 /// in the loop that V is derived from. We allow arbitrary operations along the 9538 /// way, but the operands of an operation must either be constants or a value 9539 /// derived from a constant PHI. If this expression does not fit with these 9540 /// constraints, return null. 9541 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 9542 Instruction *I = dyn_cast<Instruction>(V); 9543 if (!I || !canConstantEvolve(I, L)) return nullptr; 9544 9545 if (PHINode *PN = dyn_cast<PHINode>(I)) 9546 return PN; 9547 9548 // Record non-constant instructions contained by the loop. 9549 DenseMap<Instruction *, PHINode *> PHIMap; 9550 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0); 9551 } 9552 9553 /// EvaluateExpression - Given an expression that passes the 9554 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node 9555 /// in the loop has the value PHIVal. If we can't fold this expression for some 9556 /// reason, return null. 9557 static Constant *EvaluateExpression(Value *V, const Loop *L, 9558 DenseMap<Instruction *, Constant *> &Vals, 9559 const DataLayout &DL, 9560 const TargetLibraryInfo *TLI) { 9561 // Convenient constant check, but redundant for recursive calls. 9562 if (Constant *C = dyn_cast<Constant>(V)) return C; 9563 Instruction *I = dyn_cast<Instruction>(V); 9564 if (!I) return nullptr; 9565 9566 if (Constant *C = Vals.lookup(I)) return C; 9567 9568 // An instruction inside the loop depends on a value outside the loop that we 9569 // weren't given a mapping for, or a value such as a call inside the loop. 9570 if (!canConstantEvolve(I, L)) return nullptr; 9571 9572 // An unmapped PHI can be due to a branch or another loop inside this loop, 9573 // or due to this not being the initial iteration through a loop where we 9574 // couldn't compute the evolution of this particular PHI last time. 9575 if (isa<PHINode>(I)) return nullptr; 9576 9577 std::vector<Constant*> Operands(I->getNumOperands()); 9578 9579 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 9580 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 9581 if (!Operand) { 9582 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 9583 if (!Operands[i]) return nullptr; 9584 continue; 9585 } 9586 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 9587 Vals[Operand] = C; 9588 if (!C) return nullptr; 9589 Operands[i] = C; 9590 } 9591 9592 return ConstantFoldInstOperands(I, Operands, DL, TLI); 9593 } 9594 9595 9596 // If every incoming value to PN except the one for BB is a specific Constant, 9597 // return that, else return nullptr. 9598 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 9599 Constant *IncomingVal = nullptr; 9600 9601 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 9602 if (PN->getIncomingBlock(i) == BB) 9603 continue; 9604 9605 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 9606 if (!CurrentVal) 9607 return nullptr; 9608 9609 if (IncomingVal != CurrentVal) { 9610 if (IncomingVal) 9611 return nullptr; 9612 IncomingVal = CurrentVal; 9613 } 9614 } 9615 9616 return IncomingVal; 9617 } 9618 9619 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 9620 /// in the header of its containing loop, we know the loop executes a 9621 /// constant number of times, and the PHI node is just a recurrence 9622 /// involving constants, fold it. 
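/// For example (illustrative IR): for a header PHI
///   %i = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
///   %i.next = add i32 %i, 2
/// and a backedge-taken count of 5, the symbolic execution below yields the
/// exit value 10.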
9623 Constant * 9624 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 9625 const APInt &BEs, 9626 const Loop *L) { 9627 auto I = ConstantEvolutionLoopExitValue.find(PN); 9628 if (I != ConstantEvolutionLoopExitValue.end()) 9629 return I->second; 9630 9631 if (BEs.ugt(MaxBruteForceIterations)) 9632 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 9633 9634 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 9635 9636 DenseMap<Instruction *, Constant *> CurrentIterVals; 9637 BasicBlock *Header = L->getHeader(); 9638 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 9639 9640 BasicBlock *Latch = L->getLoopLatch(); 9641 if (!Latch) 9642 return nullptr; 9643 9644 for (PHINode &PHI : Header->phis()) { 9645 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 9646 CurrentIterVals[&PHI] = StartCST; 9647 } 9648 if (!CurrentIterVals.count(PN)) 9649 return RetVal = nullptr; 9650 9651 Value *BEValue = PN->getIncomingValueForBlock(Latch); 9652 9653 // Execute the loop symbolically to determine the exit value. 9654 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 9655 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 9656 9657 unsigned NumIterations = BEs.getZExtValue(); // must be in range 9658 unsigned IterationNum = 0; 9659 const DataLayout &DL = getDataLayout(); 9660 for (; ; ++IterationNum) { 9661 if (IterationNum == NumIterations) 9662 return RetVal = CurrentIterVals[PN]; // Got exit value! 9663 9664 // Compute the value of the PHIs for the next iteration. 9665 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 9666 DenseMap<Instruction *, Constant *> NextIterVals; 9667 Constant *NextPHI = 9668 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 9669 if (!NextPHI) 9670 return nullptr; // Couldn't evaluate! 9671 NextIterVals[PN] = NextPHI; 9672 9673 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 9674 9675 // Also evaluate the other PHI nodes. However, we don't get to stop if we 9676 // cease to be able to evaluate one of them or if they stop evolving, 9677 // because that doesn't necessarily prevent us from computing PN. 9678 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 9679 for (const auto &I : CurrentIterVals) { 9680 PHINode *PHI = dyn_cast<PHINode>(I.first); 9681 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 9682 PHIsToCompute.emplace_back(PHI, I.second); 9683 } 9684 // We use two distinct loops because EvaluateExpression may invalidate any 9685 // iterators into CurrentIterVals. 9686 for (const auto &I : PHIsToCompute) { 9687 PHINode *PHI = I.first; 9688 Constant *&NextPHI = NextIterVals[PHI]; 9689 if (!NextPHI) { // Not already computed. 9690 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 9691 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 9692 } 9693 if (NextPHI != I.second) 9694 StoppedEvolving = false; 9695 } 9696 9697 // If all entries in CurrentIterVals == NextIterVals then we can stop 9698 // iterating, the loop can't continue to change. 9699 if (StoppedEvolving) 9700 return RetVal = CurrentIterVals[PN]; 9701 9702 CurrentIterVals.swap(NextIterVals); 9703 } 9704 } 9705 9706 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, 9707 Value *Cond, 9708 bool ExitWhen) { 9709 PHINode *PN = getConstantEvolvingPHI(Cond, L); 9710 if (!PN) return getCouldNotCompute(); 9711 9712 // If the loop is canonicalized, the PHI will have exactly two entries. 
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;    // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      if (!isa<SCEVConstant>(C))
        ValuesAtScopesUsers[C].push_back({L, V});
      break;
    }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
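/// For example (illustrative): a SCEVConstant yields its ConstantInt
/// directly, and a zext of a constant operand is rebuilt as the
/// corresponding ConstantExpr cast; any addrec yields nullptr, since a
/// recurrence has no single constant value.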
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (V->getSCEVType()) {
  case scCouldNotCompute:
  case scAddRecExpr:
    return nullptr;
  case scConstant:
    return cast<SCEVConstant>(V)->getValue();
  case scUnknown:
    return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
  case scSignExtend: {
    const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
      return ConstantExpr::getSExt(CastOp, SS->getType());
    return nullptr;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
      return ConstantExpr::getZExt(CastOp, SZ->getType());
    return nullptr;
  }
  case scPtrToInt: {
    const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
      return ConstantExpr::getPtrToInt(CastOp, P2I->getType());

    return nullptr;
  }
  case scTruncate: {
    const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
      return ConstantExpr::getTrunc(CastOp, ST->getType());
    return nullptr;
  }
  case scAddExpr: {
    const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
    Constant *C = nullptr;
    for (const SCEV *Op : SA->operands()) {
      Constant *OpC = BuildConstantFromSCEV(Op);
      if (!OpC)
        return nullptr;
      if (!C) {
        C = OpC;
        continue;
      }
      assert(!C->getType()->isPointerTy() &&
             "Can only have one pointer, and it must be last");
      if (auto *PT = dyn_cast<PointerType>(OpC->getType())) {
        // The offsets have been converted to bytes.  We can add bytes to an
        // i8* by GEP with the byte count in the first index.
        Type *DestPtrTy =
            Type::getInt8PtrTy(PT->getContext(), PT->getAddressSpace());
        OpC = ConstantExpr::getBitCast(OpC, DestPtrTy);
        C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()),
                                           OpC, C);
      } else {
        C = ConstantExpr::getAdd(C, OpC);
      }
    }
    return C;
  }
  case scMulExpr: {
    const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
    Constant *C = nullptr;
    for (const SCEV *Op : SM->operands()) {
      assert(!Op->getType()->isPointerTy() && "Can't multiply pointers");
      Constant *OpC = BuildConstantFromSCEV(Op);
      if (!OpC)
        return nullptr;
      C = C ? ConstantExpr::getMul(C, OpC) : OpC;
    }
    return C;
  }
  case scUDivExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
  case scSequentialUMinExpr:
    return nullptr; // TODO: udiv, smax, umax, smin, umin, umin_seq.
  }
  llvm_unreachable("Unknown SCEV kind!");
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  switch (V->getSCEVType()) {
  case scConstant:
    return V;
  case scAddRecExpr: {
    // If this is a loop recurrence for a loop that does not contain L, then we
    // are dealing with the final value computed by the loop.
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(V);
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
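    // For example (illustrative): for {0,+,%n}<%inner> queried at the scope
    // of an enclosing loop where %n folds to a constant, the loop below
    // rebuilds the addrec with that folded operand before the recurrence
    // itself is evaluated.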
9894 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 9895 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 9896 if (OpAtScope == AddRec->getOperand(i)) 9897 continue; 9898 9899 // Okay, at least one of these operands is loop variant but might be 9900 // foldable. Build a new instance of the folded commutative expression. 9901 SmallVector<const SCEV *, 8> NewOps; 9902 NewOps.reserve(AddRec->getNumOperands()); 9903 append_range(NewOps, AddRec->operands().take_front(i)); 9904 NewOps.push_back(OpAtScope); 9905 for (++i; i != e; ++i) 9906 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 9907 9908 const SCEV *FoldedRec = getAddRecExpr( 9909 NewOps, AddRec->getLoop(), AddRec->getNoWrapFlags(SCEV::FlagNW)); 9910 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 9911 // The addrec may be folded to a nonrecurrence, for example, if the 9912 // induction variable is multiplied by zero after constant folding. Go 9913 // ahead and return the folded value. 9914 if (!AddRec) 9915 return FoldedRec; 9916 break; 9917 } 9918 9919 // If the scope is outside the addrec's loop, evaluate it by using the 9920 // loop exit value of the addrec. 9921 if (!AddRec->getLoop()->contains(L)) { 9922 // To evaluate this recurrence, we need to know how many times the AddRec 9923 // loop iterates. Compute this now. 9924 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 9925 if (BackedgeTakenCount == getCouldNotCompute()) 9926 return AddRec; 9927 9928 // Then, evaluate the AddRec. 9929 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 9930 } 9931 9932 return AddRec; 9933 } 9934 case scTruncate: 9935 case scZeroExtend: 9936 case scSignExtend: 9937 case scPtrToInt: 9938 case scAddExpr: 9939 case scMulExpr: 9940 case scUDivExpr: 9941 case scUMaxExpr: 9942 case scSMaxExpr: 9943 case scUMinExpr: 9944 case scSMinExpr: 9945 case scSequentialUMinExpr: { 9946 ArrayRef<const SCEV *> Ops = V->operands(); 9947 // Avoid performing the look-up in the common case where the specified 9948 // expression has no loop-variant portions. 9949 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 9950 const SCEV *OpAtScope = getSCEVAtScope(Ops[i], L); 9951 if (OpAtScope != Ops[i]) { 9952 // Okay, at least one of these operands is loop variant but might be 9953 // foldable. Build a new instance of the folded commutative expression. 
9954 SmallVector<const SCEV *, 8> NewOps; 9955 NewOps.reserve(Ops.size()); 9956 append_range(NewOps, Ops.take_front(i)); 9957 NewOps.push_back(OpAtScope); 9958 9959 for (++i; i != e; ++i) { 9960 OpAtScope = getSCEVAtScope(Ops[i], L); 9961 NewOps.push_back(OpAtScope); 9962 } 9963 9964 switch (V->getSCEVType()) { 9965 case scTruncate: 9966 case scZeroExtend: 9967 case scSignExtend: 9968 case scPtrToInt: 9969 return getCastExpr(V->getSCEVType(), NewOps[0], V->getType()); 9970 case scAddExpr: 9971 return getAddExpr(NewOps, cast<SCEVAddExpr>(V)->getNoWrapFlags()); 9972 case scMulExpr: 9973 return getMulExpr(NewOps, cast<SCEVMulExpr>(V)->getNoWrapFlags()); 9974 case scUDivExpr: 9975 return getUDivExpr(NewOps[0], NewOps[1]); 9976 case scUMaxExpr: 9977 case scSMaxExpr: 9978 case scUMinExpr: 9979 case scSMinExpr: 9980 return getMinMaxExpr(V->getSCEVType(), NewOps); 9981 case scSequentialUMinExpr: 9982 return getSequentialMinMaxExpr(V->getSCEVType(), NewOps); 9983 case scConstant: 9984 case scAddRecExpr: 9985 case scUnknown: 9986 case scCouldNotCompute: 9987 llvm_unreachable("Can not get those expressions here."); 9988 } 9989 llvm_unreachable("Unknown n-ary-like SCEV type!"); 9990 } 9991 } 9992 // If we got here, all operands are loop invariant. 9993 return V; 9994 } 9995 case scUnknown: { 9996 // If this instruction is evolved from a constant-evolving PHI, compute the 9997 // exit value from the loop without using SCEVs. 9998 const SCEVUnknown *SU = cast<SCEVUnknown>(V); 9999 Instruction *I = dyn_cast<Instruction>(SU->getValue()); 10000 if (!I) 10001 return V; // This is some other type of SCEVUnknown, just return it. 10002 10003 if (PHINode *PN = dyn_cast<PHINode>(I)) { 10004 const Loop *CurrLoop = this->LI[I->getParent()]; 10005 // Looking for loop exit value. 10006 if (CurrLoop && CurrLoop->getParentLoop() == L && 10007 PN->getParent() == CurrLoop->getHeader()) { 10008 // Okay, there is no closed form solution for the PHI node. Check 10009 // to see if the loop that contains it has a known backedge-taken 10010 // count. If so, we may be able to force computation of the exit 10011 // value. 10012 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop); 10013 // This trivial case can show up in some degenerate cases where 10014 // the incoming IR has not yet been fully simplified. 10015 if (BackedgeTakenCount->isZero()) { 10016 Value *InitValue = nullptr; 10017 bool MultipleInitValues = false; 10018 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 10019 if (!CurrLoop->contains(PN->getIncomingBlock(i))) { 10020 if (!InitValue) 10021 InitValue = PN->getIncomingValue(i); 10022 else if (InitValue != PN->getIncomingValue(i)) { 10023 MultipleInitValues = true; 10024 break; 10025 } 10026 } 10027 } 10028 if (!MultipleInitValues && InitValue) 10029 return getSCEV(InitValue); 10030 } 10031 // Do we have a loop invariant value flowing around the backedge 10032 // for a loop which must execute the backedge? 10033 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 10034 isKnownPositive(BackedgeTakenCount) && 10035 PN->getNumIncomingValues() == 2) { 10036 10037 unsigned InLoopPred = 10038 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1; 10039 Value *BackedgeVal = PN->getIncomingValue(InLoopPred); 10040 if (CurrLoop->isLoopInvariant(BackedgeVal)) 10041 return getSCEV(BackedgeVal); 10042 } 10043 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 10044 // Okay, we know how many times the containing loop executes. 
          // If this is a constant evolving PHI node, get the final value at
          // the specified iteration number.
          Constant *RV =
              getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
          if (RV)
            return getSCEV(RV);
        }
      }

      // If there is a single-input Phi, evaluate it at our scope. If we can
      // prove that this replacement does not break LCSSA form, use new value.
      if (PN->getNumOperands() == 1) {
        const SCEV *Input = getSCEV(PN->getOperand(0));
        const SCEV *InputAtScope = getSCEVAtScope(Input, L);
        // TODO: We can generalize this using LI.replacementPreservesLCSSAForm;
        // for now, as the simplest case, only constants are supported.
        if (isa<SCEVConstant>(InputAtScope))
          return InputAtScope;
      }
    }

    // Okay, this is an expression that we cannot symbolically evaluate
    // into a SCEV.  Check to see if it's possible to symbolically evaluate
    // the arguments into constants, and if so, try to constant propagate the
    // result.  This is particularly useful for computing loop exit values.
    if (!CanConstantFold(I))
      return V; // This is some other type of SCEVUnknown, just return it.

    SmallVector<Constant *, 4> Operands;
    Operands.reserve(I->getNumOperands());
    bool MadeImprovement = false;
    for (Value *Op : I->operands()) {
      if (Constant *C = dyn_cast<Constant>(Op)) {
        Operands.push_back(C);
        continue;
      }

      // If any operand is non-constant and not SCEVable (i.e. neither an
      // integer nor a pointer), don't even try to analyze it with SCEV
      // techniques.
      if (!isSCEVable(Op->getType()))
        return V;

      const SCEV *OrigV = getSCEV(Op);
      const SCEV *OpV = getSCEVAtScope(OrigV, L);
      MadeImprovement |= OrigV != OpV;

      Constant *C = BuildConstantFromSCEV(OpV);
      if (!C)
        return V;
      if (C->getType() != Op->getType())
        C = ConstantExpr::getCast(
            CastInst::getCastOpcode(C, false, Op->getType(), false), C,
            Op->getType());
      Operands.push_back(C);
    }

    // Check to see if getSCEVAtScope actually made an improvement.
    if (!MadeImprovement)
      return V; // This is some other type of SCEVUnknown, just return it.

    Constant *C = nullptr;
    const DataLayout &DL = getDataLayout();
    C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
    if (!C)
      return V;
    return getSCEV(C);
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
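/// A worked example with illustrative numbers, for BW = 8: solving
/// 4*X = 8 (mod 256) gives D = gcd(4, 256) = 4, which divides B = 8, so a
/// solution exists; A/D = 1 is its own inverse mod 64, and the minimum
/// unsigned root is X = (1 * 8 mod 256) / 4 = 2. Indeed 4*2 = 8 (mod 256).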
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countr_zero();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for
  // B is not less than the multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //    I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  //    (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns std::nullopt if the addrec coefficients are not
/// compile-time constants.
static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return std::nullopt;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isZero() && "This is not a quadratic addrec");

  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
10213 N = N.sext(NewWidth); 10214 M = M.sext(NewWidth); 10215 L = L.sext(NewWidth); 10216 10217 // The increments are M, M+N, M+2N, ..., so the accumulated values are 10218 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 10219 // L+M, L+2M+N, L+3M+3N, ... 10220 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 10221 // 10222 // The equation Acc = 0 is then 10223 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 10224 // In a quadratic form it becomes: 10225 // N n^2 + (2M-N) n + 2L = 0. 10226 10227 APInt A = N; 10228 APInt B = 2 * M - A; 10229 APInt C = 2 * L; 10230 APInt T = APInt(NewWidth, 2); 10231 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 10232 << "x + " << C << ", coeff bw: " << NewWidth 10233 << ", multiplied by " << T << '\n'); 10234 return std::make_tuple(A, B, C, T, BitWidth); 10235 } 10236 10237 /// Helper function to compare optional APInts: 10238 /// (a) if X and Y both exist, return min(X, Y), 10239 /// (b) if neither X nor Y exist, return std::nullopt, 10240 /// (c) if exactly one of X and Y exists, return that value. 10241 static std::optional<APInt> MinOptional(std::optional<APInt> X, 10242 std::optional<APInt> Y) { 10243 if (X && Y) { 10244 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 10245 APInt XW = X->sext(W); 10246 APInt YW = Y->sext(W); 10247 return XW.slt(YW) ? *X : *Y; 10248 } 10249 if (!X && !Y) 10250 return std::nullopt; 10251 return X ? *X : *Y; 10252 } 10253 10254 /// Helper function to truncate an optional APInt to a given BitWidth. 10255 /// When solving addrec-related equations, it is preferable to return a value 10256 /// that has the same bit width as the original addrec's coefficients. If the 10257 /// solution fits in the original bit width, truncate it (except for i1). 10258 /// Returning a value of a different bit width may inhibit some optimizations. 10259 /// 10260 /// In general, a solution to a quadratic equation generated from an addrec 10261 /// may require BW+1 bits, where BW is the bit width of the addrec's 10262 /// coefficients. The reason is that the coefficients of the quadratic 10263 /// equation are BW+1 bits wide (to avoid truncation when converting from 10264 /// the addrec to the equation). 10265 static std::optional<APInt> TruncIfPossible(std::optional<APInt> X, 10266 unsigned BitWidth) { 10267 if (!X) 10268 return std::nullopt; 10269 unsigned W = X->getBitWidth(); 10270 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 10271 return X->trunc(BitWidth); 10272 return X; 10273 } 10274 10275 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 10276 /// iterations. The values L, M, N are assumed to be signed, and they 10277 /// should all have the same bit widths. 10278 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 10279 /// where BW is the bit width of the addrec's coefficients. 10280 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 10281 /// returned as such, otherwise the bit width of the returned value may 10282 /// be greater than BW. 10283 /// 10284 /// This function returns std::nullopt if 10285 /// (a) the addrec coefficients are not constant, or 10286 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 10287 /// like x^2 = 5, no integer solutions exist, in other cases an integer 10288 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 
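/// For example (illustrative): the chrec {-6,+,2,+,2} takes the values
/// -6, -4, 0, ... so the least n with c(n) = 0 is n = 2. The scaled equation
/// produced by GetQuadraticEquation is 2n^2 + 2n - 12 = 0, whose smallest
/// non-negative root is indeed 2.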
10289 static std::optional<APInt> 10290 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 10291 APInt A, B, C, M; 10292 unsigned BitWidth; 10293 auto T = GetQuadraticEquation(AddRec); 10294 if (!T) 10295 return std::nullopt; 10296 10297 std::tie(A, B, C, M, BitWidth) = *T; 10298 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 10299 std::optional<APInt> X = 10300 APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth + 1); 10301 if (!X) 10302 return std::nullopt; 10303 10304 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 10305 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 10306 if (!V->isZero()) 10307 return std::nullopt; 10308 10309 return TruncIfPossible(X, BitWidth); 10310 } 10311 10312 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 10313 /// iterations. The values M, N are assumed to be signed, and they 10314 /// should all have the same bit widths. 10315 /// Find the least n such that c(n) does not belong to the given range, 10316 /// while c(n-1) does. 10317 /// 10318 /// This function returns std::nullopt if 10319 /// (a) the addrec coefficients are not constant, or 10320 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 10321 /// bounds of the range. 10322 static std::optional<APInt> 10323 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 10324 const ConstantRange &Range, ScalarEvolution &SE) { 10325 assert(AddRec->getOperand(0)->isZero() && 10326 "Starting value of addrec should be 0"); 10327 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 10328 << Range << ", addrec " << *AddRec << '\n'); 10329 // This case is handled in getNumIterationsInRange. Here we can assume that 10330 // we start in the range. 10331 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 10332 "Addrec's initial value should be in range"); 10333 10334 APInt A, B, C, M; 10335 unsigned BitWidth; 10336 auto T = GetQuadraticEquation(AddRec); 10337 if (!T) 10338 return std::nullopt; 10339 10340 // Be careful about the return value: there can be two reasons for not 10341 // returning an actual number. First, if no solutions to the equations 10342 // were found, and second, if the solutions don't leave the given range. 10343 // The first case means that the actual solution is "unknown", the second 10344 // means that it's known, but not valid. If the solution is unknown, we 10345 // cannot make any conclusions. 10346 // Return a pair: the optional solution and a flag indicating if the 10347 // solution was found. 10348 auto SolveForBoundary = 10349 [&](APInt Bound) -> std::pair<std::optional<APInt>, bool> { 10350 // Solve for signed overflow and unsigned overflow, pick the lower 10351 // solution. 10352 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 10353 << Bound << " (before multiplying by " << M << ")\n"); 10354 Bound *= M; // The quadratic equation multiplier. 
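    // (The coefficients returned by GetQuadraticEquation were all scaled by
    // M, so the boundary value must be scaled the same way before it is
    // folded into the constant term below.)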

    std::optional<APInt> SO;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    std::optional<APInt> UO =
        APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth + 1);

    auto LeavesRange = [&](const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X - 1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns std::nullopt, it means that there
    // can be a solution, but the function failed to find it. We cannot treat
    // it as "no solution".
    if (!SO || !UO)
      return {std::nullopt, false};

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    std::optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return {Min, true};
    std::optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return {Max, true};

    // Solutions were found, but were eliminated, hence the "true".
    return {std::nullopt, true};
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // The lower bound is inclusive; subtract 1 to represent the exiting value.
  APInt Lower = Range.getLower().sext(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sext(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If either solution was unknown, no meaningful conclusions can be made.
  if (!SL.second || !SU.second)
    return std::nullopt;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns std::nullopt, the
  // correct solution is not some value between the Max for this boundary and
  // the Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B. It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow. If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A. But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with an "x != y" exit test. The exit
  // condition is now expressed as a single expression, V = x-y. So the exit
  // test is effectively V != 0. We know, and take advantage of, the fact that
  // this expression is only ever used in a comparison-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant, the answer is immediate.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index. When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(*S));
      return ExitLimit(R, R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
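  // (Start and Step are evaluated in the scope of the parent loop so that
  // both are expressed in a form that is invariant in L, as the equation
  // above assumes.)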
10509 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 10510 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 10511 10512 // For now we handle only constant steps. 10513 // 10514 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 10515 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 10516 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 10517 // We have not yet seen any such cases. 10518 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 10519 if (!StepC || StepC->getValue()->isZero()) 10520 return getCouldNotCompute(); 10521 10522 // For positive steps (counting up until unsigned overflow): 10523 // N = -Start/Step (as unsigned) 10524 // For negative steps (counting down to zero): 10525 // N = Start/-Step 10526 // First compute the unsigned distance from zero in the direction of Step. 10527 bool CountDown = StepC->getAPInt().isNegative(); 10528 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 10529 10530 // Handle unitary steps, which cannot wraparound. 10531 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 10532 // N = Distance (as unsigned) 10533 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 10534 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L)); 10535 MaxBECount = APIntOps::umin(MaxBECount, getUnsignedRangeMax(Distance)); 10536 10537 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 10538 // we end up with a loop whose backedge-taken count is n - 1. Detect this 10539 // case, and see if we can improve the bound. 10540 // 10541 // Explicitly handling this here is necessary because getUnsignedRange 10542 // isn't context-sensitive; it doesn't know that we only care about the 10543 // range inside the loop. 10544 const SCEV *Zero = getZero(Distance->getType()); 10545 const SCEV *One = getOne(Distance->getType()); 10546 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 10547 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 10548 // If Distance + 1 doesn't overflow, we can compute the maximum distance 10549 // as "unsigned_max(Distance + 1) - 1". 10550 ConstantRange CR = getUnsignedRange(DistancePlusOne); 10551 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 10552 } 10553 return ExitLimit(Distance, getConstant(MaxBECount), Distance, false, 10554 Predicates); 10555 } 10556 10557 // If the condition controls loop exit (the loop exits only if the expression 10558 // is true) and the addition is no-wrap we can use unsigned divide to 10559 // compute the backedge count. In this case, the step may not divide the 10560 // distance, but we don't care because if the condition is "missed" the loop 10561 // will have undefined behavior due to wrapping. 10562 if (ControlsExit && AddRec->hasNoSelfWrap() && 10563 loopHasNoAbnormalExits(AddRec->getLoop())) { 10564 const SCEV *Exact = 10565 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 10566 const SCEV *ConstantMax = getCouldNotCompute(); 10567 if (Exact != getCouldNotCompute()) { 10568 APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L)); 10569 ConstantMax = 10570 getConstant(APIntOps::umin(MaxInt, getUnsignedRangeMax(Exact))); 10571 } 10572 const SCEV *SymbolicMax = 10573 isa<SCEVCouldNotCompute>(Exact) ? ConstantMax : Exact; 10574 return ExitLimit(Exact, ConstantMax, SymbolicMax, false, Predicates); 10575 } 10576 10577 // Solve the general equation. 
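  // As a worked instance of the equation above (purely illustrative): with
  // Step = 3, -Start = 10 and BW = 8, we solve 3*N = 10 (mod 256) as
  //   N = 10 * 3^{-1} (mod 256) = 10 * 171 (mod 256) = 174,
  // since 3 * 171 = 513 = 2*256 + 1, and indeed 3 * 174 = 522 = 2*256 + 10.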
10578 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 10579 getNegativeSCEV(Start), *this); 10580 10581 const SCEV *M = E; 10582 if (E != getCouldNotCompute()) { 10583 APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, L)); 10584 M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E))); 10585 } 10586 auto *S = isa<SCEVCouldNotCompute>(E) ? M : E; 10587 return ExitLimit(E, M, S, false, Predicates); 10588 } 10589 10590 ScalarEvolution::ExitLimit 10591 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 10592 // Loops that look like: while (X == 0) are very strange indeed. We don't 10593 // handle them yet except for the trivial case. This could be expanded in the 10594 // future as needed. 10595 10596 // If the value is a constant, check to see if it is known to be non-zero 10597 // already. If so, the backedge will execute zero times. 10598 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 10599 if (!C->getValue()->isZero()) 10600 return getZero(C->getType()); 10601 return getCouldNotCompute(); // Otherwise it will loop infinitely. 10602 } 10603 10604 // We could implement others, but I really doubt anyone writes loops like 10605 // this, and if they did, they would already be constant folded. 10606 return getCouldNotCompute(); 10607 } 10608 10609 std::pair<const BasicBlock *, const BasicBlock *> 10610 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) 10611 const { 10612 // If the block has a unique predecessor, then there is no path from the 10613 // predecessor to the block that does not go through the direct edge 10614 // from the predecessor to the block. 10615 if (const BasicBlock *Pred = BB->getSinglePredecessor()) 10616 return {Pred, BB}; 10617 10618 // A loop's header is defined to be a block that dominates the loop. 10619 // If the header has a unique predecessor outside the loop, it must be 10620 // a block that has exactly one successor that can reach the loop. 10621 if (const Loop *L = LI.getLoopFor(BB)) 10622 return {L->getLoopPredecessor(), L->getHeader()}; 10623 10624 return {nullptr, nullptr}; 10625 } 10626 10627 /// SCEV structural equivalence is usually sufficient for testing whether two 10628 /// expressions are equal, however for the purposes of looking for a condition 10629 /// guarding a loop, it can be useful to be a little more general, since a 10630 /// front-end may have replicated the controlling expression. 10631 static bool HasSameValue(const SCEV *A, const SCEV *B) { 10632 // Quick check to see if they are the same SCEV. 10633 if (A == B) return true; 10634 10635 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 10636 // Not all instructions that are "identical" compute the same value. For 10637 // instance, two distinct alloca instructions allocating the same type are 10638 // identical and do not read memory; but compute distinct values. 10639 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 10640 }; 10641 10642 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 10643 // two different instructions with the same value. Check for this case. 
10644 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 10645 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 10646 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 10647 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 10648 if (ComputesEqualValues(AI, BI)) 10649 return true; 10650 10651 // Otherwise assume they may have a different value. 10652 return false; 10653 } 10654 10655 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 10656 const SCEV *&LHS, const SCEV *&RHS, 10657 unsigned Depth, 10658 bool ControllingFiniteLoop) { 10659 bool Changed = false; 10660 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 10661 // '0 != 0'. 10662 auto TrivialCase = [&](bool TriviallyTrue) { 10663 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 10664 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 10665 return true; 10666 }; 10667 // If we hit the max recursion limit bail out. 10668 if (Depth >= 3) 10669 return false; 10670 10671 // Canonicalize a constant to the right side. 10672 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 10673 // Check for both operands constant. 10674 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 10675 if (ConstantExpr::getICmp(Pred, 10676 LHSC->getValue(), 10677 RHSC->getValue())->isNullValue()) 10678 return TrivialCase(false); 10679 else 10680 return TrivialCase(true); 10681 } 10682 // Otherwise swap the operands to put the constant on the right. 10683 std::swap(LHS, RHS); 10684 Pred = ICmpInst::getSwappedPredicate(Pred); 10685 Changed = true; 10686 } 10687 10688 // If we're comparing an addrec with a value which is loop-invariant in the 10689 // addrec's loop, put the addrec on the left. Also make a dominance check, 10690 // as both operands could be addrecs loop-invariant in each other's loop. 10691 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 10692 const Loop *L = AR->getLoop(); 10693 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 10694 std::swap(LHS, RHS); 10695 Pred = ICmpInst::getSwappedPredicate(Pred); 10696 Changed = true; 10697 } 10698 } 10699 10700 // If there's a constant operand, canonicalize comparisons with boundary 10701 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 10702 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 10703 const APInt &RA = RC->getAPInt(); 10704 10705 bool SimplifiedByConstantRange = false; 10706 10707 if (!ICmpInst::isEquality(Pred)) { 10708 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 10709 if (ExactCR.isFullSet()) 10710 return TrivialCase(true); 10711 else if (ExactCR.isEmptySet()) 10712 return TrivialCase(false); 10713 10714 APInt NewRHS; 10715 CmpInst::Predicate NewPred; 10716 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 10717 ICmpInst::isEquality(NewPred)) { 10718 // We were able to convert an inequality to an equality. 10719 Pred = NewPred; 10720 RHS = getConstant(NewRHS); 10721 Changed = SimplifiedByConstantRange = true; 10722 } 10723 } 10724 10725 if (!SimplifiedByConstantRange) { 10726 switch (Pred) { 10727 default: 10728 break; 10729 case ICmpInst::ICMP_EQ: 10730 case ICmpInst::ICMP_NE: 10731 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 
10732 if (!RA) 10733 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 10734 if (const SCEVMulExpr *ME = 10735 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 10736 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 10737 ME->getOperand(0)->isAllOnesValue()) { 10738 RHS = AE->getOperand(1); 10739 LHS = ME->getOperand(1); 10740 Changed = true; 10741 } 10742 break; 10743 10744 10745 // The "Should have been caught earlier!" messages refer to the fact 10746 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 10747 // should have fired on the corresponding cases, and canonicalized the 10748 // check to trivial case. 10749 10750 case ICmpInst::ICMP_UGE: 10751 assert(!RA.isMinValue() && "Should have been caught earlier!"); 10752 Pred = ICmpInst::ICMP_UGT; 10753 RHS = getConstant(RA - 1); 10754 Changed = true; 10755 break; 10756 case ICmpInst::ICMP_ULE: 10757 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 10758 Pred = ICmpInst::ICMP_ULT; 10759 RHS = getConstant(RA + 1); 10760 Changed = true; 10761 break; 10762 case ICmpInst::ICMP_SGE: 10763 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 10764 Pred = ICmpInst::ICMP_SGT; 10765 RHS = getConstant(RA - 1); 10766 Changed = true; 10767 break; 10768 case ICmpInst::ICMP_SLE: 10769 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 10770 Pred = ICmpInst::ICMP_SLT; 10771 RHS = getConstant(RA + 1); 10772 Changed = true; 10773 break; 10774 } 10775 } 10776 } 10777 10778 // Check for obvious equality. 10779 if (HasSameValue(LHS, RHS)) { 10780 if (ICmpInst::isTrueWhenEqual(Pred)) 10781 return TrivialCase(true); 10782 if (ICmpInst::isFalseWhenEqual(Pred)) 10783 return TrivialCase(false); 10784 } 10785 10786 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 10787 // adding or subtracting 1 from one of the operands. This can be done for 10788 // one of two reasons: 10789 // 1) The range of the RHS does not include the (signed/unsigned) boundaries 10790 // 2) The loop is finite, with this comparison controlling the exit. Since the 10791 // loop is finite, the bound cannot include the corresponding boundary 10792 // (otherwise it would loop forever). 
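  // For example, if Pred is ICMP_SLE and the signed range of RHS is known to
  // be [0, 100], the signed maximum is excluded, so "LHS s<= RHS" can safely
  // be rewritten as "LHS s< (RHS + 1)<nsw>" below.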
10793 switch (Pred) { 10794 case ICmpInst::ICMP_SLE: 10795 if (ControllingFiniteLoop || !getSignedRangeMax(RHS).isMaxSignedValue()) { 10796 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 10797 SCEV::FlagNSW); 10798 Pred = ICmpInst::ICMP_SLT; 10799 Changed = true; 10800 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 10801 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 10802 SCEV::FlagNSW); 10803 Pred = ICmpInst::ICMP_SLT; 10804 Changed = true; 10805 } 10806 break; 10807 case ICmpInst::ICMP_SGE: 10808 if (ControllingFiniteLoop || !getSignedRangeMin(RHS).isMinSignedValue()) { 10809 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 10810 SCEV::FlagNSW); 10811 Pred = ICmpInst::ICMP_SGT; 10812 Changed = true; 10813 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 10814 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 10815 SCEV::FlagNSW); 10816 Pred = ICmpInst::ICMP_SGT; 10817 Changed = true; 10818 } 10819 break; 10820 case ICmpInst::ICMP_ULE: 10821 if (ControllingFiniteLoop || !getUnsignedRangeMax(RHS).isMaxValue()) { 10822 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 10823 SCEV::FlagNUW); 10824 Pred = ICmpInst::ICMP_ULT; 10825 Changed = true; 10826 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 10827 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 10828 Pred = ICmpInst::ICMP_ULT; 10829 Changed = true; 10830 } 10831 break; 10832 case ICmpInst::ICMP_UGE: 10833 if (ControllingFiniteLoop || !getUnsignedRangeMin(RHS).isMinValue()) { 10834 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 10835 Pred = ICmpInst::ICMP_UGT; 10836 Changed = true; 10837 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 10838 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 10839 SCEV::FlagNUW); 10840 Pred = ICmpInst::ICMP_UGT; 10841 Changed = true; 10842 } 10843 break; 10844 default: 10845 break; 10846 } 10847 10848 // TODO: More simplifications are possible here. 10849 10850 // Recursively simplify until we either hit a recursion limit or nothing 10851 // changes. 10852 if (Changed) 10853 return SimplifyICmpOperands(Pred, LHS, RHS, Depth + 1, 10854 ControllingFiniteLoop); 10855 10856 return Changed; 10857 } 10858 10859 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 10860 return getSignedRangeMax(S).isNegative(); 10861 } 10862 10863 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 10864 return getSignedRangeMin(S).isStrictlyPositive(); 10865 } 10866 10867 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 10868 return !getSignedRangeMin(S).isNegative(); 10869 } 10870 10871 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 10872 return !getSignedRangeMax(S).isStrictlyPositive(); 10873 } 10874 10875 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 10876 return getUnsignedRangeMin(S) != 0; 10877 } 10878 10879 std::pair<const SCEV *, const SCEV *> 10880 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 10881 // Compute SCEV on entry of loop L. 10882 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 10883 if (Start == getCouldNotCompute()) 10884 return { Start, Start }; 10885 // Compute post increment SCEV for loop L. 
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}

bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (const auto *L1 : LoopsUsed)
    for (const auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
                          return DT.properlyDominates(L1->getHeader(),
                                                      L2->getHeader());
                        });

  // Get init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV, bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV, bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that the init SCEV contains an invariant load that does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check for that here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  // The backedge guard check appears to be faster than the entry one, so
  // checking it first can short-circuit and speed up the whole estimation.
  return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second) &&
         isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}

std::optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
                                                       const SCEV *LHS,
                                                       const SCEV *RHS) {
  if (isKnownPredicate(Pred, LHS, RHS))
    return true;
  else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
    return false;
  return std::nullopt;
}

bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
                                         const SCEV *LHS, const SCEV *RHS,
                                         const Instruction *CtxI) {
  // TODO: Analyze guards and assumes from Context's block.
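  // First try context-free reasoning; failing that, check whether entry into
  // CtxI's block is guarded by the desired condition.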
10971 return isKnownPredicate(Pred, LHS, RHS) || 10972 isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS); 10973 } 10974 10975 std::optional<bool> 10976 ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS, 10977 const SCEV *RHS, const Instruction *CtxI) { 10978 std::optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS); 10979 if (KnownWithoutContext) 10980 return KnownWithoutContext; 10981 10982 if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS)) 10983 return true; 10984 else if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), 10985 ICmpInst::getInversePredicate(Pred), 10986 LHS, RHS)) 10987 return false; 10988 return std::nullopt; 10989 } 10990 10991 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 10992 const SCEVAddRecExpr *LHS, 10993 const SCEV *RHS) { 10994 const Loop *L = LHS->getLoop(); 10995 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 10996 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 10997 } 10998 10999 std::optional<ScalarEvolution::MonotonicPredicateType> 11000 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS, 11001 ICmpInst::Predicate Pred) { 11002 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred); 11003 11004 #ifndef NDEBUG 11005 // Verify an invariant: inverting the predicate should turn a monotonically 11006 // increasing change to a monotonically decreasing one, and vice versa. 11007 if (Result) { 11008 auto ResultSwapped = 11009 getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred)); 11010 11011 assert(*ResultSwapped != *Result && 11012 "monotonicity should flip as we flip the predicate"); 11013 } 11014 #endif 11015 11016 return Result; 11017 } 11018 11019 std::optional<ScalarEvolution::MonotonicPredicateType> 11020 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS, 11021 ICmpInst::Predicate Pred) { 11022 // A zero step value for LHS means the induction variable is essentially a 11023 // loop invariant value. We don't really depend on the predicate actually 11024 // flipping from false to true (for increasing predicates, and the other way 11025 // around for decreasing predicates), all we care about is that *if* the 11026 // predicate changes then it only changes from false to true. 11027 // 11028 // A zero step value in itself is not very useful, but there may be places 11029 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 11030 // as general as possible. 11031 11032 // Only handle LE/LT/GE/GT predicates. 11033 if (!ICmpInst::isRelational(Pred)) 11034 return std::nullopt; 11035 11036 bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred); 11037 assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) && 11038 "Should be greater or less!"); 11039 11040 // Check that AR does not wrap. 11041 if (ICmpInst::isUnsigned(Pred)) { 11042 if (!LHS->hasNoUnsignedWrap()) 11043 return std::nullopt; 11044 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 11045 } else { 11046 assert(ICmpInst::isSigned(Pred) && 11047 "Relational predicate is either signed or unsigned!"); 11048 if (!LHS->hasNoSignedWrap()) 11049 return std::nullopt; 11050 11051 const SCEV *Step = LHS->getStepRecurrence(*this); 11052 11053 if (isKnownNonNegative(Step)) 11054 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 11055 11056 if (isKnownNonPositive(Step)) 11057 return !IsGreater ? 
MonotonicallyIncreasing : MonotonicallyDecreasing; 11058 11059 return std::nullopt; 11060 } 11061 } 11062 11063 std::optional<ScalarEvolution::LoopInvariantPredicate> 11064 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred, 11065 const SCEV *LHS, const SCEV *RHS, 11066 const Loop *L, 11067 const Instruction *CtxI) { 11068 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 11069 if (!isLoopInvariant(RHS, L)) { 11070 if (!isLoopInvariant(LHS, L)) 11071 return std::nullopt; 11072 11073 std::swap(LHS, RHS); 11074 Pred = ICmpInst::getSwappedPredicate(Pred); 11075 } 11076 11077 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 11078 if (!ArLHS || ArLHS->getLoop() != L) 11079 return std::nullopt; 11080 11081 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred); 11082 if (!MonotonicType) 11083 return std::nullopt; 11084 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 11085 // true as the loop iterates, and the backedge is control dependent on 11086 // "ArLHS `Pred` RHS" == true then we can reason as follows: 11087 // 11088 // * if the predicate was false in the first iteration then the predicate 11089 // is never evaluated again, since the loop exits without taking the 11090 // backedge. 11091 // * if the predicate was true in the first iteration then it will 11092 // continue to be true for all future iterations since it is 11093 // monotonically increasing. 11094 // 11095 // For both the above possibilities, we can replace the loop varying 11096 // predicate with its value on the first iteration of the loop (which is 11097 // loop invariant). 11098 // 11099 // A similar reasoning applies for a monotonically decreasing predicate, by 11100 // replacing true with false and false with true in the above two bullets. 11101 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing; 11102 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 11103 11104 if (isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 11105 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), 11106 RHS); 11107 11108 if (!CtxI) 11109 return std::nullopt; 11110 // Try to prove via context. 11111 // TODO: Support other cases. 11112 switch (Pred) { 11113 default: 11114 break; 11115 case ICmpInst::ICMP_ULE: 11116 case ICmpInst::ICMP_ULT: { 11117 assert(ArLHS->hasNoUnsignedWrap() && "Is a requirement of monotonicity!"); 11118 // Given preconditions 11119 // (1) ArLHS does not cross the border of positive and negative parts of 11120 // range because of: 11121 // - Positive step; (TODO: lift this limitation) 11122 // - nuw - does not cross zero boundary; 11123 // - nsw - does not cross SINT_MAX boundary; 11124 // (2) ArLHS <s RHS 11125 // (3) RHS >=s 0 11126 // we can replace the loop variant ArLHS <u RHS condition with loop 11127 // invariant Start(ArLHS) <u RHS. 11128 // 11129 // Because of (1) there are two options: 11130 // - ArLHS is always negative. It means that ArLHS <u RHS is always false; 11131 // - ArLHS is always non-negative. Because of (3) RHS is also non-negative. 11132 // It means that ArLHS <s RHS <=> ArLHS <u RHS. 11133 // Because of (2) ArLHS <u RHS is trivially true. 11134 // All together it means that ArLHS <u RHS <=> Start(ArLHS) >=s 0. 11135 // We can strengthen this to Start(ArLHS) <u RHS. 
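    // As a concrete instance of the reasoning above: for
    // ArLHS = {0,+,1}<nuw><nsw> and a loop-invariant RHS with RHS >=s 0,
    // knowing "ArLHS <s RHS" at CtxI lets us replace the loop-varying
    // "ArLHS <u RHS" with the loop-invariant "0 <u RHS".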
    auto SignFlippedPred = ICmpInst::getFlippedSignednessPredicate(Pred);
    if (ArLHS->hasNoSignedWrap() && ArLHS->isAffine() &&
        isKnownPositive(ArLHS->getStepRecurrence(*this)) &&
        isKnownNonNegative(RHS) &&
        isKnownPredicateAt(SignFlippedPred, ArLHS, RHS, CtxI))
      return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(),
                                                     RHS);
  }
  }

  return std::nullopt;
}

std::optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    const Instruction *CtxI, const SCEV *MaxIter) {
  if (auto LIP = getLoopInvariantExitCondDuringFirstIterationsImpl(
          Pred, LHS, RHS, L, CtxI, MaxIter))
    return LIP;
  if (auto *UMin = dyn_cast<SCEVUMinExpr>(MaxIter))
    // Number of iterations expressed as UMIN isn't always great for
    // expressing the value on the last iteration. If the straightforward
    // approach didn't work, try the following trick: if a predicate is
    // invariant for X, it is also invariant for umin(X, ...). So try to find
    // something that works among subexpressions of MaxIter expressed as umin.
    for (auto *Op : UMin->operands())
      if (auto LIP = getLoopInvariantExitCondDuringFirstIterationsImpl(
              Pred, LHS, RHS, L, CtxI, Op))
        return LIP;
  return std::nullopt;
}

std::optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantExitCondDuringFirstIterationsImpl(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    const Instruction *CtxI, const SCEV *MaxIter) {
  // Try to prove the following set of facts:
  // - The predicate is monotonic in the iteration space.
  // - If the check does not fail on the 1st iteration:
  //   - No overflow will happen during first MaxIter iterations;
  //   - It will not fail on the MaxIter'th iteration.
  // If the check does fail on the 1st iteration, we leave the loop and no
  // other checks matter.

  // If there is a loop-invariant operand, force it into the RHS, otherwise
  // bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return std::nullopt;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AR || AR->getLoop() != L)
    return std::nullopt;

  // The predicate must be relational (i.e. <, <=, >=, >).
  if (!ICmpInst::isRelational(Pred))
    return std::nullopt;

  // TODO: Support steps other than +/- 1.
  const SCEV *Step = AR->getStepRecurrence(*this);
  auto *One = getOne(Step->getType());
  auto *MinusOne = getNegativeSCEV(One);
  if (Step != One && Step != MinusOne)
    return std::nullopt;

  // A type mismatch here means that MaxIter is potentially larger than the
  // max unsigned value in the start type, which means we cannot prove no-wrap
  // for the indvar.
  if (AR->getType() != MaxIter->getType())
    return std::nullopt;

  // Value of IV on the suggested last iteration.
  const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
  // Does it still meet the requirement?
  if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
    return std::nullopt;
  // Because the step is +/- 1 and MaxIter has the same type as Start (i.e.
it does 11217 // not exceed max unsigned value of this type), this effectively proves 11218 // that there is no wrap during the iteration. To prove that there is no 11219 // signed/unsigned wrap, we need to check that 11220 // Start <= Last for step = 1 or Start >= Last for step = -1. 11221 ICmpInst::Predicate NoOverflowPred = 11222 CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 11223 if (Step == MinusOne) 11224 NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred); 11225 const SCEV *Start = AR->getStart(); 11226 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI)) 11227 return std::nullopt; 11228 11229 // Everything is fine. 11230 return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS); 11231 } 11232 11233 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 11234 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 11235 if (HasSameValue(LHS, RHS)) 11236 return ICmpInst::isTrueWhenEqual(Pred); 11237 11238 // This code is split out from isKnownPredicate because it is called from 11239 // within isLoopEntryGuardedByCond. 11240 11241 auto CheckRanges = [&](const ConstantRange &RangeLHS, 11242 const ConstantRange &RangeRHS) { 11243 return RangeLHS.icmp(Pred, RangeRHS); 11244 }; 11245 11246 // The check at the top of the function catches the case where the values are 11247 // known to be equal. 11248 if (Pred == CmpInst::ICMP_EQ) 11249 return false; 11250 11251 if (Pred == CmpInst::ICMP_NE) { 11252 auto SL = getSignedRange(LHS); 11253 auto SR = getSignedRange(RHS); 11254 if (CheckRanges(SL, SR)) 11255 return true; 11256 auto UL = getUnsignedRange(LHS); 11257 auto UR = getUnsignedRange(RHS); 11258 if (CheckRanges(UL, UR)) 11259 return true; 11260 auto *Diff = getMinusSCEV(LHS, RHS); 11261 return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff); 11262 } 11263 11264 if (CmpInst::isSigned(Pred)) { 11265 auto SL = getSignedRange(LHS); 11266 auto SR = getSignedRange(RHS); 11267 return CheckRanges(SL, SR); 11268 } 11269 11270 auto UL = getUnsignedRange(LHS); 11271 auto UR = getUnsignedRange(RHS); 11272 return CheckRanges(UL, UR); 11273 } 11274 11275 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 11276 const SCEV *LHS, 11277 const SCEV *RHS) { 11278 // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where 11279 // C1 and C2 are constant integers. If either X or Y are not add expressions, 11280 // consider them as X + 0 and Y + 0 respectively. C1 and C2 are returned via 11281 // OutC1 and OutC2. 
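  // For instance, matching X = (%a + 10)<nsw> against Y = (%a + 20)<nsw> with
  // ExpectedFlags = FlagNSW yields OutC1 = 10 and OutC2 = 20; the ICMP_SLE
  // case below then proves "X s<= Y" because 10 s<= 20.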
11282 auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y, 11283 APInt &OutC1, APInt &OutC2, 11284 SCEV::NoWrapFlags ExpectedFlags) { 11285 const SCEV *XNonConstOp, *XConstOp; 11286 const SCEV *YNonConstOp, *YConstOp; 11287 SCEV::NoWrapFlags XFlagsPresent; 11288 SCEV::NoWrapFlags YFlagsPresent; 11289 11290 if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) { 11291 XConstOp = getZero(X->getType()); 11292 XNonConstOp = X; 11293 XFlagsPresent = ExpectedFlags; 11294 } 11295 if (!isa<SCEVConstant>(XConstOp) || 11296 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) 11297 return false; 11298 11299 if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) { 11300 YConstOp = getZero(Y->getType()); 11301 YNonConstOp = Y; 11302 YFlagsPresent = ExpectedFlags; 11303 } 11304 11305 if (!isa<SCEVConstant>(YConstOp) || 11306 (YFlagsPresent & ExpectedFlags) != ExpectedFlags) 11307 return false; 11308 11309 if (YNonConstOp != XNonConstOp) 11310 return false; 11311 11312 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt(); 11313 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt(); 11314 11315 return true; 11316 }; 11317 11318 APInt C1; 11319 APInt C2; 11320 11321 switch (Pred) { 11322 default: 11323 break; 11324 11325 case ICmpInst::ICMP_SGE: 11326 std::swap(LHS, RHS); 11327 [[fallthrough]]; 11328 case ICmpInst::ICMP_SLE: 11329 // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2. 11330 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2)) 11331 return true; 11332 11333 break; 11334 11335 case ICmpInst::ICMP_SGT: 11336 std::swap(LHS, RHS); 11337 [[fallthrough]]; 11338 case ICmpInst::ICMP_SLT: 11339 // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2. 11340 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2)) 11341 return true; 11342 11343 break; 11344 11345 case ICmpInst::ICMP_UGE: 11346 std::swap(LHS, RHS); 11347 [[fallthrough]]; 11348 case ICmpInst::ICMP_ULE: 11349 // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2. 11350 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2)) 11351 return true; 11352 11353 break; 11354 11355 case ICmpInst::ICMP_UGT: 11356 std::swap(LHS, RHS); 11357 [[fallthrough]]; 11358 case ICmpInst::ICMP_ULT: 11359 // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2. 11360 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2)) 11361 return true; 11362 break; 11363 } 11364 11365 return false; 11366 } 11367 11368 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 11369 const SCEV *LHS, 11370 const SCEV *RHS) { 11371 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 11372 return false; 11373 11374 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 11375 // the stack can result in exponential time complexity. 11376 SaveAndRestore Restore(ProvingSplitPredicate, true); 11377 11378 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 11379 // 11380 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 11381 // isKnownPredicate. isKnownPredicate is more powerful, but also more 11382 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 11383 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 11384 // use isKnownPredicate later if needed. 
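  // For example, to prove "%i u< %len" given "%len >=s 0", it suffices to
  // prove "%i s>= 0" and "%i s< %len", which is exactly the conjunction
  // checked below.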
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](const Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding). Do not bother with
  // unreachable loops.
  if (!L || !DT.isReachableFromEntry(L->getHeader()))
    return true;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times. This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
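  // An assume whose condition dominates the latch holds on every path to the
  // backedge, so it can feed the same implication logic as the latch
  // condition above.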
11464 for (auto &AssumeVH : AC.assumptions()) { 11465 if (!AssumeVH) 11466 continue; 11467 auto *CI = cast<CallInst>(AssumeVH); 11468 if (!DT.dominates(CI, Latch->getTerminator())) 11469 continue; 11470 11471 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 11472 return true; 11473 } 11474 11475 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 11476 return true; 11477 11478 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 11479 DTN != HeaderDTN; DTN = DTN->getIDom()) { 11480 assert(DTN && "should reach the loop header before reaching the root!"); 11481 11482 BasicBlock *BB = DTN->getBlock(); 11483 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 11484 return true; 11485 11486 BasicBlock *PBB = BB->getSinglePredecessor(); 11487 if (!PBB) 11488 continue; 11489 11490 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 11491 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 11492 continue; 11493 11494 Value *Condition = ContinuePredicate->getCondition(); 11495 11496 // If we have an edge `E` within the loop body that dominates the only 11497 // latch, the condition guarding `E` also guards the backedge. This 11498 // reasoning works only for loops with a single latch. 11499 11500 BasicBlockEdge DominatingEdge(PBB, BB); 11501 if (DominatingEdge.isSingleEdge()) { 11502 // We're constructively (and conservatively) enumerating edges within the 11503 // loop body that dominate the latch. The dominator tree better agree 11504 // with us on this: 11505 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 11506 11507 if (isImpliedCond(Pred, LHS, RHS, Condition, 11508 BB != ContinuePredicate->getSuccessor(0))) 11509 return true; 11510 } 11511 } 11512 11513 return false; 11514 } 11515 11516 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 11517 ICmpInst::Predicate Pred, 11518 const SCEV *LHS, 11519 const SCEV *RHS) { 11520 // Do not bother proving facts for unreachable code. 11521 if (!DT.isReachableFromEntry(BB)) 11522 return true; 11523 if (VerifyIR) 11524 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 11525 "This cannot be done on broken IR!"); 11526 11527 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 11528 // the facts (a >= b && a != b) separately. A typical situation is when the 11529 // non-strict comparison is known from ranges and non-equality is known from 11530 // dominating predicates. If we are proving strict comparison, we always try 11531 // to prove non-equality and non-strict comparison separately. 11532 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 11533 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 11534 bool ProvedNonStrictComparison = false; 11535 bool ProvedNonEquality = false; 11536 11537 auto SplitAndProve = 11538 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool { 11539 if (!ProvedNonStrictComparison) 11540 ProvedNonStrictComparison = Fn(NonStrictPredicate); 11541 if (!ProvedNonEquality) 11542 ProvedNonEquality = Fn(ICmpInst::ICMP_NE); 11543 if (ProvedNonStrictComparison && ProvedNonEquality) 11544 return true; 11545 return false; 11546 }; 11547 11548 if (ProvingStrictComparison) { 11549 auto ProofFn = [&](ICmpInst::Predicate P) { 11550 return isKnownViaNonRecursiveReasoning(P, LHS, RHS); 11551 }; 11552 if (SplitAndProve(ProofFn)) 11553 return true; 11554 } 11555 11556 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 
  auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
    const Instruction *CtxI = &BB->front();
    if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, CtxI))
      return true;
    if (ProvingStrictComparison) {
      auto ProofFn = [&](ICmpInst::Predicate P) {
        return isImpliedCond(P, LHS, RHS, Condition, Inverse, CtxI);
      };
      if (SplitAndProve(ProofFn))
        return true;
    }
    return false;
  };

  // Starting at the block's predecessor, climb up the predecessor chain as
  // long as we can find predecessors that have a unique successor leading to
  // the original block.
  const Loop *ContainingLoop = LI.getLoopFor(BB);
  const BasicBlock *PredBB;
  if (ContainingLoop && ContainingLoop->getHeader() == BB)
    PredBB = ContainingLoop->getLoopPredecessor();
  else
    PredBB = BB->getSinglePredecessor();
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
    const BranchInst *BlockEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!BlockEntryPredicate || BlockEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(BlockEntryPredicate->getCondition(),
                     BlockEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, BB))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  // Check conditions due to any @llvm.experimental.guard intrinsics.
  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  if (GuardDecl)
    for (const auto *GU : GuardDecl->users())
      if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
        if (Guard->getFunction() == BB->getParent() && DT.dominates(Guard, BB))
          if (ProveViaCond(Guard->getArgOperand(0), false))
            return true;
  return false;
}

bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                               ICmpInst::Predicate Pred,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L)
    return false;

  // Both LHS and RHS must be available at loop entry.
  assert(isAvailableAtLoopEntry(LHS, L) &&
         "LHS is not available at Loop Entry");
  assert(isAvailableAtLoopEntry(RHS, L) &&
         "RHS is not available at Loop Entry");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    const Value *FoundCondValue, bool Inverse,
                                    const Instruction *CtxI) {
  // A false condition implies anything. Do not bother analyzing it further.
  if (FoundCondValue ==
      ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
    return true;

  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
  const Value *Op0, *Op1;
  if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
    if (!Inverse)
      return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
             isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
  } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
    if (Inverse)
      return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
             isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
  }

  const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Now that we have found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS, const SCEV *FoundRHS,
                                    const Instruction *CtxI) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    // For unsigned and equality predicates, try to prove that both found
    // operands fit into a narrow unsigned range. If so, try to prove facts in
    // narrow types.
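    // (For instance, if LHS/RHS are i8 while FoundLHS and FoundRHS are i16
    // values known to be u<= 0xFF, the found operands can be truncated to i8
    // and the implication checked entirely in i8.)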
11692 if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy() && 11693 !FoundRHS->getType()->isPointerTy()) { 11694 auto *NarrowType = LHS->getType(); 11695 auto *WideType = FoundLHS->getType(); 11696 auto BitWidth = getTypeSizeInBits(NarrowType); 11697 const SCEV *MaxValue = getZeroExtendExpr( 11698 getConstant(APInt::getMaxValue(BitWidth)), WideType); 11699 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundLHS, 11700 MaxValue) && 11701 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundRHS, 11702 MaxValue)) { 11703 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType); 11704 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType); 11705 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS, 11706 TruncFoundRHS, CtxI)) 11707 return true; 11708 } 11709 } 11710 11711 if (LHS->getType()->isPointerTy() || RHS->getType()->isPointerTy()) 11712 return false; 11713 if (CmpInst::isSigned(Pred)) { 11714 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 11715 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 11716 } else { 11717 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 11718 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 11719 } 11720 } else if (getTypeSizeInBits(LHS->getType()) > 11721 getTypeSizeInBits(FoundLHS->getType())) { 11722 if (FoundLHS->getType()->isPointerTy() || FoundRHS->getType()->isPointerTy()) 11723 return false; 11724 if (CmpInst::isSigned(FoundPred)) { 11725 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 11726 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 11727 } else { 11728 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 11729 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 11730 } 11731 } 11732 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, 11733 FoundRHS, CtxI); 11734 } 11735 11736 bool ScalarEvolution::isImpliedCondBalancedTypes( 11737 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 11738 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, 11739 const Instruction *CtxI) { 11740 assert(getTypeSizeInBits(LHS->getType()) == 11741 getTypeSizeInBits(FoundLHS->getType()) && 11742 "Types should be balanced!"); 11743 // Canonicalize the query to match the way instcombine will have 11744 // canonicalized the comparison. 11745 if (SimplifyICmpOperands(Pred, LHS, RHS)) 11746 if (LHS == RHS) 11747 return CmpInst::isTrueWhenEqual(Pred); 11748 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 11749 if (FoundLHS == FoundRHS) 11750 return CmpInst::isFalseWhenEqual(FoundPred); 11751 11752 // Check to see if we can make the LHS or RHS match. 11753 if (LHS == FoundRHS || RHS == FoundLHS) { 11754 if (isa<SCEVConstant>(RHS)) { 11755 std::swap(FoundLHS, FoundRHS); 11756 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 11757 } else { 11758 std::swap(LHS, RHS); 11759 Pred = ICmpInst::getSwappedPredicate(Pred); 11760 } 11761 } 11762 11763 // Check whether the found predicate is the same as the desired predicate. 11764 if (FoundPred == Pred) 11765 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); 11766 11767 // Check whether swapping the found predicate makes it the same as the 11768 // desired predicate. 11769 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 11770 // We can write the implication 11771 // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS 11772 // using one of the following ways: 11773 // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS 11774 // 2. 
RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS 11775 // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS 11776 // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS 11777 // Forms 1. and 2. require swapping the operands of one condition. Don't 11778 // do this if it would break canonical constant/addrec ordering. 11779 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS)) 11780 return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS, 11781 CtxI); 11782 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS)) 11783 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI); 11784 11785 // There's no clear preference between forms 3. and 4., try both. Avoid 11786 // forming getNotSCEV of pointer values as the resulting subtract is 11787 // not legal. 11788 if (!LHS->getType()->isPointerTy() && !RHS->getType()->isPointerTy() && 11789 isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS), 11790 FoundLHS, FoundRHS, CtxI)) 11791 return true; 11792 11793 if (!FoundLHS->getType()->isPointerTy() && 11794 !FoundRHS->getType()->isPointerTy() && 11795 isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS), 11796 getNotSCEV(FoundRHS), CtxI)) 11797 return true; 11798 11799 return false; 11800 } 11801 11802 auto IsSignFlippedPredicate = [](CmpInst::Predicate P1, 11803 CmpInst::Predicate P2) { 11804 assert(P1 != P2 && "Handled earlier!"); 11805 return CmpInst::isRelational(P2) && 11806 P1 == CmpInst::getFlippedSignednessPredicate(P2); 11807 }; 11808 if (IsSignFlippedPredicate(Pred, FoundPred)) { 11809 // Unsigned comparison is the same as signed comparison when both the 11810 // operands are non-negative or negative. 11811 if ((isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) || 11812 (isKnownNegative(FoundLHS) && isKnownNegative(FoundRHS))) 11813 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); 11814 // Create local copies that we can freely swap and canonicalize our 11815 // conditions to "le/lt". 11816 ICmpInst::Predicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred; 11817 const SCEV *CanonicalLHS = LHS, *CanonicalRHS = RHS, 11818 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS; 11819 if (ICmpInst::isGT(CanonicalPred) || ICmpInst::isGE(CanonicalPred)) { 11820 CanonicalPred = ICmpInst::getSwappedPredicate(CanonicalPred); 11821 CanonicalFoundPred = ICmpInst::getSwappedPredicate(CanonicalFoundPred); 11822 std::swap(CanonicalLHS, CanonicalRHS); 11823 std::swap(CanonicalFoundLHS, CanonicalFoundRHS); 11824 } 11825 assert((ICmpInst::isLT(CanonicalPred) || ICmpInst::isLE(CanonicalPred)) && 11826 "Must be!"); 11827 assert((ICmpInst::isLT(CanonicalFoundPred) || 11828 ICmpInst::isLE(CanonicalFoundPred)) && 11829 "Must be!"); 11830 if (ICmpInst::isSigned(CanonicalPred) && isKnownNonNegative(CanonicalRHS)) 11831 // Use implication: 11832 // x <u y && y >=s 0 --> x <s y. 11833 // If we can prove the left part, the right part is also proven. 11834 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS, 11835 CanonicalRHS, CanonicalFoundLHS, 11836 CanonicalFoundRHS); 11837 if (ICmpInst::isUnsigned(CanonicalPred) && isKnownNegative(CanonicalRHS)) 11838 // Use implication: 11839 // x <s y && y <s 0 --> x <u y. 11840 // If we can prove the left part, the right part is also proven. 11841 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS, 11842 CanonicalRHS, CanonicalFoundLHS, 11843 CanonicalFoundRHS); 11844 } 11845 11846 // Check if we can make progress by sharpening ranges. 
11847 if (FoundPred == ICmpInst::ICMP_NE && 11848 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 11849 11850 const SCEVConstant *C = nullptr; 11851 const SCEV *V = nullptr; 11852 11853 if (isa<SCEVConstant>(FoundLHS)) { 11854 C = cast<SCEVConstant>(FoundLHS); 11855 V = FoundRHS; 11856 } else { 11857 C = cast<SCEVConstant>(FoundRHS); 11858 V = FoundLHS; 11859 } 11860 11861 // The guarding predicate tells us that C != V. If the known range 11862 // of V is [C, t), we can sharpen the range to [C + 1, t). The 11863 // range we consider has to correspond to same signedness as the 11864 // predicate we're interested in folding. 11865 11866 APInt Min = ICmpInst::isSigned(Pred) ? 11867 getSignedRangeMin(V) : getUnsignedRangeMin(V); 11868 11869 if (Min == C->getAPInt()) { 11870 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 11871 // This is true even if (Min + 1) wraps around -- in case of 11872 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 11873 11874 APInt SharperMin = Min + 1; 11875 11876 switch (Pred) { 11877 case ICmpInst::ICMP_SGE: 11878 case ICmpInst::ICMP_UGE: 11879 // We know V `Pred` SharperMin. If this implies LHS `Pred` 11880 // RHS, we're done. 11881 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin), 11882 CtxI)) 11883 return true; 11884 [[fallthrough]]; 11885 11886 case ICmpInst::ICMP_SGT: 11887 case ICmpInst::ICMP_UGT: 11888 // We know from the range information that (V `Pred` Min || 11889 // V == Min). We know from the guarding condition that !(V 11890 // == Min). This gives us 11891 // 11892 // V `Pred` Min || V == Min && !(V == Min) 11893 // => V `Pred` Min 11894 // 11895 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 11896 11897 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI)) 11898 return true; 11899 break; 11900 11901 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. 11902 case ICmpInst::ICMP_SLE: 11903 case ICmpInst::ICMP_ULE: 11904 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 11905 LHS, V, getConstant(SharperMin), CtxI)) 11906 return true; 11907 [[fallthrough]]; 11908 11909 case ICmpInst::ICMP_SLT: 11910 case ICmpInst::ICMP_ULT: 11911 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 11912 LHS, V, getConstant(Min), CtxI)) 11913 return true; 11914 break; 11915 11916 default: 11917 // No change 11918 break; 11919 } 11920 } 11921 } 11922 11923 // Check whether the actual condition is beyond sufficient. 11924 if (FoundPred == ICmpInst::ICMP_EQ) 11925 if (ICmpInst::isTrueWhenEqual(Pred)) 11926 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) 11927 return true; 11928 if (Pred == ICmpInst::ICMP_NE) 11929 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 11930 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) 11931 return true; 11932 11933 // Otherwise assume the worst. 
11934 return false; 11935 } 11936 11937 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 11938 const SCEV *&L, const SCEV *&R, 11939 SCEV::NoWrapFlags &Flags) { 11940 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 11941 if (!AE || AE->getNumOperands() != 2) 11942 return false; 11943 11944 L = AE->getOperand(0); 11945 R = AE->getOperand(1); 11946 Flags = AE->getNoWrapFlags(); 11947 return true; 11948 } 11949 11950 std::optional<APInt> 11951 ScalarEvolution::computeConstantDifference(const SCEV *More, const SCEV *Less) { 11952 // We avoid subtracting expressions here because this function is usually 11953 // fairly deep in the call stack (i.e. is called many times). 11954 11955 // X - X = 0. 11956 if (More == Less) 11957 return APInt(getTypeSizeInBits(More->getType()), 0); 11958 11959 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 11960 const auto *LAR = cast<SCEVAddRecExpr>(Less); 11961 const auto *MAR = cast<SCEVAddRecExpr>(More); 11962 11963 if (LAR->getLoop() != MAR->getLoop()) 11964 return std::nullopt; 11965 11966 // We look at affine expressions only; not for correctness but to keep 11967 // getStepRecurrence cheap. 11968 if (!LAR->isAffine() || !MAR->isAffine()) 11969 return std::nullopt; 11970 11971 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 11972 return std::nullopt; 11973 11974 Less = LAR->getStart(); 11975 More = MAR->getStart(); 11976 11977 // fall through 11978 } 11979 11980 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 11981 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 11982 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 11983 return M - L; 11984 } 11985 11986 SCEV::NoWrapFlags Flags; 11987 const SCEV *LLess = nullptr, *RLess = nullptr; 11988 const SCEV *LMore = nullptr, *RMore = nullptr; 11989 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 11990 // Compare (X + C1) vs X. 11991 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 11992 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 11993 if (RLess == More) 11994 return -(C1->getAPInt()); 11995 11996 // Compare X vs (X + C2). 11997 if (splitBinaryAdd(More, LMore, RMore, Flags)) 11998 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 11999 if (RMore == Less) 12000 return C2->getAPInt(); 12001 12002 // Compare (X + C1) vs (X + C2). 12003 if (C1 && C2 && RLess == RMore) 12004 return C2->getAPInt() - C1->getAPInt(); 12005 12006 return std::nullopt; 12007 } 12008 12009 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( 12010 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 12011 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) { 12012 // Try to recognize the following pattern: 12013 // 12014 // FoundRHS = ... 12015 // ... 12016 // loop: 12017 // FoundLHS = {Start,+,W} 12018 // context_bb: // Basic block from the same loop 12019 // known(Pred, FoundLHS, FoundRHS) 12020 // 12021 // If some predicate is known in the context of a loop, it is also known on 12022 // each iteration of this loop, including the first iteration. Therefore, in 12023 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to 12024 // prove the original pred using this fact. 12025 if (!CtxI) 12026 return false; 12027 const BasicBlock *ContextBB = CtxI->getParent(); 12028 // Make sure AR varies in the context block. 12029 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { 12030 const Loop *L = AR->getLoop(); 12031 // Make sure that context belongs to the loop and executes on 1st iteration 12032 // (if it ever executes at all). 
12033 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 12034 return false; 12035 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) 12036 return false; 12037 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); 12038 } 12039 12040 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { 12041 const Loop *L = AR->getLoop(); 12042 // Make sure that context belongs to the loop and executes on 1st iteration 12043 // (if it ever executes at all). 12044 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 12045 return false; 12046 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) 12047 return false; 12048 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); 12049 } 12050 12051 return false; 12052 } 12053 12054 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 12055 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 12056 const SCEV *FoundLHS, const SCEV *FoundRHS) { 12057 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 12058 return false; 12059 12060 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 12061 if (!AddRecLHS) 12062 return false; 12063 12064 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 12065 if (!AddRecFoundLHS) 12066 return false; 12067 12068 // We'd like to let SCEV reason about control dependencies, so we constrain 12069 // both the inequalities to be about add recurrences on the same loop. This 12070 // way we can use isLoopEntryGuardedByCond later. 12071 12072 const Loop *L = AddRecFoundLHS->getLoop(); 12073 if (L != AddRecLHS->getLoop()) 12074 return false; 12075 12076 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 12077 // 12078 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 12079 // ... (2) 12080 // 12081 // Informal proof for (2), assuming (1) [*]: 12082 // 12083 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 12084 // 12085 // Then 12086 // 12087 // FoundLHS s< FoundRHS s< INT_MIN - C 12088 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 12089 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 12090 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 12091 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 12092 // <=> FoundLHS + C s< FoundRHS + C 12093 // 12094 // [*]: (1) can be proved by ruling out overflow. 12095 // 12096 // [**]: This can be proved by analyzing all the four possibilities: 12097 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 12098 // (A s>= 0, B s>= 0). 12099 // 12100 // Note: 12101 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 12102 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 12103 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 12104 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 12105 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 12106 // C)". 
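// As a concrete instance of (1) in i8: with C = 20, FoundLHS = 10 u< FoundRHS // = 30 u< -C = 236, hence FoundLHS + C = 30 u< FoundRHS + C = 50; the bound // FoundRHS u< -C is exactly what rules out unsigned wrap in both additions.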
12107 12108 std::optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 12109 std::optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 12110 if (!LDiff || !RDiff || *LDiff != *RDiff) 12111 return false; 12112 12113 if (LDiff->isMinValue()) 12114 return true; 12115 12116 APInt FoundRHSLimit; 12117 12118 if (Pred == CmpInst::ICMP_ULT) { 12119 FoundRHSLimit = -(*RDiff); 12120 } else { 12121 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 12122 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 12123 } 12124 12125 // Try to prove (1) or (2), as needed. 12126 return isAvailableAtLoopEntry(FoundRHS, L) && 12127 isLoopEntryGuardedByCond(L, Pred, FoundRHS, 12128 getConstant(FoundRHSLimit)); 12129 } 12130 12131 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred, 12132 const SCEV *LHS, const SCEV *RHS, 12133 const SCEV *FoundLHS, 12134 const SCEV *FoundRHS, unsigned Depth) { 12135 const PHINode *LPhi = nullptr, *RPhi = nullptr; 12136 12137 auto ClearOnExit = make_scope_exit([&]() { 12138 if (LPhi) { 12139 bool Erased = PendingMerges.erase(LPhi); 12140 assert(Erased && "Failed to erase LPhi!"); 12141 (void)Erased; 12142 } 12143 if (RPhi) { 12144 bool Erased = PendingMerges.erase(RPhi); 12145 assert(Erased && "Failed to erase RPhi!"); 12146 (void)Erased; 12147 } 12148 }); 12149 12150 // Find the respective Phis and check that they are not already pending. 12151 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) 12152 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) { 12153 if (!PendingMerges.insert(Phi).second) 12154 return false; 12155 LPhi = Phi; 12156 } 12157 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS)) 12158 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) { 12159 // If we detect a loop of Phi nodes being processed by this method, for 12160 // example: 12161 // 12162 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ] 12163 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ] 12164 // 12165 // we don't want to deal with a case that complex, so we conservatively 12166 // return false. 12167 if (!PendingMerges.insert(Phi).second) 12168 return false; 12169 RPhi = Phi; 12170 } 12171 12172 // If neither LHS nor RHS is a Phi, there is nothing to do here. 12173 if (!LPhi && !RPhi) 12174 return false; 12175 12176 // If there is a SCEVUnknown Phi we are interested in, make it the LHS. 12177 if (!LPhi) { 12178 std::swap(LHS, RHS); 12179 std::swap(FoundLHS, FoundRHS); 12180 std::swap(LPhi, RPhi); 12181 Pred = ICmpInst::getSwappedPredicate(Pred); 12182 } 12183 12184 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!"); 12185 const BasicBlock *LBB = LPhi->getParent(); 12186 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 12187 12188 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) { 12189 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) || 12190 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) || 12191 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth); 12192 }; 12193 12194 if (RPhi && RPhi->getParent() == LBB) { 12195 // Case one: RHS is also a SCEVUnknown Phi from the same basic block. 12196 // If we compare two Phis from the same block, and for each entry block 12197 // the predicate is true for incoming values from this block, then the 12198 // predicate is also true for the Phis.
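// For example, with %a = phi [0, %bb1], [1, %bb2] and %b = phi [2, %bb1], // [3, %bb2]: since 0 < 2 and 1 < 3 hold per incoming block, %a < %b holds for // the Phis themselves.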
12199 for (const BasicBlock *IncBB : predecessors(LBB)) { 12200 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 12201 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB)); 12202 if (!ProvedEasily(L, R)) 12203 return false; 12204 } 12205 } else if (RAR && RAR->getLoop()->getHeader() == LBB) { 12206 // Case two: RHS is also a Phi from the same basic block, and it is an 12207 // AddRec. This means that there is a loop which has both AddRec and Unknown 12208 // PHIs; for it, we can compare the incoming values of the AddRec from above 12209 // the loop and from the latch with their respective incoming values of LPhi. 12210 // TODO: Generalize to handle loops with many inputs in a header. 12211 if (LPhi->getNumIncomingValues() != 2) return false; 12212 12213 auto *RLoop = RAR->getLoop(); 12214 auto *Predecessor = RLoop->getLoopPredecessor(); 12215 assert(Predecessor && "Loop with AddRec with no predecessor?"); 12216 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor)); 12217 if (!ProvedEasily(L1, RAR->getStart())) 12218 return false; 12219 auto *Latch = RLoop->getLoopLatch(); 12220 assert(Latch && "Loop with AddRec with no latch?"); 12221 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch)); 12222 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this))) 12223 return false; 12224 } else { 12225 // In all other cases, go over the inputs of LHS and compare each of them to 12226 // RHS; the predicate is true for (LHS, RHS) if it is true for all such pairs. 12227 // At this point RHS is either a non-Phi, or it is a Phi from some block 12228 // different from LBB. 12229 for (const BasicBlock *IncBB : predecessors(LBB)) { 12230 // Check that RHS is available in this block. 12231 if (!dominates(RHS, IncBB)) 12232 return false; 12233 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 12234 // Make sure L does not refer to a value from a potentially previous 12235 // iteration of a loop. 12236 if (!properlyDominates(L, LBB)) 12237 return false; 12238 if (!ProvedEasily(L, RHS)) 12239 return false; 12240 } 12241 } 12242 return true; 12243 } 12244 12245 bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred, 12246 const SCEV *LHS, 12247 const SCEV *RHS, 12248 const SCEV *FoundLHS, 12249 const SCEV *FoundRHS) { 12250 // We want to imply LHS < RHS from LHS < (RHS >> shiftvalue). First, make 12251 // sure that we are dealing with the same LHS.
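// The key fact used below is that a logical shift right never increases an // unsigned value: (Shiftee u>> ShiftValue) u<= Shiftee. So from // LHS u< (Shiftee u>> ShiftValue) and Shiftee u<= RHS we can chain // LHS u< Shiftee u<= RHS.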
12252 if (RHS == FoundRHS) { 12253 std::swap(LHS, RHS); 12254 std::swap(FoundLHS, FoundRHS); 12255 Pred = ICmpInst::getSwappedPredicate(Pred); 12256 } 12257 if (LHS != FoundLHS) 12258 return false; 12259 12260 auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS); 12261 if (!SUFoundRHS) 12262 return false; 12263 12264 Value *Shiftee, *ShiftValue; 12265 12266 using namespace PatternMatch; 12267 if (match(SUFoundRHS->getValue(), 12268 m_LShr(m_Value(Shiftee), m_Value(ShiftValue)))) { 12269 auto *ShifteeS = getSCEV(Shiftee); 12270 // Prove one of the following: 12271 // LHS <u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <u RHS 12272 // LHS <=u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <=u RHS 12273 // LHS <s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0 12274 // ---> LHS <s RHS 12275 // LHS <=s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0 12276 // ---> LHS <=s RHS 12277 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 12278 return isKnownPredicate(ICmpInst::ICMP_ULE, ShifteeS, RHS); 12279 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 12280 if (isKnownNonNegative(ShifteeS)) 12281 return isKnownPredicate(ICmpInst::ICMP_SLE, ShifteeS, RHS); 12282 } 12283 12284 return false; 12285 } 12286 12287 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 12288 const SCEV *LHS, const SCEV *RHS, 12289 const SCEV *FoundLHS, 12290 const SCEV *FoundRHS, 12291 const Instruction *CtxI) { 12292 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 12293 return true; 12294 12295 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 12296 return true; 12297 12298 if (isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS)) 12299 return true; 12300 12301 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS, 12302 CtxI)) 12303 return true; 12304 12305 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 12306 FoundLHS, FoundRHS); 12307 } 12308 12309 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values? 12310 template <typename MinMaxExprType> 12311 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, 12312 const SCEV *Candidate) { 12313 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr); 12314 if (!MinMaxExpr) 12315 return false; 12316 12317 return is_contained(MinMaxExpr->operands(), Candidate); 12318 } 12319 12320 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 12321 ICmpInst::Predicate Pred, 12322 const SCEV *LHS, const SCEV *RHS) { 12323 // If both sides are affine addrecs for the same loop, with equal 12324 // steps, and we know the recurrences don't wrap, then we only 12325 // need to check the predicate on the starting values. 12326 12327 if (!ICmpInst::isRelational(Pred)) 12328 return false; 12329 12330 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 12331 if (!LAR) 12332 return false; 12333 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 12334 if (!RAR) 12335 return false; 12336 if (LAR->getLoop() != RAR->getLoop()) 12337 return false; 12338 if (!LAR->isAffine() || !RAR->isAffine()) 12339 return false; 12340 12341 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 12342 return false; 12343 12344 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 
12345 SCEV::FlagNSW : SCEV::FlagNUW; 12346 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) 12347 return false; 12348 12349 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); 12350 } 12351 12352 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max 12353 /// expression? 12354 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, 12355 ICmpInst::Predicate Pred, 12356 const SCEV *LHS, const SCEV *RHS) { 12357 switch (Pred) { 12358 default: 12359 return false; 12360 12361 case ICmpInst::ICMP_SGE: 12362 std::swap(LHS, RHS); 12363 [[fallthrough]]; 12364 case ICmpInst::ICMP_SLE: 12365 return 12366 // min(A, ...) <= A 12367 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) || 12368 // A <= max(A, ...) 12369 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); 12370 12371 case ICmpInst::ICMP_UGE: 12372 std::swap(LHS, RHS); 12373 [[fallthrough]]; 12374 case ICmpInst::ICMP_ULE: 12375 return 12376 // min(A, ...) <= A 12377 // FIXME: what about umin_seq? 12378 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) || 12379 // A <= max(A, ...) 12380 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS); 12381 } 12382 12383 llvm_unreachable("covered switch fell through?!"); 12384 } 12385 12386 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred, 12387 const SCEV *LHS, const SCEV *RHS, 12388 const SCEV *FoundLHS, 12389 const SCEV *FoundRHS, 12390 unsigned Depth) { 12391 assert(getTypeSizeInBits(LHS->getType()) == 12392 getTypeSizeInBits(RHS->getType()) && 12393 "LHS and RHS have different sizes?"); 12394 assert(getTypeSizeInBits(FoundLHS->getType()) == 12395 getTypeSizeInBits(FoundRHS->getType()) && 12396 "FoundLHS and FoundRHS have different sizes?"); 12397 // We want to avoid hurting compile time with the analysis of overly large trees. 12398 if (Depth > MaxSCEVOperationsImplicationDepth) 12399 return false; 12400 12401 // We only want to work with the GT comparison so far. 12402 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) { 12403 Pred = CmpInst::getSwappedPredicate(Pred); 12404 std::swap(LHS, RHS); 12405 std::swap(FoundLHS, FoundRHS); 12406 } 12407 12408 // For unsigned, try to reduce it to the corresponding signed comparison. 12409 if (Pred == ICmpInst::ICMP_UGT) 12410 // We can replace the unsigned predicate with its signed counterpart if all 12411 // involved values are non-negative. 12412 // TODO: We could have better support for unsigned. 12413 if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) { 12414 // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing 12415 // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us 12416 // use this fact to prove that LHS and RHS are non-negative. 12417 const SCEV *MinusOne = getMinusOne(LHS->getType()); 12418 if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS, 12419 FoundRHS) && 12420 isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS, 12421 FoundRHS)) 12422 Pred = ICmpInst::ICMP_SGT; 12423 } 12424 12425 if (Pred != ICmpInst::ICMP_SGT) 12426 return false; 12427 12428 auto GetOpFromSExt = [&](const SCEV *S) { 12429 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S)) 12430 return Ext->getOperand(); 12431 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off 12432 // the constant in some cases. 12433 return S; 12434 }; 12435 12436 // Acquire values from extensions.
12437 auto *OrigLHS = LHS; 12438 auto *OrigFoundLHS = FoundLHS; 12439 LHS = GetOpFromSExt(LHS); 12440 FoundLHS = GetOpFromSExt(FoundLHS); 12441 12442 // Can the SGT predicate be proved trivially or by using the found context? 12443 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) { 12444 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) || 12445 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS, 12446 FoundRHS, Depth + 1); 12447 }; 12448 12449 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) { 12450 // We want to avoid creation of any new non-constant SCEV. Since we are 12451 // going to compare the operands to RHS, we should be certain that we don't 12452 // need any size extensions for this. So let's decline all cases where the 12453 // sizes of the types of LHS and RHS do not match. 12454 // TODO: Maybe try to get RHS from sext to catch more cases? 12455 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType())) 12456 return false; 12457 12458 // Should not overflow. 12459 if (!LHSAddExpr->hasNoSignedWrap()) 12460 return false; 12461 12462 auto *LL = LHSAddExpr->getOperand(0); 12463 auto *LR = LHSAddExpr->getOperand(1); 12464 auto *MinusOne = getMinusOne(RHS->getType()); 12465 12466 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context. 12467 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) { 12468 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS); 12469 }; 12470 // Try to prove the following rule: 12471 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS). 12472 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS). 12473 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL)) 12474 return true; 12475 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) { 12476 Value *LL, *LR; 12477 // FIXME: Once we have SDiv implemented, we can get rid of this matching. 12478 12479 using namespace llvm::PatternMatch; 12480 12481 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) { 12482 // Rules for division. 12483 // We are going to perform some comparisons with Denominator and its 12484 // derivative expressions. In the general case, creating a SCEV for it may 12485 // lead to a complex analysis of the entire graph, and in particular it 12486 // can request trip count recalculation for the same loop. This would get 12487 // cached as SCEVCouldNotCompute to avoid the infinite recursion. To avoid 12488 // this, we only want to create SCEVs that are constants in this section. 12489 // So we bail if Denominator is not a constant. 12490 if (!isa<ConstantInt>(LR)) 12491 return false; 12492 12493 auto *Denominator = cast<SCEVConstant>(getSCEV(LR)); 12494 12495 // We want to make sure that LHS = FoundLHS / Denominator. If so, 12496 // then a SCEV for the numerator already exists and matches FoundLHS. 12497 auto *Numerator = getExistingSCEV(LL); 12498 if (!Numerator || Numerator->getType() != FoundLHS->getType()) 12499 return false; 12500 12501 // Make sure that the numerator matches FoundLHS and the denominator 12502 // is positive. 12503 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator)) 12504 return false; 12505 12506 auto *DTy = Denominator->getType(); 12507 auto *FRHSTy = FoundRHS->getType(); 12508 if (DTy->isPointerTy() != FRHSTy->isPointerTy()) 12509 // One of the types is a pointer and the other is not. We cannot extend 12510 // them properly to a wider type, so let us just reject this case.
12511 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc. should help 12512 // to avoid this check. 12513 return false; 12514 12515 // Given that: 12516 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0. 12517 auto *WTy = getWiderType(DTy, FRHSTy); 12518 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy); 12519 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy); 12520 12521 // Try to prove the following rule: 12522 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS). 12523 // For example, given FoundLHS > 2, FoundLHS is at 12524 // least 3. If we divide it by Denominator < 4, the result is at least 1. 12525 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2)); 12526 if (isKnownNonPositive(RHS) && 12527 IsSGTViaContext(FoundRHSExt, DenomMinusTwo)) 12528 return true; 12529 12530 // Try to prove the following rule: 12531 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS). 12532 // For example, given FoundLHS > -3, FoundLHS is at least -2. 12533 // If we divide it by Denominator > 2, then: 12534 // 1. If FoundLHS is negative, then the result is 0. 12535 // 2. If FoundLHS is non-negative, then the result is non-negative. 12536 // Either way, the result is non-negative. 12537 auto *MinusOne = getMinusOne(WTy); 12538 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt); 12539 if (isKnownNegative(RHS) && 12540 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne)) 12541 return true; 12542 } 12543 } 12544 12545 // If our expression contained SCEVUnknown Phis, and we split it down and now 12546 // need to prove something for them, try to prove the predicate for all 12547 // possible incoming values of those Phis. 12548 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1)) 12549 return true; 12550 12551 return false; 12552 } 12553 12554 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, 12555 const SCEV *LHS, const SCEV *RHS) { 12556 // zext x u<= sext x, sext x s<= zext x 12557 switch (Pred) { 12558 case ICmpInst::ICMP_SGE: 12559 std::swap(LHS, RHS); 12560 [[fallthrough]]; 12561 case ICmpInst::ICMP_SLE: { 12562 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt. 12563 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS); 12564 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS); 12565 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 12566 return true; 12567 break; 12568 } 12569 case ICmpInst::ICMP_UGE: 12570 std::swap(LHS, RHS); 12571 [[fallthrough]]; 12572 case ICmpInst::ICMP_ULE: { 12573 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
12574 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS); 12575 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS); 12576 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 12577 return true; 12578 break; 12579 } 12580 default: 12581 break; 12582 } 12583 return false; 12584 } 12585 12586 bool 12587 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred, 12588 const SCEV *LHS, const SCEV *RHS) { 12589 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) || 12590 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) || 12591 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) || 12592 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) || 12593 isKnownPredicateViaNoOverflow(Pred, LHS, RHS); 12594 } 12595 12596 bool 12597 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, 12598 const SCEV *LHS, const SCEV *RHS, 12599 const SCEV *FoundLHS, 12600 const SCEV *FoundRHS) { 12601 switch (Pred) { 12602 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 12603 case ICmpInst::ICMP_EQ: 12604 case ICmpInst::ICMP_NE: 12605 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) 12606 return true; 12607 break; 12608 case ICmpInst::ICMP_SLT: 12609 case ICmpInst::ICMP_SLE: 12610 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) && 12611 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS)) 12612 return true; 12613 break; 12614 case ICmpInst::ICMP_SGT: 12615 case ICmpInst::ICMP_SGE: 12616 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) && 12617 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS)) 12618 return true; 12619 break; 12620 case ICmpInst::ICMP_ULT: 12621 case ICmpInst::ICMP_ULE: 12622 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) && 12623 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS)) 12624 return true; 12625 break; 12626 case ICmpInst::ICMP_UGT: 12627 case ICmpInst::ICMP_UGE: 12628 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) && 12629 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS)) 12630 return true; 12631 break; 12632 } 12633 12634 // Maybe it can be proved via operations? 12635 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS)) 12636 return true; 12637 12638 return false; 12639 } 12640 12641 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, 12642 const SCEV *LHS, 12643 const SCEV *RHS, 12644 const SCEV *FoundLHS, 12645 const SCEV *FoundRHS) { 12646 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS)) 12647 // The restriction on `FoundRHS` can be lifted easily -- it exists only to 12648 // reduce the compile time impact of this optimization. 12649 return false; 12650 12651 std::optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS); 12652 if (!Addend) 12653 return false; 12654 12655 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt(); 12656 12657 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the 12658 // antecedent "`FoundLHS` `Pred` `FoundRHS`".
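// For example, if the antecedent is FoundLHS u< 8, FoundLHSRange is [0, 8); // with Addend = 2 this gives LHSRange = [2, 10), and a consequent such as // LHS u< 10 then holds for every admissible LHS.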
12659 ConstantRange FoundLHSRange = 12660 ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS); 12661 12662 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 12663 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 12664 12665 // We can also compute the range of values for `LHS` that satisfy the 12666 // consequent, "`LHS` `Pred` `RHS`": 12667 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 12668 // The antecedent implies the consequent if every value of `LHS` that 12669 // satisfies the antecedent also satisfies the consequent. 12670 return LHSRange.icmp(Pred, ConstRHS); 12671 } 12672 12673 bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 12674 bool IsSigned) { 12675 assert(isKnownPositive(Stride) && "Positive stride expected!"); 12676 12677 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 12678 const SCEV *One = getOne(Stride->getType()); 12679 12680 if (IsSigned) { 12681 APInt MaxRHS = getSignedRangeMax(RHS); 12682 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 12683 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 12684 12685 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 12686 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 12687 } 12688 12689 APInt MaxRHS = getUnsignedRangeMax(RHS); 12690 APInt MaxValue = APInt::getMaxValue(BitWidth); 12691 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 12692 12693 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 12694 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 12695 } 12696 12697 bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 12698 bool IsSigned) { 12699 12700 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 12701 const SCEV *One = getOne(Stride->getType()); 12702 12703 if (IsSigned) { 12704 APInt MinRHS = getSignedRangeMin(RHS); 12705 APInt MinValue = APInt::getSignedMinValue(BitWidth); 12706 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 12707 12708 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 12709 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 12710 } 12711 12712 APInt MinRHS = getUnsignedRangeMin(RHS); 12713 APInt MinValue = APInt::getMinValue(BitWidth); 12714 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 12715 12716 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 12717 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 12718 } 12719 12720 const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) { 12721 // umin(N, 1) + floor((N - umin(N, 1)) / D) 12722 // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin 12723 // expression fixes the case of N=0. 12724 const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType())); 12725 const SCEV *NMinusOne = getMinusSCEV(N, MinNOne); 12726 return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D)); 12727 } 12728 12729 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 12730 const SCEV *Stride, 12731 const SCEV *End, 12732 unsigned BitWidth, 12733 bool IsSigned) { 12734 // The logic in this function assumes we can represent a positive stride. 12735 // If we can't, the backedge-taken count must be zero. 
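// E.g. a 1-bit signed type can only hold 0 and -1, so no positive stride is // representable in it and the backedge cannot be taken.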
12736 if (IsSigned && BitWidth == 1) 12737 return getZero(Stride->getType()); 12738 12739 // The code below has only been closely audited for negative strides in the 12740 // unsigned comparison case; it may be correct for signed comparisons, but 12741 // that needs to be established. 12742 if (IsSigned && isKnownNegative(Stride)) 12743 return getCouldNotCompute(); 12744 12745 // Calculate the maximum backedge count based on the range of values 12746 // permitted by Start, End, and Stride. 12747 APInt MinStart = 12748 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 12749 12750 APInt MinStride = 12751 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 12752 12753 // We assume either the stride is positive, or the backedge-taken count 12754 // is zero. So force StrideForMaxBECount to be at least one. 12755 APInt One(BitWidth, 1); 12756 APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride) 12757 : APIntOps::umax(One, MinStride); 12758 12759 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 12760 : APInt::getMaxValue(BitWidth); 12761 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 12762 12763 // Although End can be a MAX expression, we estimate MaxEnd considering only 12764 // the case End = RHS of the loop termination condition. This is safe because 12765 // in the other case (End - Start) is zero, leading to a zero maximum backedge 12766 // taken count. 12767 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) 12768 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 12769 12770 // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride) 12771 MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart) 12772 : APIntOps::umax(MaxEnd, MinStart); 12773 12774 return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */, 12775 getConstant(StrideForMaxBECount) /* Step */); 12776 } 12777 12778 ScalarEvolution::ExitLimit 12779 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 12780 const Loop *L, bool IsSigned, 12781 bool ControlsExit, bool AllowPredicates) { 12782 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 12783 12784 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 12785 bool PredicatedIV = false; 12786 12787 auto canAssumeNoSelfWrap = [&](const SCEVAddRecExpr *AR) { 12788 // Can we prove this loop *must* be UB if overflow of IV occurs? 12789 // Reasoning goes as follows: 12790 // * Suppose the IV did self wrap. 12791 // * If Stride evenly divides the iteration space, then once wrap 12792 // occurs, the loop must revisit the same values. 12793 // * We know that RHS is invariant, and that none of those values 12794 // caused this exit to be taken previously. Thus, this exit is 12795 // dynamically dead. 12796 // * If this is the sole exit, then a dead exit implies the loop 12797 // must be infinite if there are no abnormal exits. 12798 // * If the loop were infinite, then it must either not be mustprogress 12799 // or have side effects. Otherwise, it must be UB. 12800 // * It can't (by assumption) be UB, so we have contradicted our 12801 // premise and can conclude the IV did not in fact self-wrap.
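// Roughly, the checks below establish those premises: RHS is loop-invariant, // the stride is a constant power of two (so it evenly divides the iteration // space), this is the sole controlling exit with no abnormal exits, and the // loop is finite by assumption.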
12802 if (!isLoopInvariant(RHS, L)) 12803 return false; 12804 12805 auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)); 12806 if (!StrideC || !StrideC->getAPInt().isPowerOf2()) 12807 return false; 12808 12809 if (!ControlsExit || !loopHasNoAbnormalExits(L)) 12810 return false; 12811 12812 return loopIsFiniteByAssumption(L); 12813 }; 12814 12815 if (!IV) { 12816 if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) { 12817 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand()); 12818 if (AR && AR->getLoop() == L && AR->isAffine()) { 12819 auto canProveNUW = [&]() { 12820 if (!isLoopInvariant(RHS, L)) 12821 return false; 12822 12823 if (!isKnownNonZero(AR->getStepRecurrence(*this))) 12824 // We need the sequence defined by AR to strictly increase in the 12825 // unsigned integer domain for the logic below to hold. 12826 return false; 12827 12828 const unsigned InnerBitWidth = getTypeSizeInBits(AR->getType()); 12829 const unsigned OuterBitWidth = getTypeSizeInBits(RHS->getType()); 12830 // If RHS <=u Limit, then there must exist a value V in the sequence 12831 // defined by AR (e.g. {Start,+,Step}) such that V >u RHS, and 12832 // V <=u UINT_MAX. Thus, we must exit the loop before unsigned 12833 // overflow occurs. This limit also implies that a signed comparison 12834 // (in the wide bitwidth) is equivalent to an unsigned comparison as 12835 // the high bits on both sides must be zero. 12836 APInt StrideMax = getUnsignedRangeMax(AR->getStepRecurrence(*this)); 12837 APInt Limit = APInt::getMaxValue(InnerBitWidth) - (StrideMax - 1); 12838 Limit = Limit.zext(OuterBitWidth); 12839 return getUnsignedRangeMax(applyLoopGuards(RHS, L)).ule(Limit); 12840 }; 12841 auto Flags = AR->getNoWrapFlags(); 12842 if (!hasFlags(Flags, SCEV::FlagNUW) && canProveNUW()) 12843 Flags = setFlags(Flags, SCEV::FlagNUW); 12844 12845 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags); 12846 if (AR->hasNoUnsignedWrap()) { 12847 // Emulate what getZeroExtendExpr would have done during construction 12848 // if we'd been able to infer the fact just above at that time. 12849 const SCEV *Step = AR->getStepRecurrence(*this); 12850 Type *Ty = ZExt->getType(); 12851 auto *S = getAddRecExpr( 12852 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 0), 12853 getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags()); 12854 IV = dyn_cast<SCEVAddRecExpr>(S); 12855 } 12856 } 12857 } 12858 } 12859 12860 12861 if (!IV && AllowPredicates) { 12862 // Try to make this an AddRec using runtime tests, in the first X 12863 // iterations of this loop, where X is the SCEV expression found by the 12864 // algorithm below. 12865 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 12866 PredicatedIV = true; 12867 } 12868 12869 // Avoid weird loops 12870 if (!IV || IV->getLoop() != L || !IV->isAffine()) 12871 return getCouldNotCompute(); 12872 12873 // A precondition of this method is that the condition being analyzed 12874 // reaches an exiting branch which dominates the latch. Given that, we can 12875 // assume that an increment which violates the nowrap specification and 12876 // produces poison must cause undefined behavior when the resulting poison 12877 // value is branched upon and thus we can conclude that the backedge is 12878 // taken no more often than would be required to produce that poison value. 
12879 // Note that a well-defined loop can exit on the iteration which violates 12880 // the nowrap specification if there is another exit (either explicit or 12881 // implicit/exceptional) which causes the loop to execute before the 12882 // exiting instruction we're analyzing would trigger UB. 12883 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; 12884 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); 12885 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; 12886 12887 const SCEV *Stride = IV->getStepRecurrence(*this); 12888 12889 bool PositiveStride = isKnownPositive(Stride); 12890 12891 // Avoid negative or zero stride values. 12892 if (!PositiveStride) { 12893 // We can compute the correct backedge taken count for loops with unknown 12894 // strides if we can prove that the loop is not an infinite loop with side 12895 // effects. Here's the loop structure we are trying to handle - 12896 // 12897 // i = start 12898 // do { 12899 // A[i] = i; 12900 // i += s; 12901 // } while (i < end); 12902 // 12903 // The backedge taken count for such loops is evaluated as - 12904 // (max(end, start + stride) - start - 1) /u stride 12905 // 12906 // The additional preconditions that we need to check to prove correctness 12907 // of the above formula are as follows - 12908 // 12909 // a) IV is either nuw or nsw depending upon signedness (indicated by the 12910 // NoWrap flag). 12911 // b) the loop is guaranteed to be finite (e.g. is mustprogress and has 12912 // no side effects within the loop) 12913 // c) loop has a single static exit (with no abnormal exits) 12914 // 12915 // Precondition a) implies that if the stride is negative, this is a single 12916 // trip loop. The backedge taken count formula reduces to zero in this case. 12917 // 12918 // Preconditions b) and c) combine to imply that if RHS is invariant in L, 12919 // then a zero stride means the backedge can't be taken without executing 12920 // undefined behavior. 12921 // 12922 // The positive stride case is the same as isKnownPositive(Stride) returning 12923 // true (original behavior of the function). 12924 // 12925 if (PredicatedIV || !NoWrap || !loopIsFiniteByAssumption(L) || 12926 !loopHasNoAbnormalExits(L)) 12927 return getCouldNotCompute(); 12928 12929 if (!isKnownNonZero(Stride)) { 12930 // If we have a step of zero, and RHS isn't invariant in L, we don't know 12931 // if it might eventually be greater than start and if so, on which 12932 // iteration. We can't even produce a useful upper bound. 12933 if (!isLoopInvariant(RHS, L)) 12934 return getCouldNotCompute(); 12935 12936 // We allow a potentially zero stride, but we need to divide by stride 12937 // below. Since the loop can't be infinite and this check must control 12938 // the sole exit, we can infer the exit must be taken on the first 12939 // iteration (e.g. backedge count = 0) if the stride is zero. Given that, 12940 // we know the numerator in the divides below must be zero, so we can 12941 // pick an arbitrary non-zero value for the denominator (e.g. stride) 12942 // and produce the right result. 12943 // FIXME: Handle the case where Stride is poison? 12944 auto wouldZeroStrideBeUB = [&]() { 12945 // Proof by contradiction. Suppose the stride were zero. If we can 12946 // prove that the backedge *is* taken on the first iteration, then since 12947 // we know this condition controls the sole exit, we must have an 12948 // infinite loop. We can't have a (well-defined) infinite loop per the 12949 // check just above.
12950 // Note: The (Start - Stride) term is used to get the start' term from 12951 // (start' + stride,+,stride). Remember that we only care about the 12952 // result of this expression when stride == 0 at runtime. 12953 auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride); 12954 return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS); 12955 }; 12956 if (!wouldZeroStrideBeUB()) { 12957 Stride = getUMaxExpr(Stride, getOne(Stride->getType())); 12958 } 12959 } 12960 } else if (!Stride->isOne() && !NoWrap) { 12961 auto isUBOnWrap = [&]() { 12962 // From no-self-wrap, we then need to prove no-(un)signed-wrap. This 12963 // follows trivially from the fact that every (un)signed-wrapped, but 12964 // not self-wrapped, value must be less than the last value before 12965 // (un)signed wrap. Since we know that last value didn't exit, neither 12966 // will any smaller one. 12967 return canAssumeNoSelfWrap(IV); 12968 }; 12969 12970 // Avoid proven overflow cases: this will ensure that the backedge taken 12971 // count will not generate any unsigned overflow. Relaxed no-overflow 12972 // conditions exploit NoWrapFlags, allowing us to optimize in the presence 12973 // of undefined behavior, as in the case of the C language. 12974 if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap()) 12975 return getCouldNotCompute(); 12976 } 12977 12978 // On all paths just preceding, we established the following invariant: 12979 // IV can be assumed not to overflow up to and including the exiting 12980 // iteration. We proved this in one of two ways: 12981 // 1) We can show overflow doesn't occur before the exiting iteration 12982 // 1a) via canIVOverflowOnLT, or 1b) because the step is one 12983 // 2) We can show that if overflow occurs, the loop must execute UB 12984 // before any possible exit. 12985 // Note that we have not yet proved RHS invariant (in general). 12986 12987 const SCEV *Start = IV->getStart(); 12988 12989 // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond. 12990 // If we convert to integers, isLoopEntryGuardedByCond will miss some cases. 12991 // Use integer-typed versions for actual computation; we can't subtract 12992 // pointers in general. 12993 const SCEV *OrigStart = Start; 12994 const SCEV *OrigRHS = RHS; 12995 if (Start->getType()->isPointerTy()) { 12996 Start = getLosslessPtrToIntExpr(Start); 12997 if (isa<SCEVCouldNotCompute>(Start)) 12998 return Start; 12999 } 13000 if (RHS->getType()->isPointerTy()) { 13001 RHS = getLosslessPtrToIntExpr(RHS); 13002 if (isa<SCEVCouldNotCompute>(RHS)) 13003 return RHS; 13004 } 13005 13006 // When the RHS is not invariant, we do not know the end bound of the loop and 13007 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 13008 // calculate the MaxBECount, given the start, stride and max value for the end 13009 // bound of the loop (RHS), and the fact that IV does not overflow (which is 13010 // checked above). 13011 if (!isLoopInvariant(RHS, L)) { 13012 const SCEV *MaxBECount = computeMaxBECountForLT( 13013 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 13014 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 13015 MaxBECount, false /*MaxOrZero*/, Predicates); 13016 } 13017 13018 // We use the expression (max(End,Start)-Start)/Stride to describe the 13019 // backedge count: if the backedge is taken at least once, max(End,Start) 13020 // is End and the result is as above; if not, max(End,Start) is Start and 13021 // we get a backedge count of zero.
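// For example, with Start = 10 and Stride = 2: if End = RHS = 14, the count // is ceil((14 - 10) / 2) = 2; if RHS = 4, then max(4, 10) = 10 and the count // is 0.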
13022 const SCEV *BECount = nullptr; 13023 auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride); 13024 assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!"); 13025 assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!"); 13026 assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!"); 13027 // Can we prove max(RHS,Start) > Start - Stride? 13028 if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) && 13029 isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) { 13030 // In this case, we can use a refined formula for computing the backedge- 13031 // taken count. The general formula remains: 13032 // "End-Start /uceiling Stride" where "End = max(RHS,Start)" 13033 // We want to use the alternate formula: 13034 // "((End - 1) - (Start - Stride)) /u Stride" 13035 // Let's do a quick case analysis to show these are equivalent under 13036 // our precondition that max(RHS,Start) > Start - Stride. 13037 // * For RHS <= Start, the backedge-taken count must be zero. 13038 // "((End - 1) - (Start - Stride)) /u Stride" reduces to 13039 // "((Start - 1) - (Start - Stride)) /u Stride" which simplifies to 13040 // "(Stride - 1) /u Stride" which is indeed zero for all non-zero values 13041 // of Stride. For a zero stride, we've used umax(1,Stride) above, reducing 13042 // this to the stride of 1 case. 13043 // * For RHS >= Start, the backedge count must be "RHS-Start /uceil Stride". 13044 // "((End - 1) - (Start - Stride)) /u Stride" reduces to 13045 // "((RHS - 1) - (Start - Stride)) /u Stride" which reassociates to 13046 // "(RHS - (Start - Stride) - 1) /u Stride". 13047 // Our preconditions trivially imply no overflow in that form. 13048 const SCEV *MinusOne = getMinusOne(Stride->getType()); 13049 const SCEV *Numerator = 13050 getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride)); 13051 BECount = getUDivExpr(Numerator, Stride); 13052 } 13053 13054 const SCEV *BECountIfBackedgeTaken = nullptr; 13055 if (!BECount) { 13056 auto canProveRHSGreaterThanEqualStart = [&]() { 13057 auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 13058 if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart)) 13059 return true; 13060 13061 // (RHS > Start - 1) implies RHS >= Start. 13062 // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if 13063 // "Start - 1" doesn't overflow. 13064 // * For signed comparison, if Start - 1 does overflow, it's equal 13065 // to INT_MAX, and "RHS >s INT_MAX" is trivially false. 13066 // * For unsigned comparison, if Start - 1 does overflow, it's equal 13067 // to UINT_MAX, and "RHS >u UINT_MAX" is trivially false. 13068 // 13069 // FIXME: Should isLoopEntryGuardedByCond do this for us? 13070 auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; 13071 auto *StartMinusOne = getAddExpr(OrigStart, 13072 getMinusOne(OrigStart->getType())); 13073 return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne); 13074 }; 13075 13076 // If we know that RHS >= Start in the context of the loop, then we know 13077 // that max(RHS, Start) = RHS at this point. 13078 const SCEV *End; 13079 if (canProveRHSGreaterThanEqualStart()) { 13080 End = RHS; 13081 } else { 13082 // If RHS < Start, the backedge will be taken zero times. So in 13083 // general, we can write the backedge-taken count as: 13084 // 13085 // RHS >= Start ? ceil((RHS - Start) / Stride) : 0 13086 // 13087 // We convert it to the following to make it more convenient for SCEV: 13088 // 13089 // ceil((max(RHS, Start) - Start) / Stride) 13090 End = IsSigned ?
getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 13091 13092 // See what would happen if we assume the backedge is taken. This is 13093 // used to compute MaxBECount. 13094 BECountIfBackedgeTaken = getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride); 13095 } 13096 13097 // At this point, we know: 13098 // 13099 // 1. If IsSigned, Start <=s End; otherwise, Start <=u End 13100 // 2. The index variable doesn't overflow. 13101 // 13102 // Therefore, we know N exists such that 13103 // (Start + Stride * N) >= End, and computing "(Start + Stride * N)" 13104 // doesn't overflow. 13105 // 13106 // Using this information, try to prove whether the addition in 13107 // "(End - Start) + (Stride - 1)" has unsigned overflow. 13108 const SCEV *One = getOne(Stride->getType()); 13109 bool MayAddOverflow = [&] { 13110 if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) { 13111 if (StrideC->getAPInt().isPowerOf2()) { 13112 // Suppose Stride is a power of two, and Start/End are unsigned 13113 // integers. Let UMAX be the largest representable unsigned 13114 // integer. 13115 // 13116 // By the preconditions of this function, we know 13117 // "(Start + Stride * N) >= End", and this doesn't overflow. 13118 // As a formula: 13119 // 13120 // End <= (Start + Stride * N) <= UMAX 13121 // 13122 // Subtracting Start from all the terms: 13123 // 13124 // End - Start <= Stride * N <= UMAX - Start 13125 // 13126 // Since Start is unsigned, UMAX - Start <= UMAX. Therefore: 13127 // 13128 // End - Start <= Stride * N <= UMAX 13129 // 13130 // Stride * N is a multiple of Stride. Therefore, 13131 // 13132 // End - Start <= Stride * N <= UMAX - (UMAX mod Stride) 13133 // 13134 // Since Stride is a power of two, UMAX + 1 is divisible by Stride. 13135 // Therefore, UMAX mod Stride == Stride - 1. So we can write: 13136 // 13137 // End - Start <= Stride * N <= UMAX - Stride + 1 13138 // 13139 // Dropping the middle term: 13140 // 13141 // End - Start <= UMAX - Stride + 1 13142 // 13143 // Adding Stride - 1 to both sides: 13144 // 13145 // (End - Start) + (Stride - 1) <= UMAX 13146 // 13147 // In other words, the addition doesn't have unsigned overflow. 13148 // 13149 // A similar proof works if we treat Start/End as signed values. 13150 // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to 13151 // use signed max instead of unsigned max. Note that we're trying 13152 // to prove a lack of unsigned overflow in either case. 13153 return false; 13154 } 13155 } 13156 if (Start == Stride || Start == getMinusSCEV(Stride, One)) { 13157 // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1. 13158 // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End. 13159 // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End. 13160 // 13161 // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End. 13162 return false; 13163 } 13164 return true; 13165 }(); 13166 13167 const SCEV *Delta = getMinusSCEV(End, Start); 13168 if (!MayAddOverflow) { 13169 // floor((D + (S - 1)) / S) 13170 // We prefer this formulation if it's legal because it's fewer operations.
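// E.g. Delta = 7, Stride = 2: (7 + (2 - 1)) /u 2 = 4 = ceil(7 / 2).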
13171 BECount = 13172 getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride); 13173 } else { 13174 BECount = getUDivCeilSCEV(Delta, Stride); 13175 } 13176 } 13177 13178 const SCEV *ConstantMaxBECount; 13179 bool MaxOrZero = false; 13180 if (isa<SCEVConstant>(BECount)) { 13181 ConstantMaxBECount = BECount; 13182 } else if (BECountIfBackedgeTaken && 13183 isa<SCEVConstant>(BECountIfBackedgeTaken)) { 13184 // If we know exactly how many times the backedge will be taken if it's 13185 // taken at least once, then the backedge count will either be that or 13186 // zero. 13187 ConstantMaxBECount = BECountIfBackedgeTaken; 13188 MaxOrZero = true; 13189 } else { 13190 ConstantMaxBECount = computeMaxBECountForLT( 13191 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 13192 } 13193 13194 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) && 13195 !isa<SCEVCouldNotCompute>(BECount)) 13196 ConstantMaxBECount = getConstant(getUnsignedRangeMax(BECount)); 13197 13198 const SCEV *SymbolicMaxBECount = 13199 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount; 13200 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero, 13201 Predicates); 13202 } 13203 13204 ScalarEvolution::ExitLimit 13205 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 13206 const Loop *L, bool IsSigned, 13207 bool ControlsExit, bool AllowPredicates) { 13208 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 13209 // We handle only IV > Invariant 13210 if (!isLoopInvariant(RHS, L)) 13211 return getCouldNotCompute(); 13212 13213 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 13214 if (!IV && AllowPredicates) 13215 // Try to make this an AddRec using runtime tests, in the first X 13216 // iterations of this loop, where X is the SCEV expression found by the 13217 // algorithm below. 13218 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 13219 13220 // Avoid weird loops 13221 if (!IV || IV->getLoop() != L || !IV->isAffine()) 13222 return getCouldNotCompute(); 13223 13224 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; 13225 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); 13226 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; 13227 13228 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 13229 13230 // Avoid negative or zero stride values 13231 if (!isKnownPositive(Stride)) 13232 return getCouldNotCompute(); 13233 13234 // Avoid proven overflow cases: this will ensure that the backedge taken count 13235 // will not generate any unsigned overflow. Relaxed no-overflow conditions 13236 // exploit NoWrapFlags, allowing to optimize in presence of undefined 13237 // behaviors like the case of C language. 13238 if (!Stride->isOne() && !NoWrap) 13239 if (canIVOverflowOnGT(RHS, Stride, IsSigned)) 13240 return getCouldNotCompute(); 13241 13242 const SCEV *Start = IV->getStart(); 13243 const SCEV *End = RHS; 13244 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { 13245 // If we know that Start >= RHS in the context of loop, then we know that 13246 // min(RHS, Start) = RHS at this point. 13247 if (isLoopEntryGuardedByCond( 13248 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) 13249 End = RHS; 13250 else 13251 End = IsSigned ? 
getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 13252 } 13253 13254 if (Start->getType()->isPointerTy()) { 13255 Start = getLosslessPtrToIntExpr(Start); 13256 if (isa<SCEVCouldNotCompute>(Start)) 13257 return Start; 13258 } 13259 if (End->getType()->isPointerTy()) { 13260 End = getLosslessPtrToIntExpr(End); 13261 if (isa<SCEVCouldNotCompute>(End)) 13262 return End; 13263 } 13264 13265 // Compute ((Start - End) + (Stride - 1)) / Stride. 13266 // FIXME: This can overflow. Holding off on fixing this for now; 13267 // howManyGreaterThans will hopefully be gone soon. 13268 const SCEV *One = getOne(Stride->getType()); 13269 const SCEV *BECount = getUDivExpr( 13270 getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride); 13271 13272 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 13273 : getUnsignedRangeMax(Start); 13274 13275 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 13276 : getUnsignedRangeMin(Stride); 13277 13278 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 13279 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 13280 : APInt::getMinValue(BitWidth) + (MinStride - 1); 13281 13282 // Although End can be a MIN expression we estimate MinEnd considering only 13283 // the case End = RHS. This is safe because in the other case (Start - End) 13284 // is zero, leading to a zero maximum backedge taken count. 13285 APInt MinEnd = 13286 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 13287 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 13288 13289 const SCEV *ConstantMaxBECount = 13290 isa<SCEVConstant>(BECount) 13291 ? BECount 13292 : getUDivCeilSCEV(getConstant(MaxStart - MinEnd), 13293 getConstant(MinStride)); 13294 13295 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount)) 13296 ConstantMaxBECount = BECount; 13297 const SCEV *SymbolicMaxBECount = 13298 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount; 13299 13300 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, false, 13301 Predicates); 13302 } 13303 13304 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 13305 ScalarEvolution &SE) const { 13306 if (Range.isFullSet()) // Infinite loop. 13307 return SE.getCouldNotCompute(); 13308 13309 // If the start is a non-zero constant, shift the range to simplify things. 13310 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 13311 if (!SC->getValue()->isZero()) { 13312 SmallVector<const SCEV *, 4> Operands(operands()); 13313 Operands[0] = SE.getZero(SC->getType()); 13314 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 13315 getNoWrapFlags(FlagNW)); 13316 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 13317 return ShiftedAddRec->getNumIterationsInRange( 13318 Range.subtract(SC->getAPInt()), SE); 13319 // This is strange and shouldn't happen. 13320 return SE.getCouldNotCompute(); 13321 } 13322 13323 // The only time we can solve this is when we have all constant indices. 13324 // Otherwise, we cannot determine the overflow conditions. 13325 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 13326 return SE.getCouldNotCompute(); 13327 13328 // Okay at this point we know that all elements of the chrec are constants and 13329 // that the start element is zero. 13330 13331 // First check to see if the range contains zero. If not, the first 13332 // iteration exits. 
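  // For instance (illustrative): {0,+,4} against Range == [1, 10) yields 0,
  // because the iteration-zero value 0 already lies outside the range. For
  // Range == [0, 10), the affine case below computes ExitVal == (9 + 4) /u 4
  // == 3: the first iteration whose value (12) falls outside the range.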
13333 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 13334 if (!Range.contains(APInt(BitWidth, 0))) 13335 return SE.getZero(getType()); 13336 13337 if (isAffine()) { 13338 // If this is an affine expression then we have this situation: 13339 // Solve {0,+,A} in Range === Ax in Range 13340 13341 // We know that zero is in the range. If A is positive then we know that 13342 // the upper value of the range must be the first possible exit value. 13343 // If A is negative then the lower of the range is the last possible loop 13344 // value. Also note that we already checked for a full range. 13345 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 13346 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 13347 13348 // The exit value should be (End+A)/A. 13349 APInt ExitVal = (End + A).udiv(A); 13350 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 13351 13352 // Evaluate at the exit value. If we really did fall out of the valid 13353 // range, then we computed our trip count, otherwise wrap around or other 13354 // things must have happened. 13355 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 13356 if (Range.contains(Val->getValue())) 13357 return SE.getCouldNotCompute(); // Something strange happened 13358 13359 // Ensure that the previous value is in the range. 13360 assert(Range.contains( 13361 EvaluateConstantChrecAtConstant(this, 13362 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 13363 "Linear scev computation is off in a bad way!"); 13364 return SE.getConstant(ExitValue); 13365 } 13366 13367 if (isQuadratic()) { 13368 if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) 13369 return SE.getConstant(*S); 13370 } 13371 13372 return SE.getCouldNotCompute(); 13373 } 13374 13375 const SCEVAddRecExpr * 13376 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { 13377 assert(getNumOperands() > 1 && "AddRec with zero step?"); 13378 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), 13379 // but in this case we cannot guarantee that the value returned will be an 13380 // AddRec because SCEV does not have a fixed point where it stops 13381 // simplification: it is legal to return ({rec1} + {rec2}). For example, it 13382 // may happen if we reach arithmetic depth limit while simplifying. So we 13383 // construct the returned value explicitly. 13384 SmallVector<const SCEV *, 3> Ops; 13385 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and 13386 // (this + Step) is {A+B,+,B+C,+...,+,N}. 13387 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) 13388 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); 13389 // We know that the last operand is not a constant zero (otherwise it would 13390 // have been popped out earlier). This guarantees us that if the result has 13391 // the same last operand, then it will also not be popped out, meaning that 13392 // the returned value will be an AddRec. 13393 const SCEV *Last = getOperand(getNumOperands() - 1); 13394 assert(!Last->isZero() && "Recurrency with zero step?"); 13395 Ops.push_back(Last); 13396 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(), 13397 SCEV::FlagAnyWrap)); 13398 } 13399 13400 // Return true when S contains at least an undef value. 
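// (That is, whether S contains a SCEVUnknown whose underlying IR value is
// 'undef'.)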
13401 bool ScalarEvolution::containsUndefs(const SCEV *S) const { 13402 return SCEVExprContains(S, [](const SCEV *S) { 13403 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 13404 return isa<UndefValue>(SU->getValue()); 13405 return false; 13406 }); 13407 } 13408 13409 // Return true when S contains a value that is a nullptr. 13410 bool ScalarEvolution::containsErasedValue(const SCEV *S) const { 13411 return SCEVExprContains(S, [](const SCEV *S) { 13412 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 13413 return SU->getValue() == nullptr; 13414 return false; 13415 }); 13416 } 13417 13418 /// Return the size of an element read or written by Inst. 13419 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 13420 Type *Ty; 13421 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 13422 Ty = Store->getValueOperand()->getType(); 13423 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 13424 Ty = Load->getType(); 13425 else 13426 return nullptr; 13427 13428 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 13429 return getSizeOfExpr(ETy, Ty); 13430 } 13431 13432 //===----------------------------------------------------------------------===// 13433 // SCEVCallbackVH Class Implementation 13434 //===----------------------------------------------------------------------===// 13435 13436 void ScalarEvolution::SCEVCallbackVH::deleted() { 13437 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 13438 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 13439 SE->ConstantEvolutionLoopExitValue.erase(PN); 13440 SE->eraseValueFromMap(getValPtr()); 13441 // this now dangles! 13442 } 13443 13444 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 13445 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 13446 13447 // Forget all the expressions associated with users of the old value, 13448 // so that future queries will recompute the expressions using the new 13449 // value. 13450 Value *Old = getValPtr(); 13451 SmallVector<User *, 16> Worklist(Old->users()); 13452 SmallPtrSet<User *, 8> Visited; 13453 while (!Worklist.empty()) { 13454 User *U = Worklist.pop_back_val(); 13455 // Deleting the Old value will cause this to dangle. Postpone 13456 // that until everything else is done. 13457 if (U == Old) 13458 continue; 13459 if (!Visited.insert(U).second) 13460 continue; 13461 if (PHINode *PN = dyn_cast<PHINode>(U)) 13462 SE->ConstantEvolutionLoopExitValue.erase(PN); 13463 SE->eraseValueFromMap(U); 13464 llvm::append_range(Worklist, U->users()); 13465 } 13466 // Delete the Old value. 13467 if (PHINode *PN = dyn_cast<PHINode>(Old)) 13468 SE->ConstantEvolutionLoopExitValue.erase(PN); 13469 SE->eraseValueFromMap(Old); 13470 // this now dangles! 13471 } 13472 13473 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 13474 : CallbackVH(V), SE(se) {} 13475 13476 //===----------------------------------------------------------------------===// 13477 // ScalarEvolution Class Implementation 13478 //===----------------------------------------------------------------------===// 13479 13480 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 13481 AssumptionCache &AC, DominatorTree &DT, 13482 LoopInfo &LI) 13483 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 13484 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 13485 LoopDispositions(64), BlockDispositions(64) { 13486 // To use guards for proving predicates, we need to scan every instruction in 13487 // relevant basic blocks, and not just terminators. 
Doing this is a waste of 13488 // time if the IR does not actually contain any calls to 13489 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 13490 // 13491 // This pessimizes the case where a pass that preserves ScalarEvolution wants 13492 // to _add_ guards to the module when there weren't any before, and wants 13493 // ScalarEvolution to optimize based on those guards. For now we prefer to be 13494 // efficient in lieu of being smart in that rather obscure case. 13495 13496 auto *GuardDecl = F.getParent()->getFunction( 13497 Intrinsic::getName(Intrinsic::experimental_guard)); 13498 HasGuards = GuardDecl && !GuardDecl->use_empty(); 13499 } 13500 13501 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 13502 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 13503 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 13504 ValueExprMap(std::move(Arg.ValueExprMap)), 13505 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 13506 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 13507 PendingMerges(std::move(Arg.PendingMerges)), 13508 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 13509 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 13510 PredicatedBackedgeTakenCounts( 13511 std::move(Arg.PredicatedBackedgeTakenCounts)), 13512 BECountUsers(std::move(Arg.BECountUsers)), 13513 ConstantEvolutionLoopExitValue( 13514 std::move(Arg.ConstantEvolutionLoopExitValue)), 13515 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 13516 ValuesAtScopesUsers(std::move(Arg.ValuesAtScopesUsers)), 13517 LoopDispositions(std::move(Arg.LoopDispositions)), 13518 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 13519 BlockDispositions(std::move(Arg.BlockDispositions)), 13520 SCEVUsers(std::move(Arg.SCEVUsers)), 13521 UnsignedRanges(std::move(Arg.UnsignedRanges)), 13522 SignedRanges(std::move(Arg.SignedRanges)), 13523 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 13524 UniquePreds(std::move(Arg.UniquePreds)), 13525 SCEVAllocator(std::move(Arg.SCEVAllocator)), 13526 LoopUsers(std::move(Arg.LoopUsers)), 13527 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 13528 FirstUnknown(Arg.FirstUnknown) { 13529 Arg.FirstUnknown = nullptr; 13530 } 13531 13532 ScalarEvolution::~ScalarEvolution() { 13533 // Iterate through all the SCEVUnknown instances and call their 13534 // destructors, so that they release their references to their values. 
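  // (SCEVUnknowns are bump-allocated in SCEVAllocator and threaded through
  // an intrusive list via FirstUnknown/Next, so their destructors are run
  // manually here; the memory itself is reclaimed with the allocator.)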
13535 for (SCEVUnknown *U = FirstUnknown; U;) { 13536 SCEVUnknown *Tmp = U; 13537 U = U->Next; 13538 Tmp->~SCEVUnknown(); 13539 } 13540 FirstUnknown = nullptr; 13541 13542 ExprValueMap.clear(); 13543 ValueExprMap.clear(); 13544 HasRecMap.clear(); 13545 BackedgeTakenCounts.clear(); 13546 PredicatedBackedgeTakenCounts.clear(); 13547 13548 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 13549 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 13550 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 13551 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 13552 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 13553 } 13554 13555 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 13556 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 13557 } 13558 13559 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 13560 const Loop *L) { 13561 // Print all inner loops first 13562 for (Loop *I : *L) 13563 PrintLoopInfo(OS, SE, I); 13564 13565 OS << "Loop "; 13566 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13567 OS << ": "; 13568 13569 SmallVector<BasicBlock *, 8> ExitingBlocks; 13570 L->getExitingBlocks(ExitingBlocks); 13571 if (ExitingBlocks.size() != 1) 13572 OS << "<multiple exits> "; 13573 13574 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 13575 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 13576 else 13577 OS << "Unpredictable backedge-taken count.\n"; 13578 13579 if (ExitingBlocks.size() > 1) 13580 for (BasicBlock *ExitingBlock : ExitingBlocks) { 13581 OS << " exit count for " << ExitingBlock->getName() << ": " 13582 << *SE->getExitCount(L, ExitingBlock) << "\n"; 13583 } 13584 13585 OS << "Loop "; 13586 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13587 OS << ": "; 13588 13589 auto *ConstantBTC = SE->getConstantMaxBackedgeTakenCount(L); 13590 if (!isa<SCEVCouldNotCompute>(ConstantBTC)) { 13591 OS << "constant max backedge-taken count is " << *ConstantBTC; 13592 if (SE->isBackedgeTakenCountMaxOrZero(L)) 13593 OS << ", actual taken count either this or zero."; 13594 } else { 13595 OS << "Unpredictable constant max backedge-taken count. "; 13596 } 13597 13598 OS << "\n" 13599 "Loop "; 13600 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13601 OS << ": "; 13602 13603 auto *SymbolicBTC = SE->getSymbolicMaxBackedgeTakenCount(L); 13604 if (!isa<SCEVCouldNotCompute>(SymbolicBTC)) { 13605 OS << "symbolic max backedge-taken count is " << *SymbolicBTC; 13606 if (SE->isBackedgeTakenCountMaxOrZero(L)) 13607 OS << ", actual taken count either this or zero."; 13608 } else { 13609 OS << "Unpredictable symbolic max backedge-taken count. 
"; 13610 } 13611 13612 OS << "\n"; 13613 if (ExitingBlocks.size() > 1) 13614 for (BasicBlock *ExitingBlock : ExitingBlocks) { 13615 OS << " symbolic max exit count for " << ExitingBlock->getName() << ": " 13616 << *SE->getExitCount(L, ExitingBlock, ScalarEvolution::SymbolicMaximum) 13617 << "\n"; 13618 } 13619 13620 OS << "Loop "; 13621 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13622 OS << ": "; 13623 13624 SmallVector<const SCEVPredicate *, 4> Preds; 13625 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Preds); 13626 if (!isa<SCEVCouldNotCompute>(PBT)) { 13627 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 13628 OS << " Predicates:\n"; 13629 for (const auto *P : Preds) 13630 P->print(OS, 4); 13631 } else { 13632 OS << "Unpredictable predicated backedge-taken count. "; 13633 } 13634 OS << "\n"; 13635 13636 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 13637 OS << "Loop "; 13638 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13639 OS << ": "; 13640 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 13641 } 13642 } 13643 13644 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 13645 switch (LD) { 13646 case ScalarEvolution::LoopVariant: 13647 return "Variant"; 13648 case ScalarEvolution::LoopInvariant: 13649 return "Invariant"; 13650 case ScalarEvolution::LoopComputable: 13651 return "Computable"; 13652 } 13653 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 13654 } 13655 13656 void ScalarEvolution::print(raw_ostream &OS) const { 13657 // ScalarEvolution's implementation of the print method is to print 13658 // out SCEV values of all instructions that are interesting. Doing 13659 // this potentially causes it to create new SCEV objects though, 13660 // which technically conflicts with the const qualifier. This isn't 13661 // observable from outside the class though, so casting away the 13662 // const isn't dangerous. 
13663 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 13664 13665 if (ClassifyExpressions) { 13666 OS << "Classifying expressions for: "; 13667 F.printAsOperand(OS, /*PrintType=*/false); 13668 OS << "\n"; 13669 for (Instruction &I : instructions(F)) 13670 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 13671 OS << I << '\n'; 13672 OS << " --> "; 13673 const SCEV *SV = SE.getSCEV(&I); 13674 SV->print(OS); 13675 if (!isa<SCEVCouldNotCompute>(SV)) { 13676 OS << " U: "; 13677 SE.getUnsignedRange(SV).print(OS); 13678 OS << " S: "; 13679 SE.getSignedRange(SV).print(OS); 13680 } 13681 13682 const Loop *L = LI.getLoopFor(I.getParent()); 13683 13684 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 13685 if (AtUse != SV) { 13686 OS << " --> "; 13687 AtUse->print(OS); 13688 if (!isa<SCEVCouldNotCompute>(AtUse)) { 13689 OS << " U: "; 13690 SE.getUnsignedRange(AtUse).print(OS); 13691 OS << " S: "; 13692 SE.getSignedRange(AtUse).print(OS); 13693 } 13694 } 13695 13696 if (L) { 13697 OS << "\t\t" "Exits: "; 13698 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 13699 if (!SE.isLoopInvariant(ExitValue, L)) { 13700 OS << "<<Unknown>>"; 13701 } else { 13702 OS << *ExitValue; 13703 } 13704 13705 bool First = true; 13706 for (const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 13707 if (First) { 13708 OS << "\t\t" "LoopDispositions: { "; 13709 First = false; 13710 } else { 13711 OS << ", "; 13712 } 13713 13714 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13715 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 13716 } 13717 13718 for (const auto *InnerL : depth_first(L)) { 13719 if (InnerL == L) 13720 continue; 13721 if (First) { 13722 OS << "\t\t" "LoopDispositions: { "; 13723 First = false; 13724 } else { 13725 OS << ", "; 13726 } 13727 13728 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13729 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 13730 } 13731 13732 OS << " }"; 13733 } 13734 13735 OS << "\n"; 13736 } 13737 } 13738 13739 OS << "Determining loop execution counts for: "; 13740 F.printAsOperand(OS, /*PrintType=*/false); 13741 OS << "\n"; 13742 for (Loop *I : LI) 13743 PrintLoopInfo(OS, &SE, I); 13744 } 13745 13746 ScalarEvolution::LoopDisposition 13747 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 13748 auto &Values = LoopDispositions[S]; 13749 for (auto &V : Values) { 13750 if (V.getPointer() == L) 13751 return V.getInt(); 13752 } 13753 Values.emplace_back(L, LoopVariant); 13754 LoopDisposition D = computeLoopDisposition(S, L); 13755 auto &Values2 = LoopDispositions[S]; 13756 for (auto &V : llvm::reverse(Values2)) { 13757 if (V.getPointer() == L) { 13758 V.setInt(D); 13759 break; 13760 } 13761 } 13762 return D; 13763 } 13764 13765 ScalarEvolution::LoopDisposition 13766 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 13767 switch (S->getSCEVType()) { 13768 case scConstant: 13769 return LoopInvariant; 13770 case scAddRecExpr: { 13771 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 13772 13773 // If L is the addrec's loop, it's computable. 13774 if (AR->getLoop() == L) 13775 return LoopComputable; 13776 13777 // Add recurrences are never invariant in the function-body (null loop). 13778 if (!L) 13779 return LoopVariant; 13780 13781 // Everything that is not defined at loop entry is variant. 
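    // (If L's header dominates the addrec's loop header, every path to the
    // recurrence passes through L's entry, so its value is not available
    // before L and cannot be invariant across L.)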
13782 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 13783 return LoopVariant; 13784 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 13785 " dominate the contained loop's header?"); 13786 13787 // This recurrence is invariant w.r.t. L if AR's loop contains L. 13788 if (AR->getLoop()->contains(L)) 13789 return LoopInvariant; 13790 13791 // This recurrence is variant w.r.t. L if any of its operands 13792 // are variant. 13793 for (const auto *Op : AR->operands()) 13794 if (!isLoopInvariant(Op, L)) 13795 return LoopVariant; 13796 13797 // Otherwise it's loop-invariant. 13798 return LoopInvariant; 13799 } 13800 case scTruncate: 13801 case scZeroExtend: 13802 case scSignExtend: 13803 case scPtrToInt: 13804 case scAddExpr: 13805 case scMulExpr: 13806 case scUDivExpr: 13807 case scUMaxExpr: 13808 case scSMaxExpr: 13809 case scUMinExpr: 13810 case scSMinExpr: 13811 case scSequentialUMinExpr: { 13812 bool HasVarying = false; 13813 for (const auto *Op : S->operands()) { 13814 LoopDisposition D = getLoopDisposition(Op, L); 13815 if (D == LoopVariant) 13816 return LoopVariant; 13817 if (D == LoopComputable) 13818 HasVarying = true; 13819 } 13820 return HasVarying ? LoopComputable : LoopInvariant; 13821 } 13822 case scUnknown: 13823 // All non-instruction values are loop invariant. All instructions are loop 13824 // invariant if they are not contained in the specified loop. 13825 // Instructions are never considered invariant in the function body 13826 // (null loop) because they are defined within the "loop". 13827 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 13828 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant; 13829 return LoopInvariant; 13830 case scCouldNotCompute: 13831 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 13832 } 13833 llvm_unreachable("Unknown SCEV kind!"); 13834 } 13835 13836 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 13837 return getLoopDisposition(S, L) == LoopInvariant; 13838 } 13839 13840 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 13841 return getLoopDisposition(S, L) == LoopComputable; 13842 } 13843 13844 ScalarEvolution::BlockDisposition 13845 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 13846 auto &Values = BlockDispositions[S]; 13847 for (auto &V : Values) { 13848 if (V.getPointer() == BB) 13849 return V.getInt(); 13850 } 13851 Values.emplace_back(BB, DoesNotDominateBlock); 13852 BlockDisposition D = computeBlockDisposition(S, BB); 13853 auto &Values2 = BlockDispositions[S]; 13854 for (auto &V : llvm::reverse(Values2)) { 13855 if (V.getPointer() == BB) { 13856 V.setInt(D); 13857 break; 13858 } 13859 } 13860 return D; 13861 } 13862 13863 ScalarEvolution::BlockDisposition 13864 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 13865 switch (S->getSCEVType()) { 13866 case scConstant: 13867 return ProperlyDominatesBlock; 13868 case scAddRecExpr: { 13869 // This uses a "dominates" query instead of "properly dominates" query 13870 // to test for proper dominance too, because the instruction which 13871 // produces the addrec's value is a PHI, and a PHI effectively properly 13872 // dominates its entire containing block. 13873 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 13874 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 13875 return DoesNotDominateBlock; 13876 13877 // Fall through into SCEVNAryExpr handling. 
13878 [[fallthrough]]; 13879 } 13880 case scTruncate: 13881 case scZeroExtend: 13882 case scSignExtend: 13883 case scPtrToInt: 13884 case scAddExpr: 13885 case scMulExpr: 13886 case scUDivExpr: 13887 case scUMaxExpr: 13888 case scSMaxExpr: 13889 case scUMinExpr: 13890 case scSMinExpr: 13891 case scSequentialUMinExpr: { 13892 bool Proper = true; 13893 for (const SCEV *NAryOp : S->operands()) { 13894 BlockDisposition D = getBlockDisposition(NAryOp, BB); 13895 if (D == DoesNotDominateBlock) 13896 return DoesNotDominateBlock; 13897 if (D == DominatesBlock) 13898 Proper = false; 13899 } 13900 return Proper ? ProperlyDominatesBlock : DominatesBlock; 13901 } 13902 case scUnknown: 13903 if (Instruction *I = 13904 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 13905 if (I->getParent() == BB) 13906 return DominatesBlock; 13907 if (DT.properlyDominates(I->getParent(), BB)) 13908 return ProperlyDominatesBlock; 13909 return DoesNotDominateBlock; 13910 } 13911 return ProperlyDominatesBlock; 13912 case scCouldNotCompute: 13913 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 13914 } 13915 llvm_unreachable("Unknown SCEV kind!"); 13916 } 13917 13918 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 13919 return getBlockDisposition(S, BB) >= DominatesBlock; 13920 } 13921 13922 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 13923 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 13924 } 13925 13926 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 13927 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 13928 } 13929 13930 void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L, 13931 bool Predicated) { 13932 auto &BECounts = 13933 Predicated ? 
PredicatedBackedgeTakenCounts : BackedgeTakenCounts; 13934 auto It = BECounts.find(L); 13935 if (It != BECounts.end()) { 13936 for (const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) { 13937 for (const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) { 13938 if (!isa<SCEVConstant>(S)) { 13939 auto UserIt = BECountUsers.find(S); 13940 assert(UserIt != BECountUsers.end()); 13941 UserIt->second.erase({L, Predicated}); 13942 } 13943 } 13944 } 13945 BECounts.erase(It); 13946 } 13947 } 13948 13949 void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) { 13950 SmallPtrSet<const SCEV *, 8> ToForget(SCEVs.begin(), SCEVs.end()); 13951 SmallVector<const SCEV *, 8> Worklist(ToForget.begin(), ToForget.end()); 13952 13953 while (!Worklist.empty()) { 13954 const SCEV *Curr = Worklist.pop_back_val(); 13955 auto Users = SCEVUsers.find(Curr); 13956 if (Users != SCEVUsers.end()) 13957 for (const auto *User : Users->second) 13958 if (ToForget.insert(User).second) 13959 Worklist.push_back(User); 13960 } 13961 13962 for (const auto *S : ToForget) 13963 forgetMemoizedResultsImpl(S); 13964 13965 for (auto I = PredicatedSCEVRewrites.begin(); 13966 I != PredicatedSCEVRewrites.end();) { 13967 std::pair<const SCEV *, const Loop *> Entry = I->first; 13968 if (ToForget.count(Entry.first)) 13969 PredicatedSCEVRewrites.erase(I++); 13970 else 13971 ++I; 13972 } 13973 } 13974 13975 void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) { 13976 LoopDispositions.erase(S); 13977 BlockDispositions.erase(S); 13978 UnsignedRanges.erase(S); 13979 SignedRanges.erase(S); 13980 HasRecMap.erase(S); 13981 MinTrailingZerosCache.erase(S); 13982 13983 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) { 13984 UnsignedWrapViaInductionTried.erase(AR); 13985 SignedWrapViaInductionTried.erase(AR); 13986 } 13987 13988 auto ExprIt = ExprValueMap.find(S); 13989 if (ExprIt != ExprValueMap.end()) { 13990 for (Value *V : ExprIt->second) { 13991 auto ValueIt = ValueExprMap.find_as(V); 13992 if (ValueIt != ValueExprMap.end()) 13993 ValueExprMap.erase(ValueIt); 13994 } 13995 ExprValueMap.erase(ExprIt); 13996 } 13997 13998 auto ScopeIt = ValuesAtScopes.find(S); 13999 if (ScopeIt != ValuesAtScopes.end()) { 14000 for (const auto &Pair : ScopeIt->second) 14001 if (!isa_and_nonnull<SCEVConstant>(Pair.second)) 14002 erase_value(ValuesAtScopesUsers[Pair.second], 14003 std::make_pair(Pair.first, S)); 14004 ValuesAtScopes.erase(ScopeIt); 14005 } 14006 14007 auto ScopeUserIt = ValuesAtScopesUsers.find(S); 14008 if (ScopeUserIt != ValuesAtScopesUsers.end()) { 14009 for (const auto &Pair : ScopeUserIt->second) 14010 erase_value(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S)); 14011 ValuesAtScopesUsers.erase(ScopeUserIt); 14012 } 14013 14014 auto BEUsersIt = BECountUsers.find(S); 14015 if (BEUsersIt != BECountUsers.end()) { 14016 // Work on a copy, as forgetBackedgeTakenCounts() will modify the original. 
14017     auto Copy = BEUsersIt->second;
14018     for (const auto &Pair : Copy)
14019       forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14020     BECountUsers.erase(BEUsersIt);
14021   }
14022
14023   auto FoldUser = FoldCacheUser.find(S);
14024   if (FoldUser != FoldCacheUser.end())
14025     for (auto &KV : FoldUser->second)
14026       FoldCache.erase(KV);
14027   FoldCacheUser.erase(S);
14028 }
14029
14030 void
14031 ScalarEvolution::getUsedLoops(const SCEV *S,
14032                               SmallPtrSetImpl<const Loop *> &LoopsUsed) {
14033   struct FindUsedLoops {
14034     FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14035         : LoopsUsed(LoopsUsed) {}
14036     SmallPtrSetImpl<const Loop *> &LoopsUsed;
14037     bool follow(const SCEV *S) {
14038       if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
14039         LoopsUsed.insert(AR->getLoop());
14040       return true;
14041     }
14042
14043     bool isDone() const { return false; }
14044   };
14045
14046   FindUsedLoops F(LoopsUsed);
14047   SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14048 }
14049
14050 void ScalarEvolution::getReachableBlocks(
14051     SmallPtrSetImpl<BasicBlock *> &Reachable, Function &F) {
14052   SmallVector<BasicBlock *> Worklist;
14053   Worklist.push_back(&F.getEntryBlock());
14054   while (!Worklist.empty()) {
14055     BasicBlock *BB = Worklist.pop_back_val();
14056     if (!Reachable.insert(BB).second)
14057       continue;
14058
14059     Value *Cond;
14060     BasicBlock *TrueBB, *FalseBB;
14061     if (match(BB->getTerminator(), m_Br(m_Value(Cond), m_BasicBlock(TrueBB),
14062                                         m_BasicBlock(FalseBB)))) {
14063       if (auto *C = dyn_cast<ConstantInt>(Cond)) {
14064         Worklist.push_back(C->isOne() ? TrueBB : FalseBB);
14065         continue;
14066       }
14067
14068       if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
14069         const SCEV *L = getSCEV(Cmp->getOperand(0));
14070         const SCEV *R = getSCEV(Cmp->getOperand(1));
14071         if (isKnownPredicateViaConstantRanges(Cmp->getPredicate(), L, R)) {
14072           Worklist.push_back(TrueBB);
14073           continue;
14074         }
14075         if (isKnownPredicateViaConstantRanges(Cmp->getInversePredicate(), L,
14076                                               R)) {
14077           Worklist.push_back(FalseBB);
14078           continue;
14079         }
14080       }
14081     }
14082
14083     append_range(Worklist, successors(BB));
14084   }
14085 }
14086
14087 void ScalarEvolution::verify() const {
14088   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
14089   ScalarEvolution SE2(F, TLI, AC, DT, LI);
14090
14091   SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());
14092
14093   // Maps SCEV expressions from one ScalarEvolution "universe" to another.
14094   struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
14095     SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
14096
14097     const SCEV *visitConstant(const SCEVConstant *Constant) {
14098       return SE.getConstant(Constant->getAPInt());
14099     }
14100
14101     const SCEV *visitUnknown(const SCEVUnknown *Expr) {
14102       return SE.getUnknown(Expr->getValue());
14103     }
14104
14105     const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
14106       return SE.getCouldNotCompute();
14107     }
14108   };
14109
14110   SCEVMapper SCM(SE2);
14111   SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
14112   SE2.getReachableBlocks(ReachableBlocks, F);
14113
14114   auto GetDelta = [&](const SCEV *Old, const SCEV *New) -> const SCEV * {
14115     if (containsUndefs(Old) || containsUndefs(New)) {
14116       // SCEV treats "undef" as an unknown but consistent value (i.e. it does
14117       // not propagate undef aggressively). This means we can (and do) fail
14118       // verification in cases where a transform makes a value go from "undef"
14119       // to "undef+1" (say).
The transform is fine, since in both cases the
14120       // result is "undef", but SCEV thinks the value increased by 1.
14121       return nullptr;
14122     }
14123
14124     // Unless VerifySCEVStrict is set, we only compare constant deltas.
14125     const SCEV *Delta = SE2.getMinusSCEV(Old, New);
14126     if (!VerifySCEVStrict && !isa<SCEVConstant>(Delta))
14127       return nullptr;
14128
14129     return Delta;
14130   };
14131
14132   while (!LoopStack.empty()) {
14133     auto *L = LoopStack.pop_back_val();
14134     llvm::append_range(LoopStack, *L);
14135
14136     // Only verify BECounts in reachable loops. For an unreachable loop,
14137     // any BECount is legal.
14138     if (!ReachableBlocks.contains(L->getHeader()))
14139       continue;
14140
14141     // Only verify cached BECounts. Computing new BECounts may change the
14142     // results of subsequent SCEV uses.
14143     auto It = BackedgeTakenCounts.find(L);
14144     if (It == BackedgeTakenCounts.end())
14145       continue;
14146
14147     auto *CurBECount =
14148         SCM.visit(It->second.getExact(L, const_cast<ScalarEvolution *>(this)));
14149     auto *NewBECount = SE2.getBackedgeTakenCount(L);
14150
14151     if (CurBECount == SE2.getCouldNotCompute() ||
14152         NewBECount == SE2.getCouldNotCompute()) {
14153       // NB! This situation is legal, but is very suspicious -- whatever pass
14154       // changed the loop to make a trip count go from could not compute to
14155       // computable or vice-versa *should have* invalidated SCEV. However, we
14156       // choose not to assert here (for now) since we don't want false
14157       // positives.
14158       continue;
14159     }
14160
14161     if (SE.getTypeSizeInBits(CurBECount->getType()) >
14162         SE.getTypeSizeInBits(NewBECount->getType()))
14163       NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
14164     else if (SE.getTypeSizeInBits(CurBECount->getType()) <
14165              SE.getTypeSizeInBits(NewBECount->getType()))
14166       CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());
14167
14168     const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14169     if (Delta && !Delta->isZero()) {
14170       dbgs() << "Trip Count for " << *L << " Changed!\n";
14171       dbgs() << "Old: " << *CurBECount << "\n";
14172       dbgs() << "New: " << *NewBECount << "\n";
14173       dbgs() << "Delta: " << *Delta << "\n";
14174       std::abort();
14175     }
14176   }
14177
14178   // Collect all valid loops currently in LoopInfo.
14179   SmallPtrSet<Loop *, 32> ValidLoops;
14180   SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
14181   while (!Worklist.empty()) {
14182     Loop *L = Worklist.pop_back_val();
14183     if (ValidLoops.insert(L).second)
14184       Worklist.append(L->begin(), L->end());
14185   }
14186   for (const auto &KV : ValueExprMap) {
14187 #ifndef NDEBUG
14188     // Check for SCEV expressions referencing invalid/deleted loops.
14189     if (auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) {
14190       assert(ValidLoops.contains(AR->getLoop()) &&
14191              "AddRec references invalid loop");
14192     }
14193 #endif
14194
14195     // Check that the value is also part of the reverse map.
14196 auto It = ExprValueMap.find(KV.second); 14197 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) { 14198 dbgs() << "Value " << *KV.first 14199 << " is in ValueExprMap but not in ExprValueMap\n"; 14200 std::abort(); 14201 } 14202 14203 if (auto *I = dyn_cast<Instruction>(&*KV.first)) { 14204 if (!ReachableBlocks.contains(I->getParent())) 14205 continue; 14206 const SCEV *OldSCEV = SCM.visit(KV.second); 14207 const SCEV *NewSCEV = SE2.getSCEV(I); 14208 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV); 14209 if (Delta && !Delta->isZero()) { 14210 dbgs() << "SCEV for value " << *I << " changed!\n" 14211 << "Old: " << *OldSCEV << "\n" 14212 << "New: " << *NewSCEV << "\n" 14213 << "Delta: " << *Delta << "\n"; 14214 std::abort(); 14215 } 14216 } 14217 } 14218 14219 for (const auto &KV : ExprValueMap) { 14220 for (Value *V : KV.second) { 14221 auto It = ValueExprMap.find_as(V); 14222 if (It == ValueExprMap.end()) { 14223 dbgs() << "Value " << *V 14224 << " is in ExprValueMap but not in ValueExprMap\n"; 14225 std::abort(); 14226 } 14227 if (It->second != KV.first) { 14228 dbgs() << "Value " << *V << " mapped to " << *It->second 14229 << " rather than " << *KV.first << "\n"; 14230 std::abort(); 14231 } 14232 } 14233 } 14234 14235 // Verify integrity of SCEV users. 14236 for (const auto &S : UniqueSCEVs) { 14237 for (const auto *Op : S.operands()) { 14238 // We do not store dependencies of constants. 14239 if (isa<SCEVConstant>(Op)) 14240 continue; 14241 auto It = SCEVUsers.find(Op); 14242 if (It != SCEVUsers.end() && It->second.count(&S)) 14243 continue; 14244 dbgs() << "Use of operand " << *Op << " by user " << S 14245 << " is not being tracked!\n"; 14246 std::abort(); 14247 } 14248 } 14249 14250 // Verify integrity of ValuesAtScopes users. 14251 for (const auto &ValueAndVec : ValuesAtScopes) { 14252 const SCEV *Value = ValueAndVec.first; 14253 for (const auto &LoopAndValueAtScope : ValueAndVec.second) { 14254 const Loop *L = LoopAndValueAtScope.first; 14255 const SCEV *ValueAtScope = LoopAndValueAtScope.second; 14256 if (!isa<SCEVConstant>(ValueAtScope)) { 14257 auto It = ValuesAtScopesUsers.find(ValueAtScope); 14258 if (It != ValuesAtScopesUsers.end() && 14259 is_contained(It->second, std::make_pair(L, Value))) 14260 continue; 14261 dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: " 14262 << *ValueAtScope << " missing in ValuesAtScopesUsers\n"; 14263 std::abort(); 14264 } 14265 } 14266 } 14267 14268 for (const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) { 14269 const SCEV *ValueAtScope = ValueAtScopeAndVec.first; 14270 for (const auto &LoopAndValue : ValueAtScopeAndVec.second) { 14271 const Loop *L = LoopAndValue.first; 14272 const SCEV *Value = LoopAndValue.second; 14273 assert(!isa<SCEVConstant>(Value)); 14274 auto It = ValuesAtScopes.find(Value); 14275 if (It != ValuesAtScopes.end() && 14276 is_contained(It->second, std::make_pair(L, ValueAtScope))) 14277 continue; 14278 dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: " 14279 << *ValueAtScope << " missing in ValuesAtScopes\n"; 14280 std::abort(); 14281 } 14282 } 14283 14284 // Verify integrity of BECountUsers. 14285 auto VerifyBECountUsers = [&](bool Predicated) { 14286 auto &BECounts = 14287 Predicated ? 
PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14288     for (const auto &LoopAndBEInfo : BECounts) {
14289       for (const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14290         for (const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14291           if (!isa<SCEVConstant>(S)) {
14292             auto UserIt = BECountUsers.find(S);
14293             if (UserIt != BECountUsers.end() &&
14294                 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14295               continue;
14296             dbgs() << "Value " << *S << " for loop " << *LoopAndBEInfo.first
14297                    << " missing from BECountUsers\n";
14298             std::abort();
14299           }
14300         }
14301       }
14302     }
14303   };
14304   VerifyBECountUsers(/* Predicated */ false);
14305   VerifyBECountUsers(/* Predicated */ true);
14306
14307   // Verify integrity of the loop disposition cache.
14308   for (auto &[S, Values] : LoopDispositions) {
14309     for (auto [Loop, CachedDisposition] : Values) {
14310       const auto RecomputedDisposition = SE2.getLoopDisposition(S, Loop);
14311       if (CachedDisposition != RecomputedDisposition) {
14312         dbgs() << "Cached disposition of " << *S << " for loop " << *Loop
14313                << " is incorrect: cached "
14314                << loopDispositionToStr(CachedDisposition) << ", actual "
14315                << loopDispositionToStr(RecomputedDisposition) << "\n";
14316         std::abort();
14317       }
14318     }
14319   }
14320
14321   // Verify integrity of the block disposition cache.
14322   for (auto &[S, Values] : BlockDispositions) {
14323     for (auto [BB, CachedDisposition] : Values) {
14324       const auto RecomputedDisposition = SE2.getBlockDisposition(S, BB);
14325       if (CachedDisposition != RecomputedDisposition) {
14326         dbgs() << "Cached disposition of " << *S << " for block %"
14327                << BB->getName() << " is incorrect!\n";
14328         std::abort();
14329       }
14330     }
14331   }
14332
14333   // Verify FoldCache/FoldCacheUser caches.
14334   for (auto [FoldID, Expr] : FoldCache) {
14335     auto I = FoldCacheUser.find(Expr);
14336     if (I == FoldCacheUser.end()) {
14337       dbgs() << "Missing entry in FoldCacheUser for cached expression " << *Expr
14338              << "!\n";
14339       std::abort();
14340     }
14341     if (!is_contained(I->second, FoldID)) {
14342       dbgs() << "Missing FoldID in cached users of " << *Expr << "!\n";
14343       std::abort();
14344     }
14345   }
14346   for (auto [Expr, IDs] : FoldCacheUser) {
14347     for (auto &FoldID : IDs) {
14348       auto I = FoldCache.find(FoldID);
14349       if (I == FoldCache.end()) {
14350         dbgs() << "Missing entry in FoldCache for expression " << *Expr
14351                << "!\n";
14352         std::abort();
14353       }
14354       if (I->second != Expr) {
14355         dbgs() << "Entry in FoldCache doesn't match FoldCacheUser: "
14356                << *I->second << " != " << *Expr << "!\n";
14357         std::abort();
14358       }
14359     }
14360   }
14361 }
14362
14363 bool ScalarEvolution::invalidate(
14364     Function &F, const PreservedAnalyses &PA,
14365     FunctionAnalysisManager::Invalidator &Inv) {
14366   // Invalidate the ScalarEvolution object whenever it isn't preserved or one
14367   // of its dependencies is invalidated.
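  // (ScalarEvolution holds references to the assumption cache, dominator
  // tree and loop info it was built with, so it cannot stay valid once any
  // of them is invalidated.)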
14368 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 14369 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 14370 Inv.invalidate<AssumptionAnalysis>(F, PA) || 14371 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 14372 Inv.invalidate<LoopAnalysis>(F, PA); 14373 } 14374 14375 AnalysisKey ScalarEvolutionAnalysis::Key; 14376 14377 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 14378 FunctionAnalysisManager &AM) { 14379 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 14380 AM.getResult<AssumptionAnalysis>(F), 14381 AM.getResult<DominatorTreeAnalysis>(F), 14382 AM.getResult<LoopAnalysis>(F)); 14383 } 14384 14385 PreservedAnalyses 14386 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 14387 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 14388 return PreservedAnalyses::all(); 14389 } 14390 14391 PreservedAnalyses 14392 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 14393 // For compatibility with opt's -analyze feature under legacy pass manager 14394 // which was not ported to NPM. This keeps tests using 14395 // update_analyze_test_checks.py working. 14396 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 14397 << F.getName() << "':\n"; 14398 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 14399 return PreservedAnalyses::all(); 14400 } 14401 14402 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 14403 "Scalar Evolution Analysis", false, true) 14404 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 14405 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 14406 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 14407 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 14408 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 14409 "Scalar Evolution Analysis", false, true) 14410 14411 char ScalarEvolutionWrapperPass::ID = 0; 14412 14413 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 14414 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 14415 } 14416 14417 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 14418 SE.reset(new ScalarEvolution( 14419 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 14420 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 14421 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 14422 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 14423 return false; 14424 } 14425 14426 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 14427 14428 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 14429 SE->print(OS); 14430 } 14431 14432 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 14433 if (!VerifySCEV) 14434 return; 14435 14436 SE->verify(); 14437 } 14438 14439 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 14440 AU.setPreservesAll(); 14441 AU.addRequiredTransitive<AssumptionCacheTracker>(); 14442 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 14443 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 14444 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 14445 } 14446 14447 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 14448 const SCEV *RHS) { 14449 return getComparePredicate(ICmpInst::ICMP_EQ, LHS, RHS); 14450 } 14451 14452 const SCEVPredicate * 14453 ScalarEvolution::getComparePredicate(const ICmpInst::Predicate Pred, 14454 const SCEV *LHS, const SCEV *RHS) { 14455 
FoldingSetNodeID ID; 14456 assert(LHS->getType() == RHS->getType() && 14457 "Type mismatch between LHS and RHS"); 14458 // Unique this node based on the arguments 14459 ID.AddInteger(SCEVPredicate::P_Compare); 14460 ID.AddInteger(Pred); 14461 ID.AddPointer(LHS); 14462 ID.AddPointer(RHS); 14463 void *IP = nullptr; 14464 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 14465 return S; 14466 SCEVComparePredicate *Eq = new (SCEVAllocator) 14467 SCEVComparePredicate(ID.Intern(SCEVAllocator), Pred, LHS, RHS); 14468 UniquePreds.InsertNode(Eq, IP); 14469 return Eq; 14470 } 14471 14472 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 14473 const SCEVAddRecExpr *AR, 14474 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 14475 FoldingSetNodeID ID; 14476 // Unique this node based on the arguments 14477 ID.AddInteger(SCEVPredicate::P_Wrap); 14478 ID.AddPointer(AR); 14479 ID.AddInteger(AddedFlags); 14480 void *IP = nullptr; 14481 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 14482 return S; 14483 auto *OF = new (SCEVAllocator) 14484 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 14485 UniquePreds.InsertNode(OF, IP); 14486 return OF; 14487 } 14488 14489 namespace { 14490 14491 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 14492 public: 14493 14494 /// Rewrites \p S in the context of a loop L and the SCEV predication 14495 /// infrastructure. 14496 /// 14497 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 14498 /// equivalences present in \p Pred. 14499 /// 14500 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 14501 /// \p NewPreds such that the result will be an AddRecExpr. 14502 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 14503 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 14504 const SCEVPredicate *Pred) { 14505 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 14506 return Rewriter.visit(S); 14507 } 14508 14509 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 14510 if (Pred) { 14511 if (auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) { 14512 for (const auto *Pred : U->getPredicates()) 14513 if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) 14514 if (IPred->getLHS() == Expr && 14515 IPred->getPredicate() == ICmpInst::ICMP_EQ) 14516 return IPred->getRHS(); 14517 } else if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) { 14518 if (IPred->getLHS() == Expr && 14519 IPred->getPredicate() == ICmpInst::ICMP_EQ) 14520 return IPred->getRHS(); 14521 } 14522 } 14523 return convertToAddRecWithPreds(Expr); 14524 } 14525 14526 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 14527 const SCEV *Operand = visit(Expr->getOperand()); 14528 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 14529 if (AR && AR->getLoop() == L && AR->isAffine()) { 14530 // This couldn't be folded because the operand didn't have the nuw 14531 // flag. Add the nusw flag as an assumption that we could make. 
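      // For example (illustrative): zext i32 {0,+,1}<%L> to i64 becomes
      // {0,+,1}<%L> in i64 under an assumed <nusw> wrap predicate on the
      // addrec: the start is zero-extended and the step sign-extended below.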
14532 const SCEV *Step = AR->getStepRecurrence(SE); 14533 Type *Ty = Expr->getType(); 14534 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 14535 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 14536 SE.getSignExtendExpr(Step, Ty), L, 14537 AR->getNoWrapFlags()); 14538 } 14539 return SE.getZeroExtendExpr(Operand, Expr->getType()); 14540 } 14541 14542 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 14543 const SCEV *Operand = visit(Expr->getOperand()); 14544 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 14545 if (AR && AR->getLoop() == L && AR->isAffine()) { 14546 // This couldn't be folded because the operand didn't have the nsw 14547 // flag. Add the nssw flag as an assumption that we could make. 14548 const SCEV *Step = AR->getStepRecurrence(SE); 14549 Type *Ty = Expr->getType(); 14550 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 14551 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 14552 SE.getSignExtendExpr(Step, Ty), L, 14553 AR->getNoWrapFlags()); 14554 } 14555 return SE.getSignExtendExpr(Operand, Expr->getType()); 14556 } 14557 14558 private: 14559 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 14560 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 14561 const SCEVPredicate *Pred) 14562 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 14563 14564 bool addOverflowAssumption(const SCEVPredicate *P) { 14565 if (!NewPreds) { 14566 // Check if we've already made this assumption. 14567 return Pred && Pred->implies(P); 14568 } 14569 NewPreds->insert(P); 14570 return true; 14571 } 14572 14573 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 14574 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 14575 auto *A = SE.getWrapPredicate(AR, AddedFlags); 14576 return addOverflowAssumption(A); 14577 } 14578 14579 // If \p Expr represents a PHINode, we try to see if it can be represented 14580 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 14581 // to add this predicate as a runtime overflow check, we return the AddRec. 14582 // If \p Expr does not meet these conditions (is not a PHI node, or we 14583 // couldn't create an AddRec for it, or couldn't add the predicate), we just 14584 // return \p Expr. 14585 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 14586 if (!isa<PHINode>(Expr->getValue())) 14587 return Expr; 14588 std::optional< 14589 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 14590 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 14591 if (!PredicatedRewrite) 14592 return Expr; 14593 for (const auto *P : PredicatedRewrite->second){ 14594 // Wrap predicates from outer loops are not supported. 
14595       if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
14596         if (L != WP->getExpr()->getLoop())
14597           return Expr;
14598       }
14599       if (!addOverflowAssumption(P))
14600         return Expr;
14601     }
14602     return PredicatedRewrite->first;
14603   }
14604
14605   SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
14606   const SCEVPredicate *Pred;
14607   const Loop *L;
14608 };
14609
14610 } // end anonymous namespace
14611
14612 const SCEV *
14613 ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
14614                                        const SCEVPredicate &Preds) {
14615   return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
14616 }
14617
14618 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
14619     const SCEV *S, const Loop *L,
14620     SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
14621   SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
14622   S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
14623   auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
14624
14625   if (!AddRec)
14626     return nullptr;
14627
14628   // Since the transformation was successful, we can now transfer the SCEV
14629   // predicates.
14630   for (const auto *P : TransformPreds)
14631     Preds.insert(P);
14632
14633   return AddRec;
14634 }
14635
14636 /// SCEV predicates
14637 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
14638                              SCEVPredicateKind Kind)
14639     : FastID(ID), Kind(Kind) {}
14640
14641 SCEVComparePredicate::SCEVComparePredicate(const FoldingSetNodeIDRef ID,
14642                                            const ICmpInst::Predicate Pred,
14643                                            const SCEV *LHS, const SCEV *RHS)
14644     : SCEVPredicate(ID, P_Compare), Pred(Pred), LHS(LHS), RHS(RHS) {
14645   assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
14646   assert(LHS != RHS && "LHS and RHS are the same SCEV");
14647 }
14648
14649 bool SCEVComparePredicate::implies(const SCEVPredicate *N) const {
14650   const auto *Op = dyn_cast<SCEVComparePredicate>(N);
14651
14652   if (!Op)
14653     return false;
14654
14655   if (Pred != ICmpInst::ICMP_EQ)
14656     return false;
14657
14658   return Op->LHS == LHS && Op->RHS == RHS;
14659 }
14660
14661 bool SCEVComparePredicate::isAlwaysTrue() const { return false; }
14662
14663 void SCEVComparePredicate::print(raw_ostream &OS, unsigned Depth) const {
14664   if (Pred == ICmpInst::ICMP_EQ)
14665     OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
14666   else
14667     OS.indent(Depth) << "Compare predicate: " << *LHS
14668                      << " " << CmpInst::getPredicateName(Pred) << " "
14669                      << *RHS << "\n";
14670
14671 }
14672
14673 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
14674                                      const SCEVAddRecExpr *AR,
14675                                      IncrementWrapFlags Flags)
14676     : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}
14677
14678 const SCEVAddRecExpr *SCEVWrapPredicate::getExpr() const { return AR; }
14679
14680 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
14681   const auto *Op = dyn_cast<SCEVWrapPredicate>(N);
14682
14683   return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
14684 }
14685
14686 bool SCEVWrapPredicate::isAlwaysTrue() const {
14687   SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
14688   IncrementWrapFlags IFlags = Flags;
14689
14690   if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
14691     IFlags = clearFlags(IFlags, IncrementNSSW);
14692
14693   return IFlags == IncrementAnyWrap;
14694 }
14695
14696 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
14697   OS.indent(Depth) << *getExpr() << " Added Flags: ";
14698   if
(SCEVWrapPredicate::IncrementNUSW & getFlags()) 14699 OS << "<nusw>"; 14700 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 14701 OS << "<nssw>"; 14702 OS << "\n"; 14703 } 14704 14705 SCEVWrapPredicate::IncrementWrapFlags 14706 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 14707 ScalarEvolution &SE) { 14708 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 14709 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 14710 14711 // We can safely transfer the NSW flag as NSSW. 14712 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 14713 ImpliedFlags = IncrementNSSW; 14714 14715 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 14716 // If the increment is positive, the SCEV NUW flag will also imply the 14717 // WrapPredicate NUSW flag. 14718 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 14719 if (Step->getValue()->getValue().isNonNegative()) 14720 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 14721 } 14722 14723 return ImpliedFlags; 14724 } 14725 14726 /// Union predicates don't get cached so create a dummy set ID for it. 14727 SCEVUnionPredicate::SCEVUnionPredicate(ArrayRef<const SCEVPredicate *> Preds) 14728 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) { 14729 for (const auto *P : Preds) 14730 add(P); 14731 } 14732 14733 bool SCEVUnionPredicate::isAlwaysTrue() const { 14734 return all_of(Preds, 14735 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 14736 } 14737 14738 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 14739 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 14740 return all_of(Set->Preds, 14741 [this](const SCEVPredicate *I) { return this->implies(I); }); 14742 14743 return any_of(Preds, 14744 [N](const SCEVPredicate *I) { return I->implies(N); }); 14745 } 14746 14747 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 14748 for (const auto *Pred : Preds) 14749 Pred->print(OS, Depth); 14750 } 14751 14752 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 14753 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 14754 for (const auto *Pred : Set->Preds) 14755 add(Pred); 14756 return; 14757 } 14758 14759 Preds.push_back(N); 14760 } 14761 14762 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 14763 Loop &L) 14764 : SE(SE), L(L) { 14765 SmallVector<const SCEVPredicate*, 4> Empty; 14766 Preds = std::make_unique<SCEVUnionPredicate>(Empty); 14767 } 14768 14769 void ScalarEvolution::registerUser(const SCEV *User, 14770 ArrayRef<const SCEV *> Ops) { 14771 for (const auto *Op : Ops) 14772 // We do not expect that forgetting cached data for SCEVConstants will ever 14773 // open any prospects for sharpening or introduce any correctness issues, 14774 // so we don't bother storing their dependencies. 14775 if (!isa<SCEVConstant>(Op)) 14776 SCEVUsers[Op].insert(User); 14777 } 14778 14779 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 14780 const SCEV *Expr = SE.getSCEV(V); 14781 RewriteEntry &Entry = RewriteMap[Expr]; 14782 14783 // If we already have an entry and the version matches, return it. 14784 if (Entry.second && Generation == Entry.first) 14785 return Entry.second; 14786 14787 // We found an entry but it's stale. Rewrite the stale entry 14788 // according to the current predicate. 
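  // (Staleness is tracked through Generation: addPredicate() bumps it via
  // updateGeneration(), so entries rewritten under an older predicate set
  // are redone here on demand.)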
const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SmallVector<const SCEVPredicate *, 4> Preds;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
    for (const auto *P : Preds)
      addPredicate(*P);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds->implies(&Pred))
    return;

  auto &OldPreds = Preds->getPredicates();
  SmallVector<const SCEVPredicate *, 4> NewPreds(OldPreds.begin(),
                                                 OldPreds.end());
  NewPreds.push_back(&Pred);
  Preds = std::make_unique<SCEVUnionPredicate>(NewPreds);
  updateGeneration();
}

const SCEVPredicate &PredicatedScalarEvolution::getPredicate() const {
  return *Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, *Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (const auto *P : NewPreds)
    addPredicate(*P);

  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L),
      Preds(std::make_unique<SCEVUnionPredicate>(Init.Preds->getPredicates())),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (auto I : Init.FlagsMap)
    FlagsMap.insert(I);
}
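// Hedged client-side usage sketch (the variable names are hypothetical):
// getAsAddRec may succeed where plain SCEV analysis cannot prove an AddRec,
// at the cost of predicates that the caller must later materialize as
// runtime checks via getPredicate():
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
//     // AR is valid only under PSE.getPredicate(); emit the corresponding
//     // runtime checks before relying on it.
//   }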
void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}

// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
// for URem with constant power-of-2 second operands.
// It's not always easy, as A and B can be folded (imagine A is X / 2, and B is
// 4, A / B becomes X / 8).
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  // Try to match 'zext (trunc A to iB) to iY', which is used
  // for URem with constant power-of-2 second operands. Make sure the size of
  // the operand A matches the size of the whole expression.
  if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
    if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
      LHS = Trunc->getOperand();
      // Bail out if the type of the LHS is larger than the type of the
      // expression for now.
      if (getTypeSizeInBits(LHS->getType()) >
          getTypeSizeInBits(Expr->getType()))
        return false;
      if (LHS->getType() != Expr->getType())
        LHS = getZeroExtendExpr(LHS, Expr->getType());
      RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
                        << getTypeSizeInBits(Trunc->getType()));
      return true;
    }
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}
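// Worked example for matchURem (illustrative SCEVs): for i32 values %a and
// %b, the A - (A /u B) * B shape appears canonicalized as
// ((-1 * (%a /u %b) * %b) + %a) and matches with LHS = %a, RHS = %b, i.e. it
// is %a urem %b. For a power-of-2 divisor the pattern instead appears as
// (zext i5 (trunc i32 %a to i5) to i32), which keeps the low 5 bits of %a and
// therefore matches with LHS = %a and RHS = 32 (1 << 5).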
const SCEV *
ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock *, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Form an expression for the maximum exit count possible for this loop. We
  // merge the max and exact information to approximate a version of
  // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
  SmallVector<const SCEV *, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount =
        getExitCount(L, ExitingBB, ScalarEvolution::SymbolicMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts, /*Sequential*/ true);
}

/// A rewriter to replace SCEV expressions in Map with the corresponding entry
/// in the map. It skips AddRecExpr because we cannot guarantee that the
/// replacement is loop invariant in the loop of the AddRec.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  const DenseMap<const SCEV *, const SCEV *> &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE,
                        DenseMap<const SCEV *, const SCEV *> &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return Expr;
    return I->second;
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr(
          Expr);
    return I->second;
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSignExtendExpr(
          Expr);
    return I->second;
  }

  const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitUMinExpr(Expr);
    return I->second;
  }

  const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSMinExpr(Expr);
    return I->second;
  }
};
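// Worked example (illustrative IR) of the guard-driven rewriting performed by
// applyLoopGuards below: given a dominating guard
//
//   %c = icmp ult i32 %n, 16
//   br i1 %c, label %loop.ph, label %exit
//
// the ICMP_ULT case in CollectCondition maps %n to (umin %n, 15) in
// RewriteMap, so an expression such as (zext i32 %n to i64) queried under
// this loop rewrites to (zext i32 (umin %n, 15) to i64), tightening any
// ranges or trip counts derived from it.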
const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  SmallVector<const SCEV *> ExprsToRewrite;
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS,
                              DenseMap<const SCEV *, const SCEV *>
                                  &RewriteMap) {
    // WARNING: It is generally unsound to apply any wrap flags to the proposed
    // replacement SCEV which isn't directly implied by the structure of that
    // SCEV. In particular, using contextual facts to imply flags is *NOT*
    // legal. See the scoping rules for flags in the header to understand why.

    // If LHS is a constant, apply information to the other expression.
    if (isa<SCEVConstant>(LHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Check for a condition of the form (-C1 + X < C2). InstCombine will
    // create this form when combining two checks of the form (X u< C2 + C1)
    // and (X >=u C1).
    auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap,
                                 &ExprsToRewrite]() {
      auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
      if (!AddExpr || AddExpr->getNumOperands() != 2)
        return false;

      auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
      auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
      auto *C2 = dyn_cast<SCEVConstant>(RHS);
      if (!C1 || !C2 || !LHSUnknown)
        return false;

      auto ExactRegion =
          ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
              .sub(C1->getAPInt());

      // Bail out, unless we have a non-wrapping, monotonic range.
      if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
        return false;
      auto I = RewriteMap.find(LHSUnknown);
      const SCEV *RewrittenLHS =
          I != RewriteMap.end() ? I->second : LHSUnknown;
      RewriteMap[LHSUnknown] = getUMaxExpr(
          getConstant(ExactRegion.getUnsignedMin()),
          getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
      ExprsToRewrite.push_back(LHSUnknown);
      return true;
    };
    if (MatchRangeCheckIdiom())
      return;

    // If we have LHS == 0, check if LHS is computing a property of some
    // unknown SCEV %v which we can rewrite %v to express explicitly.
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
        RHSC->getValue()->isNullValue()) {
      // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
      // explicitly express that.
      const SCEV *URemLHS = nullptr;
      const SCEV *URemRHS = nullptr;
      if (matchURem(LHS, URemLHS, URemRHS)) {
        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
          const auto *Multiple =
              getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS);
          RewriteMap[LHSUnknown] = Multiple;
          ExprsToRewrite.push_back(LHSUnknown);
          return;
        }
      }
    }

    // Do not apply information for constants or if RHS contains an AddRec.
    if (isa<SCEVConstant>(LHS) || containsAddRecurrence(RHS))
      return;

    // If RHS is SCEVUnknown, make sure the information is applied to it.
    if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Check whether LHS has already been rewritten. In that case we want to
    // chain further rewrites onto the already rewritten value.
    auto I = RewriteMap.find(LHS);
    const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
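    // Illustration of the chaining above (hypothetical guards): given x u< 20
    // followed by x u< 10 on the same %x, the second call finds the umin from
    // the first in RewriteMap, so %x ends up mapped to
    // (umin (umin %x, 19), 9) rather than only the last bound seen.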

    const SCEV *RewrittenRHS = nullptr;
    switch (Predicate) {
    case CmpInst::ICMP_ULT: {
      if (RHS->getType()->isPointerTy())
        break;
      const SCEV *One = getOne(RHS->getType());
      RewrittenRHS =
          getUMinExpr(RewrittenLHS, getMinusSCEV(getUMaxExpr(RHS, One), One));
      break;
    }
    case CmpInst::ICMP_SLT:
      RewrittenRHS =
          getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_ULE:
      RewrittenRHS = getUMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SLE:
      RewrittenRHS = getSMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_UGT:
      RewrittenRHS =
          getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SGT:
      RewrittenRHS =
          getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_UGE:
      RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SGE:
      RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewrittenRHS = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }

    if (RewrittenRHS) {
      RewriteMap[LHS] = RewrittenRHS;
      if (LHS == RewrittenLHS)
        ExprsToRewrite.push_back(LHS);
    }
  };

  BasicBlock *Header = L->getHeader();
  SmallVector<PointerIntPair<Value *, 1, bool>> Terms;
  // First, collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(AssumeI, Header))
      continue;
    Terms.emplace_back(AssumeI->getOperand(0), true);
  }

  // Second, collect information from llvm.experimental.guards dominating the
  // loop.
  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  if (GuardDecl)
    for (const auto *GU : GuardDecl->users())
      if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
        if (Guard->getFunction() == Header->getParent() &&
            DT.dominates(Guard, Header))
          Terms.emplace_back(Guard->getArgOperand(0), true);

  // Third, collect conditions from dominating branches. Starting at the loop
  // predecessor, climb up the predecessor chain, as long as there are
  // predecessors that can be found that have unique successors leading to the
  // original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), Header);
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    Terms.emplace_back(LoopEntryPredicate->getCondition(),
                       LoopEntryPredicate->getSuccessor(0) == Pair.second);
  }
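  // Illustrative example of what Terms may hold at this point: for
  //
  //   call void @llvm.assume(i1 %c0)        ; dominates the header
  //   br i1 %c1, label %ph, label %exit     ; loop entered on the true edge
  //
  // Terms contains {%c0, true} and {%c1, true}. The bool records whether the
  // loop is entered on the true edge; a branch that reaches the loop on its
  // false edge is recorded with false, and its condition is inverted when
  // processed below.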
  // Now apply the information from the collected conditions to RewriteMap.
  // Conditions are processed in reverse order, so the earliest condition is
  // processed first. This ensures the SCEVs with the shortest dependency
  // chains are constructed first.
  DenseMap<const SCEV *, const SCEV *> RewriteMap;
  for (auto [Term, EnterIfTrue] : reverse(Terms)) {
    SmallVector<Value *, 8> Worklist;
    SmallPtrSet<Value *, 8> Visited;
    Worklist.push_back(Term);
    while (!Worklist.empty()) {
      Value *Cond = Worklist.pop_back_val();
      if (!Visited.insert(Cond).second)
        continue;

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        auto Predicate =
            EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
        const auto *LHS = getSCEV(Cmp->getOperand(0));
        const auto *RHS = getSCEV(Cmp->getOperand(1));
        CollectCondition(Predicate, LHS, RHS, RewriteMap);
        continue;
      }

      Value *L, *R;
      if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
                      : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
        Worklist.push_back(L);
        Worklist.push_back(R);
      }
    }
  }

  if (RewriteMap.empty())
    return Expr;

  // Now that all rewrite information is collected, rewrite the collected
  // expressions with the information in the map. This applies information to
  // sub-expressions.
  if (ExprsToRewrite.size() > 1) {
    for (const SCEV *Expr : ExprsToRewrite) {
      const SCEV *RewriteTo = RewriteMap[Expr];
      RewriteMap.erase(Expr);
      SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
      RewriteMap.insert({Expr, Rewriter.visit(RewriteTo)});
    }
  }

  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}
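// End-to-end sketch of applyLoopGuards (illustrative IR, not from any test):
//
//   %guard = icmp ult i32 %n, 100
//   br i1 %guard, label %ph, label %exit
//   ...
//   ; inside the loop, Expr = (zext i32 %n to i64)
//
// The dominating branch contributes %n u< 100, so CollectCondition maps %n to
// (umin %n, 99) in RewriteMap, and SCEVLoopGuardRewriter returns
// (zext i32 (umin %n, 99) to i64), from which tighter ranges and trip-count
// bounds can be derived.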