//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (ie, a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/SaveAndRestore.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
VerifySCEVMap("verify-scev-maps",
              cl::desc("Verify no dangling value in ScalarEvolution's "
                       "ExprValueMap (slow)"));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

LLVM_DUMP_METHOD
void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                          ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
/// SCEVComplexityCompare - Return true if the complexity of the LHS is less
/// than the complexity of the RHS. This comparator is used to canonicalize
/// expressions.
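///
/// For example (illustrative): any SCEVConstant is considered less complex
/// than any SCEVAddExpr, because scConstant precedes scAddExpr in the
/// SCEVTypes enumeration, so constants sort to the front of an operand list.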
class SCEVComplexityCompare {
  const LoopInfo *const LI;
public:
  explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

  // Return true if LHS is less complex than RHS; that is, LHS should be
  // ordered before RHS.
  bool operator()(const SCEV *LHS, const SCEV *RHS) const {
    return compare(LHS, RHS) < 0;
  }

  // Return negative, zero, or positive, if LHS is less than, equal to, or
  // greater than RHS, respectively. A three-way result allows recursive
  // comparisons to be more efficient.
  int compare(const SCEV *LHS, const SCEV *RHS) const {
    // Fast-path: SCEVs are uniqued so we can do a quick equality check.
    if (LHS == RHS)
      return 0;

    // Primarily, sort the SCEVs by their getSCEVType().
    unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
    if (LType != RType)
      return (int)LType - (int)RType;

    // Aside from the getSCEVType() ordering, the particular ordering
    // isn't very important except that it's beneficial to be consistent,
    // so that (a + b) and (b + a) don't end up as different expressions.
    switch (static_cast<SCEVTypes>(LType)) {
    case scUnknown: {
      const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
      const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      const Value *LV = LU->getValue(), *RV = RU->getValue();

      // Order pointer values after integer values. This helps SCEVExpander
      // form GEPs.
      bool LIsPointer = LV->getType()->isPointerTy(),
           RIsPointer = RV->getType()->isPointerTy();
      if (LIsPointer != RIsPointer)
        return (int)LIsPointer - (int)RIsPointer;

      // Compare getValueID values.
      unsigned LID = LV->getValueID(),
               RID = RV->getValueID();
      if (LID != RID)
        return (int)LID - (int)RID;

      // Sort arguments by their position.
      if (const Argument *LA = dyn_cast<Argument>(LV)) {
        const Argument *RA = cast<Argument>(RV);
        unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
        return (int)LArgNo - (int)RArgNo;
      }

      // For instructions, compare their loop depth, and their operand
      // count. This is pretty loose.
      if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
        const Instruction *RInst = cast<Instruction>(RV);

        // Compare loop depths.
        const BasicBlock *LParent = LInst->getParent(),
                         *RParent = RInst->getParent();
        if (LParent != RParent) {
          unsigned LDepth = LI->getLoopDepth(LParent),
                   RDepth = LI->getLoopDepth(RParent);
          if (LDepth != RDepth)
            return (int)LDepth - (int)RDepth;
        }

        // Compare the number of operands.
        unsigned LNumOps = LInst->getNumOperands(),
                 RNumOps = RInst->getNumOperands();
        return (int)LNumOps - (int)RNumOps;
      }

      return 0;
    }

    case scConstant: {
      const SCEVConstant *LC = cast<SCEVConstant>(LHS);
      const SCEVConstant *RC = cast<SCEVConstant>(RHS);

      // Compare constant values.
      const APInt &LA = LC->getAPInt();
      const APInt &RA = RC->getAPInt();
      unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
      if (LBitWidth != RBitWidth)
        return (int)LBitWidth - (int)RBitWidth;
      return LA.ult(RA) ? -1 : 1;
    }

    case scAddRecExpr: {
      const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
      const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

      // Compare addrec loop depths.
      const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
      if (LLoop != RLoop) {
        unsigned LDepth = LLoop->getLoopDepth(),
                 RDepth = RLoop->getLoopDepth();
        if (LDepth != RDepth)
          return (int)LDepth - (int)RDepth;
      }

      // Addrec complexity grows with operand count.
      unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
      if (LNumOps != RNumOps)
        return (int)LNumOps - (int)RNumOps;

      // Lexicographically compare.
      for (unsigned i = 0; i != LNumOps; ++i) {
        long X = compare(LA->getOperand(i), RA->getOperand(i));
        if (X != 0)
          return X;
      }

      return 0;
    }

    case scAddExpr:
    case scMulExpr:
    case scSMaxExpr:
    case scUMaxExpr: {
      const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
      const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

      // Lexicographically compare n-ary expressions.
      unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
      if (LNumOps != RNumOps)
        return (int)LNumOps - (int)RNumOps;

      for (unsigned i = 0; i != LNumOps; ++i) {
        if (i >= RNumOps)
          return 1;
        long X = compare(LC->getOperand(i), RC->getOperand(i));
        if (X != 0)
          return X;
      }
      return (int)LNumOps - (int)RNumOps;
    }

    case scUDivExpr: {
      const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
      const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

      // Lexicographically compare udiv expressions.
      long X = compare(LC->getLHS(), RC->getLHS());
      if (X != 0)
        return X;
      return compare(LC->getRHS(), RC->getRHS());
    }

    case scTruncate:
    case scZeroExtend:
    case scSignExtend: {
      const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
      const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

      // Compare cast expressions by operand.
      return compare(LC->getOperand(), RC->getOperand());
    }

    case scCouldNotCompute:
      llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    }
    llvm_unreachable("Unknown SCEV kind!");
  }
};
} // end anonymous namespace

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
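/// For example (illustrative): the operands of (%a + 2 + %b) group as
/// (2, %a, %b); the constant sorts first, and the two SCEVUnknowns are
/// ordered by the SCEVComplexityCompare heuristics rather than by their
/// addresses, keeping the result deterministic across runs.
///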
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size;
    FindSCEVSize() : Size(0) {}

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }
    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case when N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<const SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

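  // Illustrative use (a sketch, not part of the original code): dividing
  // S == (8 * %n) by the constant 4
  //
  //   const SCEV *Q, *R;
  //   SCEVDivision::divide(SE, S, SE.getConstant(S->getType(), 4), &Q, &R);
  //
  // yields Q == (2 * %n) and R == 0.
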
  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }
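  // For example (illustrative): dividing the i32 constant 7 by the i32
  // constant 2 sets Quotient to 3 and Remainder to 1 (signed APInt::sdivrem).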

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

}

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
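  // For example (illustrative): for K = 3 at W = 32, K! == 6, T == 1, and
  // K!/2^T == 3, whose multiplicative inverse modulo 2^32 is 0xAAAAAAAB,
  // since 3 * 0xAAAAAAAB == 1 (mod 2^32).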
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
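/// For example (illustrative), {0,+,1,+,1} evaluates at iteration It to
/// 0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2) = It + It*(It-1)/2 = It*(It+1)/2.
///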
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
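// For example (illustrative): for a known-positive Step whose signed maximum
// is 1 at bit width 8, this returns Pred = ICMP_SLT with limit
// INT8_MIN - 1 == 127 (mod 2^8); a recurrence value that stays SLT 127 can be
// incremented by 1 without signed overflow.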
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMax());
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMin());
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRange(Step).getUnsignedMax());
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
}

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
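// For example (illustrative): with Step == 4 and Start == (%x + 4), zext of
// {(%x + 4),+,4} can use the pre-increment value %x, rewriting the start as
// (zext(%x) + 4) in the wider type when the checks below succeed.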
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy),
                     (SE->*GetExtendExpr)(Step, WideTy));
  if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
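// For example (illustrative): for {(%x + %step),+,%step} with a provable
// PreStart of %x, this returns (Ext(%step) + Ext(%x)) rather than
// Ext(%x + %step), exposing the extended operands to further folding.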
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty),
                        (SE->*GetExtendExpr)(PreStart, Ty));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
//
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  //
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
            getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
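        // For example (illustrative): for {0,+,1}<i8> with a max
        // backedge-taken count of 99, the final value 0 + 1*99 == 99 fits in
        // i8, the narrow and wide computations below agree, and the zext can
        // be pushed into the addrec as {0,+,1}<nuw> in the wider type.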

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
          const SCEV *WideMaxBECount =
            getZeroExtendExpr(CastedMaxBECount, WideTy);
          const SCEV *OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getZeroExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
                getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getSignExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
                getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
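        // For example (illustrative): for {0,+,1}<i8>, a backedge guard of
        // the form "AR ult N" with N == 0 - 1 == 255 (mod 2^8) means the
        // pre-inc value never reaches 255, so adding the step of 1 cannot
        // wrap unsigned.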
1537 if (isKnownPositive(Step)) { 1538 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1539 getUnsignedRange(Step).getUnsignedMax()); 1540 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1541 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && 1542 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, 1543 AR->getPostIncExpr(*this), N))) { 1544 // Cache knowledge of AR NUW, which is propagated to this 1545 // AddRec. 1546 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1547 // Return the expression with the addrec on the outside. 1548 return getAddRecExpr( 1549 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), 1550 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1551 } 1552 } else if (isKnownNegative(Step)) { 1553 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1554 getSignedRange(Step).getSignedMin()); 1555 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1556 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && 1557 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, 1558 AR->getPostIncExpr(*this), N))) { 1559 // Cache knowledge of AR NW, which is propagated to this 1560 // AddRec. Negative step causes unsigned wrap, but it 1561 // still can't self-wrap. 1562 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1563 // Return the expression with the addrec on the outside. 1564 return getAddRecExpr( 1565 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), 1566 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1567 } 1568 } 1569 } 1570 1571 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1572 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1573 return getAddRecExpr( 1574 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), 1575 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1576 } 1577 } 1578 1579 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1580 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1581 if (SA->hasNoUnsignedWrap()) { 1582 // If the addition does not unsign overflow then we can, by definition, 1583 // commute the zero extension with the addition operation. 1584 SmallVector<const SCEV *, 4> Ops; 1585 for (const auto *Op : SA->operands()) 1586 Ops.push_back(getZeroExtendExpr(Op, Ty)); 1587 return getAddExpr(Ops, SCEV::FlagNUW); 1588 } 1589 } 1590 1591 // The cast wasn't folded; create an explicit cast node. 1592 // Recompute the insert position, as it may have been invalidated. 1593 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1594 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1595 Op, Ty); 1596 UniqueSCEVs.InsertNode(S, IP); 1597 return S; 1598 } 1599 1600 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, 1601 Type *Ty) { 1602 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1603 "This is not an extending conversion!"); 1604 assert(isSCEVable(Ty) && 1605 "This is not a conversion to a SCEVable type!"); 1606 Ty = getEffectiveSCEVType(Ty); 1607 1608 // Fold if the operand is constant. 
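  // For instance (an illustrative example): sext(i8 -7) to i32 folds
  // directly to the i32 constant -7.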
1609 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1610 return getConstant( 1611 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1612 1613 // sext(sext(x)) --> sext(x) 1614 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1615 return getSignExtendExpr(SS->getOperand(), Ty); 1616 1617 // sext(zext(x)) --> zext(x) 1618 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1619 return getZeroExtendExpr(SZ->getOperand(), Ty); 1620 1621 // Before doing any expensive analysis, check to see if we've already 1622 // computed a SCEV for this Op and Ty. 1623 FoldingSetNodeID ID; 1624 ID.AddInteger(scSignExtend); 1625 ID.AddPointer(Op); 1626 ID.AddPointer(Ty); 1627 void *IP = nullptr; 1628 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1629 1630 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1631 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1632 // It's possible the bits taken off by the truncate were all sign bits. If 1633 // so, we should be able to simplify this further. 1634 const SCEV *X = ST->getOperand(); 1635 ConstantRange CR = getSignedRange(X); 1636 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1637 unsigned NewBits = getTypeSizeInBits(Ty); 1638 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1639 CR.sextOrTrunc(NewBits))) 1640 return getTruncateOrSignExtend(X, Ty); 1641 } 1642 1643 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2 1644 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1645 if (SA->getNumOperands() == 2) { 1646 auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0)); 1647 auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1)); 1648 if (SMul && SC1) { 1649 if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) { 1650 const APInt &C1 = SC1->getAPInt(); 1651 const APInt &C2 = SC2->getAPInt(); 1652 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && 1653 C2.ugt(C1) && C2.isPowerOf2()) 1654 return getAddExpr(getSignExtendExpr(SC1, Ty), 1655 getSignExtendExpr(SMul, Ty)); 1656 } 1657 } 1658 } 1659 1660 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1661 if (SA->hasNoSignedWrap()) { 1662 // If the addition does not sign overflow then we can, by definition, 1663 // commute the sign extension with the addition operation. 1664 SmallVector<const SCEV *, 4> Ops; 1665 for (const auto *Op : SA->operands()) 1666 Ops.push_back(getSignExtendExpr(Op, Ty)); 1667 return getAddExpr(Ops, SCEV::FlagNSW); 1668 } 1669 } 1670 // If the input value is a chrec scev, and we can prove that the value 1671 // did not overflow the old, smaller, value, we can sign extend all of the 1672 // operands (often constants). This allows analysis of something like 1673 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1674 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1675 if (AR->isAffine()) { 1676 const SCEV *Start = AR->getStart(); 1677 const SCEV *Step = AR->getStepRecurrence(*this); 1678 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1679 const Loop *L = AR->getLoop(); 1680 1681 if (!AR->hasNoSignedWrap()) { 1682 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1683 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1684 } 1685 1686 // If we have special knowledge that this addrec won't overflow, 1687 // we don't need to do any further analysis. 
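      // For example (an illustrative sketch): once {0,+,1}<%loop> over i8 is
      // known <nsw>, sext to i32 yields {0,+,1}<nsw><%loop> over i32, with no
      // explicit sext left in the expression.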
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
            getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy)));
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
                getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy)));
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
                getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.

      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit =
            getSignedOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
              isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
                                          OverflowLimit)))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(
              getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
              getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
        }
      }

      // If Start and Step are constants, check if we can apply this
      // transformation:
      // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
      auto *SC1 = dyn_cast<SCEVConstant>(Start);
      auto *SC2 = dyn_cast<SCEVConstant>(Step);
      if (SC1 && SC2) {
        const APInt &C1 = SC1->getAPInt();
        const APInt &C2 = SC2->getAPInt();
        if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
            C2.isPowerOf2()) {
          Start = getSignExtendExpr(Start, Ty);
          const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
                                            AR->getNoWrapFlags());
          return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
            getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably positive and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
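  // For example (illustrative): anyext of (trunc i64 %x to i32) back to i64
  // peels the truncate and yields %x itself via getTruncateOrNoop.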
1844 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 1845 const SCEV *NewOp = T->getOperand(); 1846 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 1847 return getAnyExtendExpr(NewOp, Ty); 1848 return getTruncateOrNoop(NewOp, Ty); 1849 } 1850 1851 // Next try a zext cast. If the cast is folded, use it. 1852 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 1853 if (!isa<SCEVZeroExtendExpr>(ZExt)) 1854 return ZExt; 1855 1856 // Next try a sext cast. If the cast is folded, use it. 1857 const SCEV *SExt = getSignExtendExpr(Op, Ty); 1858 if (!isa<SCEVSignExtendExpr>(SExt)) 1859 return SExt; 1860 1861 // Force the cast to be folded into the operands of an addrec. 1862 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 1863 SmallVector<const SCEV *, 4> Ops; 1864 for (const SCEV *Op : AR->operands()) 1865 Ops.push_back(getAnyExtendExpr(Op, Ty)); 1866 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 1867 } 1868 1869 // If the expression is obviously signed, use the sext cast value. 1870 if (isa<SCEVSMaxExpr>(Op)) 1871 return SExt; 1872 1873 // Absent any other information, use the zext cast value. 1874 return ZExt; 1875 } 1876 1877 /// Process the given Ops list, which is a list of operands to be added under 1878 /// the given scale, update the given map. This is a helper function for 1879 /// getAddRecExpr. As an example of what it does, given a sequence of operands 1880 /// that would form an add expression like this: 1881 /// 1882 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 1883 /// 1884 /// where A and B are constants, update the map with these values: 1885 /// 1886 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 1887 /// 1888 /// and add 13 + A*B*29 to AccumulatedConstant. 1889 /// This will allow getAddRecExpr to produce this: 1890 /// 1891 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 1892 /// 1893 /// This form often exposes folding opportunities that are hidden in 1894 /// the original operand list. 1895 /// 1896 /// Return true iff it appears that any interesting folding opportunities 1897 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 1898 /// the common case where no interesting opportunities are present, and 1899 /// is also used as a check to avoid infinite recursion. 1900 /// 1901 static bool 1902 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 1903 SmallVectorImpl<const SCEV *> &NewOps, 1904 APInt &AccumulatedConstant, 1905 const SCEV *const *Ops, size_t NumOperands, 1906 const APInt &Scale, 1907 ScalarEvolution &SE) { 1908 bool Interesting = false; 1909 1910 // Iterate over the add operands. They are sorted, with constants first. 1911 unsigned i = 0; 1912 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 1913 ++i; 1914 // Pull a buried constant out to the outside. 1915 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 1916 Interesting = true; 1917 AccumulatedConstant += Scale * C->getAPInt(); 1918 } 1919 1920 // Next comes everything else. We're especially interested in multiplies 1921 // here, but they're in the middle, so just visit the rest with one loop. 
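  // E.g., in the example above, reaching the operand (B * (q + m + 29)) with
  // Scale == A recurses with NewScale == A*B, which is where the (q, A*B)
  // entry and the A*B*29 contribution to AccumulatedConstant come from.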
1922 for (; i != NumOperands; ++i) { 1923 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 1924 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 1925 APInt NewScale = 1926 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 1927 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 1928 // A multiplication of a constant with another add; recurse. 1929 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 1930 Interesting |= 1931 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 1932 Add->op_begin(), Add->getNumOperands(), 1933 NewScale, SE); 1934 } else { 1935 // A multiplication of a constant with some other value. Update 1936 // the map. 1937 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 1938 const SCEV *Key = SE.getMulExpr(MulOps); 1939 auto Pair = M.insert({Key, NewScale}); 1940 if (Pair.second) { 1941 NewOps.push_back(Pair.first->first); 1942 } else { 1943 Pair.first->second += NewScale; 1944 // The map already had an entry for this value, which may indicate 1945 // a folding opportunity. 1946 Interesting = true; 1947 } 1948 } 1949 } else { 1950 // An ordinary operand. Update the map. 1951 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 1952 M.insert({Ops[i], Scale}); 1953 if (Pair.second) { 1954 NewOps.push_back(Pair.first->first); 1955 } else { 1956 Pair.first->second += Scale; 1957 // The map already had an entry for this value, which may indicate 1958 // a folding opportunity. 1959 Interesting = true; 1960 } 1961 } 1962 } 1963 1964 return Interesting; 1965 } 1966 1967 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 1968 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 1969 // can't-overflow flags for the operation if possible. 1970 static SCEV::NoWrapFlags 1971 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 1972 const SmallVectorImpl<const SCEV *> &Ops, 1973 SCEV::NoWrapFlags Flags) { 1974 using namespace std::placeholders; 1975 typedef OverflowingBinaryOperator OBO; 1976 1977 bool CanAnalyze = 1978 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 1979 (void)CanAnalyze; 1980 assert(CanAnalyze && "don't call from other places!"); 1981 1982 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 1983 SCEV::NoWrapFlags SignOrUnsignWrap = 1984 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 1985 1986 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 
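  // (Informally: non-negative w-bit signed values are < 2^(w-1), and <nsw>
  // means the true sum also stays < 2^(w-1), so the sum can never reach 2^w
  // and cannot wrap unsigned either.)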
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr &&
      Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) {

    // (A + C) --> (A + C)<nsw> if the addition does not sign overflow
    // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Instruction::Add, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Instruction::Add, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}

/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI);

  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them into a multiply expression. Since we sorted the
  // list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      // X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
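      // E.g., X + Y + Y + Y becomes X + Y*3 (Count == 3 here).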
2074 const SCEV *Scale = getConstant(Ty, Count); 2075 const SCEV *Mul = getMulExpr(Scale, Ops[i]); 2076 if (Ops.size() == Count) 2077 return Mul; 2078 Ops[i] = Mul; 2079 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 2080 --i; e -= Count - 1; 2081 FoundMatch = true; 2082 } 2083 if (FoundMatch) 2084 return getAddExpr(Ops, Flags); 2085 2086 // Check for truncates. If all the operands are truncated from the same 2087 // type, see if factoring out the truncate would permit the result to be 2088 // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n) 2089 // if the contents of the resulting outer trunc fold to something simple. 2090 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) { 2091 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]); 2092 Type *DstType = Trunc->getType(); 2093 Type *SrcType = Trunc->getOperand()->getType(); 2094 SmallVector<const SCEV *, 8> LargeOps; 2095 bool Ok = true; 2096 // Check all the operands to see if they can be represented in the 2097 // source type of the truncate. 2098 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2099 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2100 if (T->getOperand()->getType() != SrcType) { 2101 Ok = false; 2102 break; 2103 } 2104 LargeOps.push_back(T->getOperand()); 2105 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2106 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2107 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2108 SmallVector<const SCEV *, 8> LargeMulOps; 2109 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2110 if (const SCEVTruncateExpr *T = 2111 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2112 if (T->getOperand()->getType() != SrcType) { 2113 Ok = false; 2114 break; 2115 } 2116 LargeMulOps.push_back(T->getOperand()); 2117 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2118 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2119 } else { 2120 Ok = false; 2121 break; 2122 } 2123 } 2124 if (Ok) 2125 LargeOps.push_back(getMulExpr(LargeMulOps)); 2126 } else { 2127 Ok = false; 2128 break; 2129 } 2130 } 2131 if (Ok) { 2132 // Evaluate the expression in the larger type. 2133 const SCEV *Fold = getAddExpr(LargeOps, Flags); 2134 // If it folds to something simple, use it. Otherwise, don't. 2135 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2136 return getTruncateExpr(Fold, DstType); 2137 } 2138 } 2139 2140 // Skip past any other cast SCEVs. 2141 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2142 ++Idx; 2143 2144 // If there are add operands they would be next. 2145 if (Idx < Ops.size()) { 2146 bool DeletedAdd = false; 2147 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2148 // If we have an add, expand the add operands onto the end of the operands 2149 // list. 2150 Ops.erase(Ops.begin()+Idx); 2151 Ops.append(Add->op_begin(), Add->op_end()); 2152 DeletedAdd = true; 2153 } 2154 2155 // If we deleted at least one add, we added operands to the end of the list, 2156 // and they are not necessarily sorted. Recurse to resort and resimplify 2157 // any operands we just acquired. 2158 if (DeletedAdd) 2159 return getAddExpr(Ops); 2160 } 2161 2162 // Skip over the add expression until we get to a multiply. 2163 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2164 ++Idx; 2165 2166 // Check to see if there are any folding opportunities present with 2167 // operands multiplied by constant values. 
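  // For example (illustrative): in x + (2 * x) both operands map to the key
  // x, with scales 1 and 2; the collision is flagged as interesting and the
  // sum is re-generated as 3 * x.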
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile
      // to re-generate the operands list. Group the operands by constant
      // scale, to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(getConstant(MulOp.first),
                                   getAddExpr(MulOp.second)));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply. If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV *One = getOne(Ty);
          const SCEV *AddOne = getAddExpr(One, InnerMul);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
2247 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2248 OMulOp != e; ++OMulOp) 2249 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2250 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2251 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2252 if (Mul->getNumOperands() != 2) { 2253 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2254 Mul->op_begin()+MulOp); 2255 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2256 InnerMul1 = getMulExpr(MulOps); 2257 } 2258 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2259 if (OtherMul->getNumOperands() != 2) { 2260 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2261 OtherMul->op_begin()+OMulOp); 2262 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2263 InnerMul2 = getMulExpr(MulOps); 2264 } 2265 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2); 2266 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); 2267 if (Ops.size() == 2) return OuterMul; 2268 Ops.erase(Ops.begin()+Idx); 2269 Ops.erase(Ops.begin()+OtherMulIdx-1); 2270 Ops.push_back(OuterMul); 2271 return getAddExpr(Ops); 2272 } 2273 } 2274 } 2275 } 2276 2277 // If there are any add recurrences in the operands list, see if any other 2278 // added values are loop invariant. If so, we can fold them into the 2279 // recurrence. 2280 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2281 ++Idx; 2282 2283 // Scan over all recurrences, trying to fold loop invariants into them. 2284 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2285 // Scan all of the other operands to this add and add them to the vector if 2286 // they are loop invariant w.r.t. the recurrence. 2287 SmallVector<const SCEV *, 8> LIOps; 2288 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2289 const Loop *AddRecLoop = AddRec->getLoop(); 2290 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2291 if (isLoopInvariant(Ops[i], AddRecLoop)) { 2292 LIOps.push_back(Ops[i]); 2293 Ops.erase(Ops.begin()+i); 2294 --i; --e; 2295 } 2296 2297 // If we found some loop invariants, fold them into the recurrence. 2298 if (!LIOps.empty()) { 2299 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2300 LIOps.push_back(AddRec->getStart()); 2301 2302 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2303 AddRec->op_end()); 2304 // This follows from the fact that the no-wrap flags on the outer add 2305 // expression are applicable on the 0th iteration, when the add recurrence 2306 // will be equal to its start value. 2307 AddRecOps[0] = getAddExpr(LIOps, Flags); 2308 2309 // Build the new addrec. Propagate the NUW and NSW flags if both the 2310 // outer add and the inner addrec are guaranteed to have no overflow. 2311 // Always propagate NW. 2312 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2313 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2314 2315 // If all of the other operands were loop invariant, we are done. 2316 if (Ops.size() == 1) return NewRec; 2317 2318 // Otherwise, add the folded AddRec by the non-invariant parts. 2319 for (unsigned i = 0;; ++i) 2320 if (Ops[i] == AddRec) { 2321 Ops[i] = NewRec; 2322 break; 2323 } 2324 return getAddExpr(Ops); 2325 } 2326 2327 // Okay, if there weren't any loop invariants to be folded, check to see if 2328 // there are multiple AddRec's with the same loop induction variable being 2329 // added together. If so, we can fold them. 
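    // For example (illustrative): {1,+,2}<L> + {7,+,3}<L> folds to
    // {8,+,5}<L> below.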
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx)
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                               AddRec->op_end());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx)
          if (const auto *OtherAddRec = dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
            if (OtherAddRec->getLoop() == AddRecLoop) {
              for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                   i != e; ++i) {
                if (i >= AddRecOps.size()) {
                  AddRecOps.append(OtherAddRec->op_begin()+i,
                                   OtherAddRec->op_end());
                  break;
                }
                AddRecOps[i] = getAddExpr(AddRecOps[i],
                                          OtherAddRec->getOperand(i));
              }
              Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
            }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops);
      }

    // Otherwise couldn't fold anything into this recurrence. Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient. If an
/// intermediate computation overflows, Overflow will be set and the return
/// will be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At iteration i we multiply by the i-th term of the numerator and divide
  // by i. The product of any i consecutive numerator terms is divisible by
  // i!, so each division produces an integral result and helps reduce the
  // chance of overflow in the intermediate computations. However, we can
  // still overflow even when the final result would fit.

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
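/// For example (illustrative), this returns true for (4 + x) * y, since the
/// nested add contains the constant 4, but false for x * y.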
2416 static bool containsConstantSomewhere(const SCEV *StartExpr) { 2417 SmallVector<const SCEV *, 4> Ops; 2418 Ops.push_back(StartExpr); 2419 while (!Ops.empty()) { 2420 const SCEV *CurrentExpr = Ops.pop_back_val(); 2421 if (isa<SCEVConstant>(*CurrentExpr)) 2422 return true; 2423 2424 if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) { 2425 const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr); 2426 Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end()); 2427 } 2428 } 2429 return false; 2430 } 2431 2432 /// Get a canonical multiply expression, or something simpler if possible. 2433 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2434 SCEV::NoWrapFlags Flags) { 2435 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2436 "only nuw or nsw allowed"); 2437 assert(!Ops.empty() && "Cannot get empty mul!"); 2438 if (Ops.size() == 1) return Ops[0]; 2439 #ifndef NDEBUG 2440 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2441 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2442 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2443 "SCEVMulExpr operand types don't match!"); 2444 #endif 2445 2446 // Sort by complexity, this groups all similar expression types together. 2447 GroupByComplexity(Ops, &LI); 2448 2449 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2450 2451 // If there are any constants, fold them together. 2452 unsigned Idx = 0; 2453 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2454 2455 // C1*(C2+V) -> C1*C2 + C1*V 2456 if (Ops.size() == 2) 2457 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2458 // If any of Add's ops are Adds or Muls with a constant, 2459 // apply this transformation as well. 2460 if (Add->getNumOperands() == 2) 2461 if (containsConstantSomewhere(Add)) 2462 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)), 2463 getMulExpr(LHSC, Add->getOperand(1))); 2464 2465 ++Idx; 2466 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2467 // We found two constants, fold them together! 2468 ConstantInt *Fold = 2469 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2470 Ops[0] = getConstant(Fold); 2471 Ops.erase(Ops.begin()+1); // Erase the folded element 2472 if (Ops.size() == 1) return Ops[0]; 2473 LHSC = cast<SCEVConstant>(Ops[0]); 2474 } 2475 2476 // If we are left with a constant one being multiplied, strip it off. 2477 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) { 2478 Ops.erase(Ops.begin()); 2479 --Idx; 2480 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 2481 // If we have a multiply of zero, it will always be zero. 2482 return Ops[0]; 2483 } else if (Ops[0]->isAllOnesValue()) { 2484 // If we have a mul by -1 of an add, try distributing the -1 among the 2485 // add operands. 2486 if (Ops.size() == 2) { 2487 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2488 SmallVector<const SCEV *, 4> NewOps; 2489 bool AnyFolded = false; 2490 for (const SCEV *AddOp : Add->operands()) { 2491 const SCEV *Mul = getMulExpr(Ops[0], AddOp); 2492 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2493 NewOps.push_back(Mul); 2494 } 2495 if (AnyFolded) 2496 return getAddExpr(NewOps); 2497 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2498 // Negation preserves a recurrence's no self-wrap property. 
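            // E.g. (illustrative): (-1) * {S,+,T}<L> becomes {-S,+,-T}<L>,
            // retaining FlagNW.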
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *AddRecOp : AddRec->operands())
              Operands.push_back(getMulExpr(Ops[0], AddRecOp));

            return getAddRecExpr(Operands, AddRec->getLoop(),
                                 AddRec->getNoWrapFlags(SCEV::FlagNW));
          }
        }
      }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isLoopInvariant(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // No self-wrap cannot be guaranteed after changing the step size, but
      // will be inferred if either NUW or NSW is true.
      Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together. If so, we can fold them.

    // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //       choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
    //     ]]],+,...up to x=2n}.
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrec's are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV*, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        const SCEV *Term = getZero(Ty);
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2));
          }
        }
        AddRecOps.push_back(Term);
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops);

    // Otherwise couldn't fold anything into this recurrence. Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
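/// For example (an illustrative sketch): {0,+,4}<L> udiv 2 can fold to
/// {0,+,2}<L> when the zero-extended forms match, as checked below.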
2674 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 2675 const SCEV *RHS) { 2676 assert(getEffectiveSCEVType(LHS->getType()) == 2677 getEffectiveSCEVType(RHS->getType()) && 2678 "SCEVUDivExpr operand types don't match!"); 2679 2680 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 2681 if (RHSC->getValue()->equalsInt(1)) 2682 return LHS; // X udiv 1 --> x 2683 // If the denominator is zero, the result of the udiv is undefined. Don't 2684 // try to analyze it, because the resolution chosen here may differ from 2685 // the resolution chosen in other parts of the compiler. 2686 if (!RHSC->getValue()->isZero()) { 2687 // Determine if the division can be folded into the operands of 2688 // its operands. 2689 // TODO: Generalize this to non-constants by using known-bits information. 2690 Type *Ty = LHS->getType(); 2691 unsigned LZ = RHSC->getAPInt().countLeadingZeros(); 2692 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; 2693 // For non-power-of-two values, effectively round the value up to the 2694 // nearest power of two. 2695 if (!RHSC->getAPInt().isPowerOf2()) 2696 ++MaxShiftAmt; 2697 IntegerType *ExtTy = 2698 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 2699 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 2700 if (const SCEVConstant *Step = 2701 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { 2702 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 2703 const APInt &StepInt = Step->getAPInt(); 2704 const APInt &DivInt = RHSC->getAPInt(); 2705 if (!StepInt.urem(DivInt) && 2706 getZeroExtendExpr(AR, ExtTy) == 2707 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 2708 getZeroExtendExpr(Step, ExtTy), 2709 AR->getLoop(), SCEV::FlagAnyWrap)) { 2710 SmallVector<const SCEV *, 4> Operands; 2711 for (const SCEV *Op : AR->operands()) 2712 Operands.push_back(getUDivExpr(Op, RHS)); 2713 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); 2714 } 2715 /// Get a canonical UDivExpr for a recurrence. 2716 /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. 2717 // We can currently only fold X%N if X is constant. 2718 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 2719 if (StartC && !DivInt.urem(StepInt) && 2720 getZeroExtendExpr(AR, ExtTy) == 2721 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 2722 getZeroExtendExpr(Step, ExtTy), 2723 AR->getLoop(), SCEV::FlagAnyWrap)) { 2724 const APInt &StartInt = StartC->getAPInt(); 2725 const APInt &StartRem = StartInt.urem(StepInt); 2726 if (StartRem != 0) 2727 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step, 2728 AR->getLoop(), SCEV::FlagNW); 2729 } 2730 } 2731 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 2732 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 2733 SmallVector<const SCEV *, 4> Operands; 2734 for (const SCEV *Op : M->operands()) 2735 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 2736 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 2737 // Find an operand that's safely divisible. 2738 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 2739 const SCEV *Op = M->getOperand(i); 2740 const SCEV *Div = getUDivExpr(Op, RHSC); 2741 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 2742 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 2743 M->op_end()); 2744 Operands[i] = Div; 2745 return getMulExpr(Operands); 2746 } 2747 } 2748 } 2749 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 
2750 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 2751 SmallVector<const SCEV *, 4> Operands; 2752 for (const SCEV *Op : A->operands()) 2753 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 2754 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 2755 Operands.clear(); 2756 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 2757 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 2758 if (isa<SCEVUDivExpr>(Op) || 2759 getMulExpr(Op, RHS) != A->getOperand(i)) 2760 break; 2761 Operands.push_back(Op); 2762 } 2763 if (Operands.size() == A->getNumOperands()) 2764 return getAddExpr(Operands); 2765 } 2766 } 2767 2768 // Fold if both operands are constant. 2769 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 2770 Constant *LHSCV = LHSC->getValue(); 2771 Constant *RHSCV = RHSC->getValue(); 2772 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 2773 RHSCV))); 2774 } 2775 } 2776 } 2777 2778 FoldingSetNodeID ID; 2779 ID.AddInteger(scUDivExpr); 2780 ID.AddPointer(LHS); 2781 ID.AddPointer(RHS); 2782 void *IP = nullptr; 2783 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2784 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 2785 LHS, RHS); 2786 UniqueSCEVs.InsertNode(S, IP); 2787 return S; 2788 } 2789 2790 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 2791 APInt A = C1->getAPInt().abs(); 2792 APInt B = C2->getAPInt().abs(); 2793 uint32_t ABW = A.getBitWidth(); 2794 uint32_t BBW = B.getBitWidth(); 2795 2796 if (ABW > BBW) 2797 B = B.zext(ABW); 2798 else if (ABW < BBW) 2799 A = A.zext(BBW); 2800 2801 return APIntOps::GreatestCommonDivisor(A, B); 2802 } 2803 2804 /// Get a canonical unsigned division expression, or something simpler if 2805 /// possible. There is no representation for an exact udiv in SCEV IR, but we 2806 /// can attempt to remove factors from the LHS and RHS. We can't do this when 2807 /// it's not exact because the udiv may be clearing bits. 2808 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 2809 const SCEV *RHS) { 2810 // TODO: we could try to find factors in all sorts of things, but for now we 2811 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 2812 // end of this file for inspiration. 2813 2814 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 2815 if (!Mul) 2816 return getUDivExpr(LHS, RHS); 2817 2818 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 2819 // If the mulexpr multiplies by a constant, then that constant must be the 2820 // first element of the mulexpr. 2821 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 2822 if (LHSCst == RHSCst) { 2823 SmallVector<const SCEV *, 2> Operands; 2824 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 2825 return getMulExpr(Operands); 2826 } 2827 2828 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 2829 // that there's a factor provided by one of the other terms. We need to 2830 // check. 
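      // E.g. (illustrative): for (6 * x) /u 4, gcd(6, 4) == 2, so we retry
      // below with (3 * x) /u 2.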
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
  }

  // It's tempting to call getMaxBackedgeTakenCount here and use that
  // information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ?
(L->getLoopDepth() < NestedLoop->getLoopDepth()) 2912 : (!NestedLoop->contains(L) && 2913 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { 2914 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 2915 NestedAR->op_end()); 2916 Operands[0] = NestedAR->getStart(); 2917 // AddRecs require their operands be loop-invariant with respect to their 2918 // loops. Don't perform this transformation if it would break this 2919 // requirement. 2920 bool AllInvariant = all_of( 2921 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); 2922 2923 if (AllInvariant) { 2924 // Create a recurrence for the outer loop with the same step size. 2925 // 2926 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the 2927 // inner recurrence has the same property. 2928 SCEV::NoWrapFlags OuterFlags = 2929 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); 2930 2931 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); 2932 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { 2933 return isLoopInvariant(Op, NestedLoop); 2934 }); 2935 2936 if (AllInvariant) { 2937 // Ok, both add recurrences are valid after the transformation. 2938 // 2939 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if 2940 // the outer recurrence has the same property. 2941 SCEV::NoWrapFlags InnerFlags = 2942 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 2943 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 2944 } 2945 } 2946 // Reset Operands to its original state. 2947 Operands[0] = NestedAR; 2948 } 2949 } 2950 2951 // Okay, it looks like we really DO need an addrec expr. Check to see if we 2952 // already have one, otherwise create a new one. 2953 FoldingSetNodeID ID; 2954 ID.AddInteger(scAddRecExpr); 2955 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 2956 ID.AddPointer(Operands[i]); 2957 ID.AddPointer(L); 2958 void *IP = nullptr; 2959 SCEVAddRecExpr *S = 2960 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2961 if (!S) { 2962 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 2963 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 2964 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 2965 O, Operands.size(), L); 2966 UniqueSCEVs.InsertNode(S, IP); 2967 } 2968 S->setNoWrapFlags(Flags); 2969 return S; 2970 } 2971 2972 const SCEV * 2973 ScalarEvolution::getGEPExpr(Type *PointeeType, const SCEV *BaseExpr, 2974 const SmallVectorImpl<const SCEV *> &IndexExprs, 2975 bool InBounds) { 2976 // getSCEV(Base)->getType() has the same address space as Base->getType() 2977 // because SCEV::getType() preserves the address space. 2978 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 2979 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 2980 // instruction to its SCEV, because the Instruction may be guarded by control 2981 // flow and the no-overflow bits may not be valid for the expression in any 2982 // context. This can be fixed similarly to how these flags are handled for 2983 // adds. 2984 SCEV::NoWrapFlags Wrap = InBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 2985 2986 const SCEV *TotalOffset = getZero(IntPtrTy); 2987 // The address space is unimportant. The first thing we do on CurTy is getting 2988 // its element type. 2989 Type *CurTy = PointerType::getUnqual(PointeeType); 2990 for (const SCEV *IndexExpr : IndexExprs) { 2991 // Compute the (potentially symbolic) offset in bytes for this index. 
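    // For example (an illustrative sketch, assuming 64-bit pointers and the
    // default struct layout): for
    //   getelementptr { i32, [8 x i64] }, %ptr, i64 0, i32 1, i64 %i
    // the offsets accumulate to 8 + 8 * sext(%i), which is then added to the
    // base expression.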
2992 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 2993 // For a struct, add the member offset. 2994 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 2995 unsigned FieldNo = Index->getZExtValue(); 2996 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 2997 2998 // Add the field offset to the running total offset. 2999 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3000 3001 // Update CurTy to the type of the field at Index. 3002 CurTy = STy->getTypeAtIndex(Index); 3003 } else { 3004 // Update CurTy to its element type. 3005 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3006 // For an array, add the element offset, explicitly scaled. 3007 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3008 // Getelementptr indices are signed. 3009 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3010 3011 // Multiply the index by the element size to compute the element offset. 3012 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3013 3014 // Add the element offset to the running total offset. 3015 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3016 } 3017 } 3018 3019 // Add the total offset from all the GEP indices to the base. 3020 return getAddExpr(BaseExpr, TotalOffset, Wrap); 3021 } 3022 3023 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 3024 const SCEV *RHS) { 3025 SmallVector<const SCEV *, 2> Ops; 3026 Ops.push_back(LHS); 3027 Ops.push_back(RHS); 3028 return getSMaxExpr(Ops); 3029 } 3030 3031 const SCEV * 3032 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3033 assert(!Ops.empty() && "Cannot get empty smax!"); 3034 if (Ops.size() == 1) return Ops[0]; 3035 #ifndef NDEBUG 3036 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3037 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3038 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3039 "SCEVSMaxExpr operand types don't match!"); 3040 #endif 3041 3042 // Sort by complexity, this groups all similar expression types together. 3043 GroupByComplexity(Ops, &LI); 3044 3045 // If there are any constants, fold them together. 3046 unsigned Idx = 0; 3047 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3048 ++Idx; 3049 assert(Idx < Ops.size()); 3050 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3051 // We found two constants, fold them together! 3052 ConstantInt *Fold = ConstantInt::get( 3053 getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt())); 3054 Ops[0] = getConstant(Fold); 3055 Ops.erase(Ops.begin()+1); // Erase the folded element 3056 if (Ops.size() == 1) return Ops[0]; 3057 LHSC = cast<SCEVConstant>(Ops[0]); 3058 } 3059 3060 // If we are left with a constant minimum-int, strip it off. 3061 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 3062 Ops.erase(Ops.begin()); 3063 --Idx; 3064 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 3065 // If we have an smax with a constant maximum-int, it will always be 3066 // maximum-int. 3067 return Ops[0]; 3068 } 3069 3070 if (Ops.size() == 1) return Ops[0]; 3071 } 3072 3073 // Find the first SMax 3074 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 3075 ++Idx; 3076 3077 // Check to see if one of the operands is an SMax. If so, expand its operands 3078 // onto our operand list, and recurse to simplify. 
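// For example, smax(a, smax(b, c)) is flattened to smax(a, b, c), which the
// recursive call can then fold further (constants, duplicates, known
// orderings, and so on).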
3079 if (Idx < Ops.size()) { 3080 bool DeletedSMax = false; 3081 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 3082 Ops.erase(Ops.begin()+Idx); 3083 Ops.append(SMax->op_begin(), SMax->op_end()); 3084 DeletedSMax = true; 3085 } 3086 3087 if (DeletedSMax) 3088 return getSMaxExpr(Ops); 3089 } 3090 3091 // Okay, check to see if the same value occurs in the operand list twice. If 3092 // so, delete one. Since we sorted the list, these values are required to 3093 // be adjacent. 3094 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3095 // X smax Y smax Y --> X smax Y 3096 // X smax Y --> X, if X is always greater than Y 3097 if (Ops[i] == Ops[i+1] || 3098 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { 3099 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3100 --i; --e; 3101 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { 3102 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3103 --i; --e; 3104 } 3105 3106 if (Ops.size() == 1) return Ops[0]; 3107 3108 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3109 3110 // Okay, it looks like we really DO need an smax expr. Check to see if we 3111 // already have one, otherwise create a new one. 3112 FoldingSetNodeID ID; 3113 ID.AddInteger(scSMaxExpr); 3114 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3115 ID.AddPointer(Ops[i]); 3116 void *IP = nullptr; 3117 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3118 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3119 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3120 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 3121 O, Ops.size()); 3122 UniqueSCEVs.InsertNode(S, IP); 3123 return S; 3124 } 3125 3126 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 3127 const SCEV *RHS) { 3128 SmallVector<const SCEV *, 2> Ops; 3129 Ops.push_back(LHS); 3130 Ops.push_back(RHS); 3131 return getUMaxExpr(Ops); 3132 } 3133 3134 const SCEV * 3135 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3136 assert(!Ops.empty() && "Cannot get empty umax!"); 3137 if (Ops.size() == 1) return Ops[0]; 3138 #ifndef NDEBUG 3139 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3140 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3141 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3142 "SCEVUMaxExpr operand types don't match!"); 3143 #endif 3144 3145 // Sort by complexity, this groups all similar expression types together. 3146 GroupByComplexity(Ops, &LI); 3147 3148 // If there are any constants, fold them together. 3149 unsigned Idx = 0; 3150 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3151 ++Idx; 3152 assert(Idx < Ops.size()); 3153 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3154 // We found two constants, fold them together! 3155 ConstantInt *Fold = ConstantInt::get( 3156 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); 3157 Ops[0] = getConstant(Fold); 3158 Ops.erase(Ops.begin()+1); // Erase the folded element 3159 if (Ops.size() == 1) return Ops[0]; 3160 LHSC = cast<SCEVConstant>(Ops[0]); 3161 } 3162 3163 // If we are left with a constant minimum-int, strip it off. 3164 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 3165 Ops.erase(Ops.begin()); 3166 --Idx; 3167 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 3168 // If we have an umax with a constant maximum-int, it will always be 3169 // maximum-int. 
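// (For i32 operands, for instance, umax(x, 0xffffffff) is always
// 0xffffffff.)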
3170 return Ops[0]; 3171 } 3172 3173 if (Ops.size() == 1) return Ops[0]; 3174 } 3175 3176 // Find the first UMax 3177 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 3178 ++Idx; 3179 3180 // Check to see if one of the operands is a UMax. If so, expand its operands 3181 // onto our operand list, and recurse to simplify. 3182 if (Idx < Ops.size()) { 3183 bool DeletedUMax = false; 3184 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 3185 Ops.erase(Ops.begin()+Idx); 3186 Ops.append(UMax->op_begin(), UMax->op_end()); 3187 DeletedUMax = true; 3188 } 3189 3190 if (DeletedUMax) 3191 return getUMaxExpr(Ops); 3192 } 3193 3194 // Okay, check to see if the same value occurs in the operand list twice. If 3195 // so, delete one. Since we sorted the list, these values are required to 3196 // be adjacent. 3197 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3198 // X umax Y umax Y --> X umax Y 3199 // X umax Y --> X, if X is always greater than Y 3200 if (Ops[i] == Ops[i+1] || 3201 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) { 3202 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3203 --i; --e; 3204 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) { 3205 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3206 --i; --e; 3207 } 3208 3209 if (Ops.size() == 1) return Ops[0]; 3210 3211 assert(!Ops.empty() && "Reduced umax down to nothing!"); 3212 3213 // Okay, it looks like we really DO need a umax expr. Check to see if we 3214 // already have one, otherwise create a new one. 3215 FoldingSetNodeID ID; 3216 ID.AddInteger(scUMaxExpr); 3217 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3218 ID.AddPointer(Ops[i]); 3219 void *IP = nullptr; 3220 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3221 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3222 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3223 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), 3224 O, Ops.size()); 3225 UniqueSCEVs.InsertNode(S, IP); 3226 return S; 3227 } 3228 3229 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3230 const SCEV *RHS) { 3231 // ~smax(~x, ~y) == smin(x, y). 3232 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 3233 } 3234 3235 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3236 const SCEV *RHS) { 3237 // ~umax(~x, ~y) == umin(x, y) 3238 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 3239 } 3240 3241 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 3242 // We can bypass creating a target-independent 3243 // constant expression and then folding it back into a ConstantInt. 3244 // This is just a compile-time optimization. 3245 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 3246 } 3247 3248 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3249 StructType *STy, 3250 unsigned FieldNo) { 3251 // We can bypass creating a target-independent 3252 // constant expression and then folding it back into a ConstantInt. 3253 // This is just a compile-time optimization. 3254 return getConstant( 3255 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3256 } 3257 3258 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3259 // Don't attempt to do anything other than create a SCEVUnknown object 3260 // here. 
createSCEV only calls getUnknown after checking for all other 3261 // interesting possibilities, and any other code that calls getUnknown 3262 // is doing so in order to hide a value from SCEV canonicalization. 3263 3264 FoldingSetNodeID ID; 3265 ID.AddInteger(scUnknown); 3266 ID.AddPointer(V); 3267 void *IP = nullptr; 3268 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 3269 assert(cast<SCEVUnknown>(S)->getValue() == V && 3270 "Stale SCEVUnknown in uniquing map!"); 3271 return S; 3272 } 3273 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 3274 FirstUnknown); 3275 FirstUnknown = cast<SCEVUnknown>(S); 3276 UniqueSCEVs.InsertNode(S, IP); 3277 return S; 3278 } 3279 3280 //===----------------------------------------------------------------------===// 3281 // Basic SCEV Analysis and PHI Idiom Recognition Code 3282 // 3283 3284 /// Test if values of the given type are analyzable within the SCEV 3285 /// framework. This primarily includes integer types, and it can optionally 3286 /// include pointer types if the ScalarEvolution class has access to 3287 /// target-specific information. 3288 bool ScalarEvolution::isSCEVable(Type *Ty) const { 3289 // Integers and pointers are always SCEVable. 3290 return Ty->isIntegerTy() || Ty->isPointerTy(); 3291 } 3292 3293 /// Return the size in bits of the specified type, for which isSCEVable must 3294 /// return true. 3295 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { 3296 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3297 return getDataLayout().getTypeSizeInBits(Ty); 3298 } 3299 3300 /// Return a type with the same bitwidth as the given type and which represents 3301 /// how SCEV will treat the given type, for which isSCEVable must return 3302 /// true. For pointer types, this is the pointer-sized integer type. 3303 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { 3304 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3305 3306 if (Ty->isIntegerTy()) 3307 return Ty; 3308 3309 // The only other supported type is pointer. 3310 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); 3311 return getDataLayout().getIntPtrType(Ty); 3312 } 3313 3314 const SCEV *ScalarEvolution::getCouldNotCompute() { 3315 return CouldNotCompute.get(); 3316 } 3317 3318 3319 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3320 // Helper class working with SCEVTraversal to figure out if a SCEV contains 3321 // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne 3322 // is set iff we find such a SCEVUnknown. 3323 // 3324 struct FindInvalidSCEVUnknown { 3325 bool FindOne; 3326 FindInvalidSCEVUnknown() { FindOne = false; } 3327 bool follow(const SCEV *S) { 3328 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 3329 case scConstant: 3330 return false; 3331 case scUnknown: 3332 if (!cast<SCEVUnknown>(S)->getValue()) 3333 FindOne = true; 3334 return false; 3335 default: 3336 return true; 3337 } 3338 } 3339 bool isDone() const { return FindOne; } 3340 }; 3341 3342 FindInvalidSCEVUnknown F; 3343 SCEVTraversal<FindInvalidSCEVUnknown> ST(F); 3344 ST.visitAll(S); 3345 3346 return !F.FindOne; 3347 } 3348 3349 namespace { 3350 // Helper class working with SCEVTraversal to figure out if a SCEV contains 3351 // a sub-SCEV of scAddRecExpr type. FindAddRecurrence::FoundOne is set 3352 // iff such a sub-SCEV is found.
3353 struct FindAddRecurrence { 3354 bool FoundOne; 3355 FindAddRecurrence() : FoundOne(false) {} 3356 3357 bool follow(const SCEV *S) { 3358 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 3359 case scAddRecExpr: 3360 FoundOne = true; // fall through 3361 case scConstant: 3362 case scUnknown: 3363 case scCouldNotCompute: 3364 return false; 3365 default: 3366 return true; 3367 } 3368 } 3369 bool isDone() const { return FoundOne; } 3370 }; 3371 } 3372 3373 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3374 HasRecMapType::iterator I = HasRecMap.find_as(S); 3375 if (I != HasRecMap.end()) 3376 return I->second; 3377 3378 FindAddRecurrence F; 3379 SCEVTraversal<FindAddRecurrence> ST(F); 3380 ST.visitAll(S); 3381 HasRecMap.insert({S, F.FoundOne}); 3382 return F.FoundOne; 3383 } 3384 3385 /// Return the set of Values mapped to S in ExprValueMap, if any. 3386 SetVector<Value *> *ScalarEvolution::getSCEVValues(const SCEV *S) { 3387 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3388 if (SI == ExprValueMap.end()) 3389 return nullptr; 3390 #ifndef NDEBUG 3391 if (VerifySCEVMap) { 3392 // Check there is no dangling Value in the set returned. 3393 for (const auto &VE : SI->second) 3394 assert(ValueExprMap.count(VE)); 3395 } 3396 #endif 3397 return &SI->second; 3398 } 3399 3400 /// Erase Value from ValueExprMap and ExprValueMap. Unless ValueExprMap.erase(V) 3401 /// is paired with forgetMemoizedResults(S), use eraseValueFromMap instead; it 3402 /// ensures that whenever V->S is removed from ValueExprMap, V is also 3403 /// removed from the set ExprValueMap[S]. 3404 void ScalarEvolution::eraseValueFromMap(Value *V) { 3405 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3406 if (I != ValueExprMap.end()) { 3407 const SCEV *S = I->second; 3408 SetVector<Value *> *SV = getSCEVValues(S); 3409 // Remove V from the set of ExprValueMap[S] 3410 if (SV) 3411 SV->remove(V); 3412 ValueExprMap.erase(V); 3413 } 3414 } 3415 3416 /// Return an existing SCEV if it exists, otherwise analyze the expression and 3417 /// create a new one. 3418 const SCEV *ScalarEvolution::getSCEV(Value *V) { 3419 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3420 3421 const SCEV *S = getExistingSCEV(V); 3422 if (S == nullptr) { 3423 S = createSCEV(V); 3424 // During PHI resolution, it is possible to create two SCEVs for the same 3425 // V, so we need to double-check that V->S was actually inserted into 3426 // ValueExprMap before inserting S->V into ExprValueMap.
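// A sketch of the hazard: while createSCEV(V) runs for a PHI, the PHI
// analysis may already have installed a (symbolic or simplified) mapping for
// V. The insert below then fails, and we must not record the reverse S->V
// edge for a V->S mapping that was never established.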
3427 std::pair<ValueExprMapType::iterator, bool> Pair = 3428 ValueExprMap.insert({SCEVCallbackVH(V, this), S}); 3429 if (Pair.second) 3430 ExprValueMap[S].insert(V); 3431 } 3432 return S; 3433 } 3434 3435 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3436 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3437 3438 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3439 if (I != ValueExprMap.end()) { 3440 const SCEV *S = I->second; 3441 if (checkValidity(S)) 3442 return S; 3443 forgetMemoizedResults(S); 3444 ValueExprMap.erase(I); 3445 } 3446 return nullptr; 3447 } 3448 3449 /// Return a SCEV corresponding to -V = -1*V 3450 /// 3451 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3452 SCEV::NoWrapFlags Flags) { 3453 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3454 return getConstant( 3455 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3456 3457 Type *Ty = V->getType(); 3458 Ty = getEffectiveSCEVType(Ty); 3459 return getMulExpr( 3460 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 3461 } 3462 3463 /// Return a SCEV corresponding to ~V = -1-V 3464 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3465 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3466 return getConstant( 3467 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3468 3469 Type *Ty = V->getType(); 3470 Ty = getEffectiveSCEVType(Ty); 3471 const SCEV *AllOnes = 3472 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 3473 return getMinusSCEV(AllOnes, V); 3474 } 3475 3476 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3477 SCEV::NoWrapFlags Flags) { 3478 // Fast path: X - X --> 0. 3479 if (LHS == RHS) 3480 return getZero(LHS->getType()); 3481 3482 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3483 // makes it so that we cannot make much use of NUW. 3484 auto AddFlags = SCEV::FlagAnyWrap; 3485 const bool RHSIsNotMinSigned = 3486 !getSignedRange(RHS).getSignedMin().isMinSignedValue(); 3487 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3488 // Let M be the minimum representable signed value. Then (-1)*RHS 3489 // signed-wraps if and only if RHS is M. That can happen even for 3490 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3491 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3492 // (-1)*RHS, we need to prove that RHS != M. 3493 // 3494 // If LHS is non-negative and we know that LHS - RHS does not 3495 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 3496 // either by proving that RHS > M or that LHS >= 0. 3497 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 3498 AddFlags = SCEV::FlagNSW; 3499 } 3500 } 3501 3502 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 3503 // RHS is NSW and LHS >= 0. 3504 // 3505 // The difficulty here is that the NSW flag may have been proven 3506 // relative to a loop that is to be found in a recurrence in LHS and 3507 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 3508 // larger scope than intended. 3509 auto NegFlags = RHSIsNotMinSigned ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap; 3510 3511 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags); 3512 } 3513 3514 const SCEV * 3515 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) { 3516 Type *SrcTy = V->getType(); 3517 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3518 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3519 "Cannot truncate or zero extend with non-integer arguments!"); 3520 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3521 return V; // No conversion 3522 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 3523 return getTruncateExpr(V, Ty); 3524 return getZeroExtendExpr(V, Ty); 3525 } 3526 3527 const SCEV * 3528 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, 3529 Type *Ty) { 3530 Type *SrcTy = V->getType(); 3531 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3532 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3533 "Cannot truncate or sign extend with non-integer arguments!"); 3534 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3535 return V; // No conversion 3536 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 3537 return getTruncateExpr(V, Ty); 3538 return getSignExtendExpr(V, Ty); 3539 } 3540 3541 const SCEV * 3542 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { 3543 Type *SrcTy = V->getType(); 3544 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3545 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3546 "Cannot noop or zero extend with non-integer arguments!"); 3547 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3548 "getNoopOrZeroExtend cannot truncate!"); 3549 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3550 return V; // No conversion 3551 return getZeroExtendExpr(V, Ty); 3552 } 3553 3554 const SCEV * 3555 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { 3556 Type *SrcTy = V->getType(); 3557 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3558 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3559 "Cannot noop or sign extend with non-integer arguments!"); 3560 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3561 "getNoopOrSignExtend cannot truncate!"); 3562 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3563 return V; // No conversion 3564 return getSignExtendExpr(V, Ty); 3565 } 3566 3567 const SCEV * 3568 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { 3569 Type *SrcTy = V->getType(); 3570 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3571 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3572 "Cannot noop or any extend with non-integer arguments!"); 3573 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3574 "getNoopOrAnyExtend cannot truncate!"); 3575 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3576 return V; // No conversion 3577 return getAnyExtendExpr(V, Ty); 3578 } 3579 3580 const SCEV * 3581 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { 3582 Type *SrcTy = V->getType(); 3583 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3584 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3585 "Cannot truncate or noop with non-integer arguments!"); 3586 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 3587 "getTruncateOrNoop cannot extend!"); 3588 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3589 return V; // No conversion 3590 return getTruncateExpr(V, Ty); 3591 } 3592 3593 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 3594 const SCEV *RHS) { 3595 const SCEV *PromotedLHS = LHS; 3596 const SCEV *PromotedRHS =
RHS; 3597 3598 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 3599 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 3600 else 3601 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 3602 3603 return getUMaxExpr(PromotedLHS, PromotedRHS); 3604 } 3605 3606 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 3607 const SCEV *RHS) { 3608 const SCEV *PromotedLHS = LHS; 3609 const SCEV *PromotedRHS = RHS; 3610 3611 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 3612 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 3613 else 3614 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 3615 3616 return getUMinExpr(PromotedLHS, PromotedRHS); 3617 } 3618 3619 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 3620 // A pointer operand may evaluate to a nonpointer expression, such as null. 3621 if (!V->getType()->isPointerTy()) 3622 return V; 3623 3624 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { 3625 return getPointerBase(Cast->getOperand()); 3626 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 3627 const SCEV *PtrOp = nullptr; 3628 for (const SCEV *NAryOp : NAry->operands()) { 3629 if (NAryOp->getType()->isPointerTy()) { 3630 // Cannot find the base of an expression with multiple pointer operands. 3631 if (PtrOp) 3632 return V; 3633 PtrOp = NAryOp; 3634 } 3635 } 3636 if (!PtrOp) 3637 return V; 3638 return getPointerBase(PtrOp); 3639 } 3640 return V; 3641 } 3642 3643 /// Push users of the given Instruction onto the given Worklist. 3644 static void 3645 PushDefUseChildren(Instruction *I, 3646 SmallVectorImpl<Instruction *> &Worklist) { 3647 // Push the def-use children onto the Worklist stack. 3648 for (User *U : I->users()) 3649 Worklist.push_back(cast<Instruction>(U)); 3650 } 3651 3652 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 3653 SmallVector<Instruction *, 16> Worklist; 3654 PushDefUseChildren(PN, Worklist); 3655 3656 SmallPtrSet<Instruction *, 8> Visited; 3657 Visited.insert(PN); 3658 while (!Worklist.empty()) { 3659 Instruction *I = Worklist.pop_back_val(); 3660 if (!Visited.insert(I).second) 3661 continue; 3662 3663 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 3664 if (It != ValueExprMap.end()) { 3665 const SCEV *Old = It->second; 3666 3667 // Short-circuit the def-use traversal if the symbolic name 3668 // ceases to appear in expressions. 3669 if (Old != SymName && !hasOperand(Old, SymName)) 3670 continue; 3671 3672 // A SCEVUnknown for a PHI either means that the PHI has an unrecognized 3673 // structure, that it's a PHI in the process of being computed by 3674 // createNodeForPHI, or that it's a single-value PHI. In the first case, 3675 // additional loop trip count information isn't going to change anything. 3676 // In the second case, createNodeForPHI will perform the necessary 3677 // updates on its own when it gets to that point. In the third, we do 3678 // want to forget the SCEVUnknown.
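// (An example of the third case, hypothetically: a PHI such as
// 'phi i32 [ %x, %pred ]' that was cached as a SCEVUnknown before it could
// be simplified to %x; forgetting it allows the simpler SCEV to be
// recomputed later.)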
3679 if (!isa<PHINode>(I) || 3680 !isa<SCEVUnknown>(Old) || 3681 (I != PN && Old == SymName)) { 3682 forgetMemoizedResults(Old); 3683 ValueExprMap.erase(It); 3684 } 3685 } 3686 3687 PushDefUseChildren(I, Worklist); 3688 } 3689 } 3690 3691 namespace { 3692 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { 3693 public: 3694 static const SCEV *rewrite(const SCEV *S, const Loop *L, 3695 ScalarEvolution &SE) { 3696 SCEVInitRewriter Rewriter(L, SE); 3697 const SCEV *Result = Rewriter.visit(S); 3698 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 3699 } 3700 3701 SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 3702 : SCEVRewriteVisitor(SE), L(L), Valid(true) {} 3703 3704 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 3705 if (!(SE.getLoopDisposition(Expr, L) == ScalarEvolution::LoopInvariant)) 3706 Valid = false; 3707 return Expr; 3708 } 3709 3710 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 3711 // Only allow AddRecExprs for this loop. 3712 if (Expr->getLoop() == L) 3713 return Expr->getStart(); 3714 Valid = false; 3715 return Expr; 3716 } 3717 3718 bool isValid() { return Valid; } 3719 3720 private: 3721 const Loop *L; 3722 bool Valid; 3723 }; 3724 3725 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 3726 public: 3727 static const SCEV *rewrite(const SCEV *S, const Loop *L, 3728 ScalarEvolution &SE) { 3729 SCEVShiftRewriter Rewriter(L, SE); 3730 const SCEV *Result = Rewriter.visit(S); 3731 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 3732 } 3733 3734 SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 3735 : SCEVRewriteVisitor(SE), L(L), Valid(true) {} 3736 3737 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 3738 // Only allow AddRecExprs for this loop. 3739 if (!(SE.getLoopDisposition(Expr, L) == ScalarEvolution::LoopInvariant)) 3740 Valid = false; 3741 return Expr; 3742 } 3743 3744 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 3745 if (Expr->getLoop() == L && Expr->isAffine()) 3746 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 3747 Valid = false; 3748 return Expr; 3749 } 3750 bool isValid() { return Valid; } 3751 3752 private: 3753 const Loop *L; 3754 bool Valid; 3755 }; 3756 } // end anonymous namespace 3757 3758 SCEV::NoWrapFlags 3759 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 3760 if (!AR->isAffine()) 3761 return SCEV::FlagAnyWrap; 3762 3763 typedef OverflowingBinaryOperator OBO; 3764 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 3765 3766 if (!AR->hasNoSignedWrap()) { 3767 ConstantRange AddRecRange = getSignedRange(AR); 3768 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 3769 3770 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 3771 Instruction::Add, IncRange, OBO::NoSignedWrap); 3772 if (NSWRegion.contains(AddRecRange)) 3773 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 3774 } 3775 3776 if (!AR->hasNoUnsignedWrap()) { 3777 ConstantRange AddRecRange = getUnsignedRange(AR); 3778 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 3779 3780 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 3781 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 3782 if (NUWRegion.contains(AddRecRange)) 3783 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 3784 } 3785 3786 return Result; 3787 } 3788 3789 namespace { 3790 /// Represents an abstract binary operation. 
This may exist as a 3791 /// normal instruction or constant expression, or may have been 3792 /// derived from an expression tree. 3793 struct BinaryOp { 3794 unsigned Opcode; 3795 Value *LHS; 3796 Value *RHS; 3797 bool IsNSW; 3798 bool IsNUW; 3799 3800 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 3801 /// constant expression. 3802 Operator *Op; 3803 3804 explicit BinaryOp(Operator *Op) 3805 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), 3806 IsNSW(false), IsNUW(false), Op(Op) { 3807 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { 3808 IsNSW = OBO->hasNoSignedWrap(); 3809 IsNUW = OBO->hasNoUnsignedWrap(); 3810 } 3811 } 3812 3813 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, 3814 bool IsNUW = false) 3815 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW), 3816 Op(nullptr) {} 3817 }; 3818 } 3819 3820 3821 /// Try to map \p V into a BinaryOp, and return \c None on failure. 3822 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { 3823 auto *Op = dyn_cast<Operator>(V); 3824 if (!Op) 3825 return None; 3826 3827 // Implementation detail: all the cleverness here should happen without 3828 // creating new SCEV expressions -- our caller knows tricks to avoid creating 3829 // SCEV expressions when possible, and we should not break that. 3830 3831 switch (Op->getOpcode()) { 3832 case Instruction::Add: 3833 case Instruction::Sub: 3834 case Instruction::Mul: 3835 case Instruction::UDiv: 3836 case Instruction::And: 3837 case Instruction::Or: 3838 case Instruction::AShr: 3839 case Instruction::Shl: 3840 return BinaryOp(Op); 3841 3842 case Instruction::Xor: 3843 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) 3844 // If the RHS of the xor is a signbit, then this is just an add. 3845 // Instcombine turns add of signbit into xor as a strength reduction step. 3846 if (RHSC->getValue().isSignBit()) 3847 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); 3848 return BinaryOp(Op); 3849 3850 case Instruction::LShr: 3851 // Turn a logical shift right by a constant into an unsigned divide. 3852 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { 3853 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); 3854 3855 // If the shift count is not less than the bitwidth, the result of 3856 // the shift is undefined. Don't try to analyze it, because the 3857 // resolution chosen here may differ from the resolution chosen in 3858 // other parts of the compiler.
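// For example (assuming i32 operands), 'lshr i32 %x, 3' is mapped below to
// 'udiv i32 %x, 8', i.e. a divide by the constant 1 << 3.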
3859 if (SA->getValue().ult(BitWidth)) { 3860 Constant *X = 3861 ConstantInt::get(SA->getContext(), 3862 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 3863 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); 3864 } 3865 } 3866 return BinaryOp(Op); 3867 3868 case Instruction::ExtractValue: { 3869 auto *EVI = cast<ExtractValueInst>(Op); 3870 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) 3871 break; 3872 3873 auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand()); 3874 if (!CI) 3875 break; 3876 3877 if (auto *F = CI->getCalledFunction()) 3878 switch (F->getIntrinsicID()) { 3879 case Intrinsic::sadd_with_overflow: 3880 case Intrinsic::uadd_with_overflow: { 3881 if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT)) 3882 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 3883 CI->getArgOperand(1)); 3884 3885 // Now that we know that all uses of the arithmetic-result component of 3886 // CI are guarded by the overflow check, we can go ahead and pretend 3887 // that the arithmetic is non-overflowing. 3888 if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow) 3889 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 3890 CI->getArgOperand(1), /* IsNSW = */ true, 3891 /* IsNUW = */ false); 3892 else 3893 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 3894 CI->getArgOperand(1), /* IsNSW = */ false, 3895 /* IsNUW*/ true); 3896 } 3897 3898 case Intrinsic::ssub_with_overflow: 3899 case Intrinsic::usub_with_overflow: 3900 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 3901 CI->getArgOperand(1)); 3902 3903 case Intrinsic::smul_with_overflow: 3904 case Intrinsic::umul_with_overflow: 3905 return BinaryOp(Instruction::Mul, CI->getArgOperand(0), 3906 CI->getArgOperand(1)); 3907 default: 3908 break; 3909 } 3910 } 3911 3912 default: 3913 break; 3914 } 3915 3916 return None; 3917 } 3918 3919 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { 3920 const Loop *L = LI.getLoopFor(PN->getParent()); 3921 if (!L || L->getHeader() != PN->getParent()) 3922 return nullptr; 3923 3924 // The loop may have multiple entrances or multiple exits; we can analyze 3925 // this phi as an addrec if it has a unique entry value and a unique 3926 // backedge value. 3927 Value *BEValueV = nullptr, *StartValueV = nullptr; 3928 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 3929 Value *V = PN->getIncomingValue(i); 3930 if (L->contains(PN->getIncomingBlock(i))) { 3931 if (!BEValueV) { 3932 BEValueV = V; 3933 } else if (BEValueV != V) { 3934 BEValueV = nullptr; 3935 break; 3936 } 3937 } else if (!StartValueV) { 3938 StartValueV = V; 3939 } else if (StartValueV != V) { 3940 StartValueV = nullptr; 3941 break; 3942 } 3943 } 3944 if (BEValueV && StartValueV) { 3945 // While we are analyzing this PHI node, handle its value symbolically. 3946 const SCEV *SymbolicName = getUnknown(PN); 3947 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && 3948 "PHI node already processed?"); 3949 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName}); 3950 3951 // Using this symbolic name for the PHI, analyze the value coming around 3952 // the back-edge. 3953 const SCEV *BEValue = getSCEV(BEValueV); 3954 3955 // NOTE: If BEValue is loop invariant, we know that the PHI node just 3956 // has a special value for the first iteration of the loop. 3957 3958 // If the value coming around the backedge is an add with the symbolic 3959 // value we just inserted, then we found a simple induction variable! 
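// For example (hypothetical IR):
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add nsw i32 %iv, 4
// Here BEValue is SymbolicName + 4, so %iv becomes the addrec
// {0,+,4}<nsw><%loop> once the flags are transferred below.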
3960 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 3961 // If there is a single occurrence of the symbolic value, replace it 3962 // with a recurrence. 3963 unsigned FoundIndex = Add->getNumOperands(); 3964 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 3965 if (Add->getOperand(i) == SymbolicName) 3966 if (FoundIndex == e) { 3967 FoundIndex = i; 3968 break; 3969 } 3970 3971 if (FoundIndex != Add->getNumOperands()) { 3972 // Create an add with everything but the specified operand. 3973 SmallVector<const SCEV *, 8> Ops; 3974 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 3975 if (i != FoundIndex) 3976 Ops.push_back(Add->getOperand(i)); 3977 const SCEV *Accum = getAddExpr(Ops); 3978 3979 // This is not a valid addrec if the step amount is varying each 3980 // loop iteration, but is not itself an addrec in this loop. 3981 if (isLoopInvariant(Accum, L) || 3982 (isa<SCEVAddRecExpr>(Accum) && 3983 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 3984 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 3985 3986 if (auto BO = MatchBinaryOp(BEValueV, DT)) { 3987 if (BO->Opcode == Instruction::Add && BO->LHS == PN) { 3988 if (BO->IsNUW) 3989 Flags = setFlags(Flags, SCEV::FlagNUW); 3990 if (BO->IsNSW) 3991 Flags = setFlags(Flags, SCEV::FlagNSW); 3992 } 3993 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { 3994 // If the increment is an inbounds GEP, then we know the address 3995 // space cannot be wrapped around. We cannot make any guarantee 3996 // about signed or unsigned overflow because pointers are 3997 // unsigned but we may have a negative index from the base 3998 // pointer. We can guarantee that no unsigned wrap occurs if the 3999 // indices form a positive value. 4000 if (GEP->isInBounds() && GEP->getOperand(0) == PN) { 4001 Flags = setFlags(Flags, SCEV::FlagNW); 4002 4003 const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); 4004 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) 4005 Flags = setFlags(Flags, SCEV::FlagNUW); 4006 } 4007 4008 // We cannot transfer nuw and nsw flags from subtraction 4009 // operations -- sub nuw X, Y is not the same as add nuw X, -Y 4010 // for instance. 4011 } 4012 4013 const SCEV *StartVal = getSCEV(StartValueV); 4014 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4015 4016 // Okay, for the entire analysis of this edge we assumed the PHI 4017 // to be symbolic. We now need to go back and purge all of the 4018 // entries for the scalars that use the symbolic expression. 4019 forgetSymbolicName(PN, SymbolicName); 4020 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 4021 4022 // We can add Flags to the post-inc expression only if we 4023 // know that it is *undefined behavior* for BEValueV to 4024 // overflow. 4025 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 4026 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 4027 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 4028 4029 return PHISCEV; 4030 } 4031 } 4032 } else { 4033 // Otherwise, this could be a loop like this: 4034 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 4035 // In this case, j = {1,+,1} and BEValue is j. 4036 // Because the other in-value of i (0) fits the evolution of BEValue, 4037 // i really is an addrec evolution.
4038 // 4039 // We can generalize this by saying that i is the shifted value of BEValue 4040 // by one iteration: 4041 // PHI(f(0), f({1,+,1})) --> f({0,+,1}) 4042 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this); 4043 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this); 4044 if (Shifted != getCouldNotCompute() && 4045 Start != getCouldNotCompute()) { 4046 const SCEV *StartVal = getSCEV(StartValueV); 4047 if (Start == StartVal) { 4048 // Okay, for the entire analysis of this edge we assumed the PHI 4049 // to be symbolic. We now need to go back and purge all of the 4050 // entries for the scalars that use the symbolic expression. 4051 forgetSymbolicName(PN, SymbolicName); 4052 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted; 4053 return Shifted; 4054 } 4055 } 4056 } 4057 4058 // Remove the temporary PHI node SCEV that has been inserted while intending 4059 // to create an AddRecExpr for this PHI node. We cannot keep this temporary, 4060 // as it would prevent later (possibly simpler) SCEV expressions from being 4061 // added to the ValueExprMap. 4062 ValueExprMap.erase(PN); 4063 } 4064 4065 return nullptr; 4066 } 4067 4068 // Checks if the SCEV S is available at BB. S is considered available at BB 4069 // if S can be materialized at BB without introducing a fault. 4070 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, 4071 BasicBlock *BB) { 4072 struct CheckAvailable { 4073 bool TraversalDone = false; 4074 bool Available = true; 4075 4076 const Loop *L = nullptr; // The loop BB is in (can be nullptr) 4077 BasicBlock *BB = nullptr; 4078 DominatorTree &DT; 4079 4080 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT) 4081 : L(L), BB(BB), DT(DT) {} 4082 4083 bool setUnavailable() { 4084 TraversalDone = true; 4085 Available = false; 4086 return false; 4087 } 4088 4089 bool follow(const SCEV *S) { 4090 switch (S->getSCEVType()) { 4091 case scConstant: case scTruncate: case scZeroExtend: case scSignExtend: 4092 case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: 4093 // These expressions are available if their operand(s) are. 4094 return true; 4095 4096 case scAddRecExpr: { 4097 // We allow add recurrences on the loop that BB is in, or on some 4098 // outer loop. This guarantees availability because the value of the 4099 // add recurrence at BB is simply the "current" value of the induction 4100 // variable. We can relax this in the future; for instance an add 4101 // recurrence on a sibling dominating loop is also available at BB. 4102 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop(); 4103 if (L && (ARLoop == L || ARLoop->contains(L))) 4104 return true; 4105 4106 return setUnavailable(); 4107 } 4108 4109 case scUnknown: { 4110 // For SCEVUnknown, we check for simple dominance. 4111 const auto *SU = cast<SCEVUnknown>(S); 4112 Value *V = SU->getValue(); 4113 4114 if (isa<Argument>(V)) 4115 return false; 4116 4117 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB)) 4118 return false; 4119 4120 return setUnavailable(); 4121 } 4122 4123 case scUDivExpr: 4124 case scCouldNotCompute: 4125 // We do not try to be smart about these at all.
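// (Materializing a udiv, in particular, could introduce a fault that the
// original program never executes, since the divisor is not known to be
// non-zero at BB.)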
4126 return setUnavailable(); 4127 } 4128 llvm_unreachable("switch should be fully covered!"); 4129 } 4130 4131 bool isDone() { return TraversalDone; } 4132 }; 4133 4134 CheckAvailable CA(L, BB, DT); 4135 SCEVTraversal<CheckAvailable> ST(CA); 4136 4137 ST.visitAll(S); 4138 return CA.Available; 4139 } 4140 4141 // Try to match a control flow sequence that branches out at BI and merges back 4142 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful 4143 // match. 4144 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 4145 Value *&C, Value *&LHS, Value *&RHS) { 4146 C = BI->getCondition(); 4147 4148 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 4149 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 4150 4151 if (!LeftEdge.isSingleEdge()) 4152 return false; 4153 4154 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 4155 4156 Use &LeftUse = Merge->getOperandUse(0); 4157 Use &RightUse = Merge->getOperandUse(1); 4158 4159 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 4160 LHS = LeftUse; 4161 RHS = RightUse; 4162 return true; 4163 } 4164 4165 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 4166 LHS = RightUse; 4167 RHS = LeftUse; 4168 return true; 4169 } 4170 4171 return false; 4172 } 4173 4174 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 4175 if (PN->getNumIncomingValues() == 2) { 4176 const Loop *L = LI.getLoopFor(PN->getParent()); 4177 4178 // We don't want to break LCSSA, even in a SCEV expression tree. 4179 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 4180 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 4181 return nullptr; 4182 4183 // Try to match 4184 // 4185 // br %cond, label %left, label %right 4186 // left: 4187 // br label %merge 4188 // right: 4189 // br label %merge 4190 // merge: 4191 // V = phi [ %x, %left ], [ %y, %right ] 4192 // 4193 // as "select %cond, %x, %y" 4194 4195 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 4196 assert(IDom && "At least the entry block should dominate PN"); 4197 4198 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 4199 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 4200 4201 if (BI && BI->isConditional() && 4202 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 4203 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 4204 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 4205 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 4206 } 4207 4208 return nullptr; 4209 } 4210 4211 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 4212 if (const SCEV *S = createAddRecFromPHI(PN)) 4213 return S; 4214 4215 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 4216 return S; 4217 4218 // If the PHI has a single incoming value, follow that value, unless the 4219 // PHI's incoming blocks are in a different loop, in which case doing so 4220 // risks breaking LCSSA form. Instcombine would normally zap these, but 4221 // it doesn't have DominatorTree information, so it may miss cases. 4222 if (Value *V = SimplifyInstruction(PN, getDataLayout(), &TLI, &DT, &AC)) 4223 if (LI.replacementPreservesLCSSAForm(PN, V)) 4224 return getSCEV(V); 4225 4226 // If it's not a loop phi, we can't handle it yet. 
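// (For example, a PHI merging two otherwise unrelated values whose
// controlling branch did not match the select pattern above ends up here as
// a plain SCEVUnknown.)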
4227 return getUnknown(PN); 4228 } 4229 4230 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 4231 Value *Cond, 4232 Value *TrueVal, 4233 Value *FalseVal) { 4234 // Handle "constant" branch or select. This can occur for instance when a 4235 // loop pass transforms an inner loop and moves on to process the outer loop. 4236 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 4237 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 4238 4239 // Try to match some simple smax or umax patterns. 4240 auto *ICI = dyn_cast<ICmpInst>(Cond); 4241 if (!ICI) 4242 return getUnknown(I); 4243 4244 Value *LHS = ICI->getOperand(0); 4245 Value *RHS = ICI->getOperand(1); 4246 4247 switch (ICI->getPredicate()) { 4248 case ICmpInst::ICMP_SLT: 4249 case ICmpInst::ICMP_SLE: 4250 std::swap(LHS, RHS); 4251 // fall through 4252 case ICmpInst::ICMP_SGT: 4253 case ICmpInst::ICMP_SGE: 4254 // a >s b ? a+x : b+x -> smax(a, b)+x 4255 // a >s b ? b+x : a+x -> smin(a, b)+x 4256 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 4257 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 4258 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 4259 const SCEV *LA = getSCEV(TrueVal); 4260 const SCEV *RA = getSCEV(FalseVal); 4261 const SCEV *LDiff = getMinusSCEV(LA, LS); 4262 const SCEV *RDiff = getMinusSCEV(RA, RS); 4263 if (LDiff == RDiff) 4264 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 4265 LDiff = getMinusSCEV(LA, RS); 4266 RDiff = getMinusSCEV(RA, LS); 4267 if (LDiff == RDiff) 4268 return getAddExpr(getSMinExpr(LS, RS), LDiff); 4269 } 4270 break; 4271 case ICmpInst::ICMP_ULT: 4272 case ICmpInst::ICMP_ULE: 4273 std::swap(LHS, RHS); 4274 // fall through 4275 case ICmpInst::ICMP_UGT: 4276 case ICmpInst::ICMP_UGE: 4277 // a >u b ? a+x : b+x -> umax(a, b)+x 4278 // a >u b ? b+x : a+x -> umin(a, b)+x 4279 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 4280 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4281 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 4282 const SCEV *LA = getSCEV(TrueVal); 4283 const SCEV *RA = getSCEV(FalseVal); 4284 const SCEV *LDiff = getMinusSCEV(LA, LS); 4285 const SCEV *RDiff = getMinusSCEV(RA, RS); 4286 if (LDiff == RDiff) 4287 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 4288 LDiff = getMinusSCEV(LA, RS); 4289 RDiff = getMinusSCEV(RA, LS); 4290 if (LDiff == RDiff) 4291 return getAddExpr(getUMinExpr(LS, RS), LDiff); 4292 } 4293 break; 4294 case ICmpInst::ICMP_NE: 4295 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 4296 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 4297 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 4298 const SCEV *One = getOne(I->getType()); 4299 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4300 const SCEV *LA = getSCEV(TrueVal); 4301 const SCEV *RA = getSCEV(FalseVal); 4302 const SCEV *LDiff = getMinusSCEV(LA, LS); 4303 const SCEV *RDiff = getMinusSCEV(RA, One); 4304 if (LDiff == RDiff) 4305 return getAddExpr(getUMaxExpr(One, LS), LDiff); 4306 } 4307 break; 4308 case ICmpInst::ICMP_EQ: 4309 // n == 0 ? 
1+x : n+x -> umax(n, 1)+x 4310 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 4311 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 4312 const SCEV *One = getOne(I->getType()); 4313 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4314 const SCEV *LA = getSCEV(TrueVal); 4315 const SCEV *RA = getSCEV(FalseVal); 4316 const SCEV *LDiff = getMinusSCEV(LA, One); 4317 const SCEV *RDiff = getMinusSCEV(RA, LS); 4318 if (LDiff == RDiff) 4319 return getAddExpr(getUMaxExpr(One, LS), LDiff); 4320 } 4321 break; 4322 default: 4323 break; 4324 } 4325 4326 return getUnknown(I); 4327 } 4328 4329 /// Expand GEP instructions into add and multiply operations. This allows them 4330 /// to be analyzed by regular SCEV code. 4331 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 4332 // Don't attempt to analyze GEPs over unsized objects. 4333 if (!GEP->getSourceElementType()->isSized()) 4334 return getUnknown(GEP); 4335 4336 SmallVector<const SCEV *, 4> IndexExprs; 4337 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 4338 IndexExprs.push_back(getSCEV(*Index)); 4339 return getGEPExpr(GEP->getSourceElementType(), 4340 getSCEV(GEP->getPointerOperand()), 4341 IndexExprs, GEP->isInBounds()); 4342 } 4343 4344 uint32_t 4345 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 4346 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 4347 return C->getAPInt().countTrailingZeros(); 4348 4349 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 4350 return std::min(GetMinTrailingZeros(T->getOperand()), 4351 (uint32_t)getTypeSizeInBits(T->getType())); 4352 4353 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 4354 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 4355 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? 4356 getTypeSizeInBits(E->getType()) : OpRes; 4357 } 4358 4359 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 4360 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 4361 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? 4362 getTypeSizeInBits(E->getType()) : OpRes; 4363 } 4364 4365 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 4366 // The result is the min of all operands results. 4367 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 4368 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 4369 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 4370 return MinOpRes; 4371 } 4372 4373 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 4374 // The result is the sum of all operands results. 4375 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 4376 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 4377 for (unsigned i = 1, e = M->getNumOperands(); 4378 SumOpRes != BitWidth && i != e; ++i) 4379 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), 4380 BitWidth); 4381 return SumOpRes; 4382 } 4383 4384 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 4385 // The result is the min of all operands results. 4386 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 4387 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 4388 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 4389 return MinOpRes; 4390 } 4391 4392 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 4393 // The result is the min of all operands results. 
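// (The min, because the max picks one operand at runtime, and only the
// weakest trailing-zeros guarantee is known to hold for whichever operand
// gets picked.)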
4394 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 4395 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 4396 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 4397 return MinOpRes; 4398 } 4399 4400 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 4401 // The result is the min of all operands results. 4402 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 4403 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 4404 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 4405 return MinOpRes; 4406 } 4407 4408 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 4409 // For a SCEVUnknown, ask ValueTracking. 4410 unsigned BitWidth = getTypeSizeInBits(U->getType()); 4411 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 4412 computeKnownBits(U->getValue(), Zeros, Ones, getDataLayout(), 0, &AC, 4413 nullptr, &DT); 4414 return Zeros.countTrailingOnes(); 4415 } 4416 4417 // SCEVUDivExpr 4418 return 0; 4419 } 4420 4421 /// Helper method to assign a range to V from metadata present in the IR. 4422 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 4423 if (Instruction *I = dyn_cast<Instruction>(V)) 4424 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 4425 return getConstantRangeFromMetadata(*MD); 4426 4427 return None; 4428 } 4429 4430 /// Determine the range for a particular SCEV. If SignHint is 4431 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 4432 /// with a "cleaner" unsigned (resp. signed) representation. 4433 ConstantRange 4434 ScalarEvolution::getRange(const SCEV *S, 4435 ScalarEvolution::RangeSignHint SignHint) { 4436 DenseMap<const SCEV *, ConstantRange> &Cache = 4437 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 4438 : SignedRanges; 4439 4440 // See if we've computed this range already. 4441 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 4442 if (I != Cache.end()) 4443 return I->second; 4444 4445 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 4446 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 4447 4448 unsigned BitWidth = getTypeSizeInBits(S->getType()); 4449 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 4450 4451 // If the value has known zeros, the maximum value will have those known zeros 4452 // as well. 
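// For example, with BitWidth == 8 and TZ == 3 (S is a multiple of 8), the
// unsigned case below computes [0, 0xf8 + 1), i.e. a maximum value of 248.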
4453 uint32_t TZ = GetMinTrailingZeros(S); 4454 if (TZ != 0) { 4455 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 4456 ConservativeResult = 4457 ConstantRange(APInt::getMinValue(BitWidth), 4458 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 4459 else 4460 ConservativeResult = ConstantRange( 4461 APInt::getSignedMinValue(BitWidth), 4462 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 4463 } 4464 4465 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 4466 ConstantRange X = getRange(Add->getOperand(0), SignHint); 4467 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 4468 X = X.add(getRange(Add->getOperand(i), SignHint)); 4469 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 4470 } 4471 4472 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 4473 ConstantRange X = getRange(Mul->getOperand(0), SignHint); 4474 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 4475 X = X.multiply(getRange(Mul->getOperand(i), SignHint)); 4476 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 4477 } 4478 4479 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 4480 ConstantRange X = getRange(SMax->getOperand(0), SignHint); 4481 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 4482 X = X.smax(getRange(SMax->getOperand(i), SignHint)); 4483 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 4484 } 4485 4486 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 4487 ConstantRange X = getRange(UMax->getOperand(0), SignHint); 4488 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 4489 X = X.umax(getRange(UMax->getOperand(i), SignHint)); 4490 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 4491 } 4492 4493 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 4494 ConstantRange X = getRange(UDiv->getLHS(), SignHint); 4495 ConstantRange Y = getRange(UDiv->getRHS(), SignHint); 4496 return setRange(UDiv, SignHint, 4497 ConservativeResult.intersectWith(X.udiv(Y))); 4498 } 4499 4500 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 4501 ConstantRange X = getRange(ZExt->getOperand(), SignHint); 4502 return setRange(ZExt, SignHint, 4503 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 4504 } 4505 4506 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 4507 ConstantRange X = getRange(SExt->getOperand(), SignHint); 4508 return setRange(SExt, SignHint, 4509 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 4510 } 4511 4512 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 4513 ConstantRange X = getRange(Trunc->getOperand(), SignHint); 4514 return setRange(Trunc, SignHint, 4515 ConservativeResult.intersectWith(X.truncate(BitWidth))); 4516 } 4517 4518 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 4519 // If there's no unsigned wrap, the value will never be less than its 4520 // initial value. 4521 if (AddRec->hasNoUnsignedWrap()) 4522 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 4523 if (!C->getValue()->isZero()) 4524 ConservativeResult = ConservativeResult.intersectWith( 4525 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 4526 4527 // If there's no signed wrap, and all the operands have the same sign or 4528 // zero, the value won't ever change sign. 
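// For example, {1,+,2}<nsw> stays strictly positive: it starts positive,
// every step is non-negative, and nsw rules out wrapping past INT_MAX.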
4529 if (AddRec->hasNoSignedWrap()) { 4530 bool AllNonNeg = true; 4531 bool AllNonPos = true; 4532 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 4533 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 4534 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 4535 } 4536 if (AllNonNeg) 4537 ConservativeResult = ConservativeResult.intersectWith( 4538 ConstantRange(APInt(BitWidth, 0), 4539 APInt::getSignedMinValue(BitWidth))); 4540 else if (AllNonPos) 4541 ConservativeResult = ConservativeResult.intersectWith( 4542 ConstantRange(APInt::getSignedMinValue(BitWidth), 4543 APInt(BitWidth, 1))); 4544 } 4545 4546 // TODO: non-affine addrec 4547 if (AddRec->isAffine()) { 4548 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 4549 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 4550 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 4551 auto RangeFromAffine = getRangeForAffineAR( 4552 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 4553 BitWidth); 4554 if (!RangeFromAffine.isFullSet()) 4555 ConservativeResult = 4556 ConservativeResult.intersectWith(RangeFromAffine); 4557 4558 auto RangeFromFactoring = getRangeViaFactoring( 4559 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 4560 BitWidth); 4561 if (!RangeFromFactoring.isFullSet()) 4562 ConservativeResult = 4563 ConservativeResult.intersectWith(RangeFromFactoring); 4564 } 4565 } 4566 4567 return setRange(AddRec, SignHint, ConservativeResult); 4568 } 4569 4570 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 4571 // Check if the IR explicitly contains !range metadata. 4572 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 4573 if (MDRange.hasValue()) 4574 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 4575 4576 // Split here to avoid paying the compile-time cost of calling both 4577 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 4578 // if needed. 4579 const DataLayout &DL = getDataLayout(); 4580 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 4581 // For a SCEVUnknown, ask ValueTracking. 4582 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 4583 computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, &AC, nullptr, &DT); 4584 if (Ones != ~Zeros + 1) 4585 ConservativeResult = 4586 ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)); 4587 } else { 4588 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 4589 "generalize as needed!"); 4590 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 4591 if (NS > 1) 4592 ConservativeResult = ConservativeResult.intersectWith( 4593 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 4594 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 4595 } 4596 4597 return setRange(U, SignHint, ConservativeResult); 4598 } 4599 4600 return setRange(S, SignHint, ConservativeResult); 4601 } 4602 4603 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 4604 const SCEV *Step, 4605 const SCEV *MaxBECount, 4606 unsigned BitWidth) { 4607 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 4608 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 4609 "Precondition!"); 4610 4611 ConstantRange Result(BitWidth, /* isFullSet = */ true); 4612 4613 // Check for overflow. This must be done with ConstantRange arithmetic 4614 // because we could be called from within the ScalarEvolution overflow 4615 // checking code. 
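  // A sketch of the check that follows: redo Start + MaxBECount * Step in
  // BitWidth * 2 + 1 bits, where the multiply and add cannot wrap, and
  // compare against the same computation done in BitWidth bits. If the two
  // agree, the narrow computation did not wrap either, so the min/max of
  // Start and End bound every value of the recurrence.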
4616 4617 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 4618 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); 4619 ConstantRange ZExtMaxBECountRange = 4620 MaxBECountRange.zextOrTrunc(BitWidth * 2 + 1); 4621 4622 ConstantRange StepSRange = getSignedRange(Step); 4623 ConstantRange SExtStepSRange = StepSRange.sextOrTrunc(BitWidth * 2 + 1); 4624 4625 ConstantRange StartURange = getUnsignedRange(Start); 4626 ConstantRange EndURange = 4627 StartURange.add(MaxBECountRange.multiply(StepSRange)); 4628 4629 // Check for unsigned overflow. 4630 ConstantRange ZExtStartURange = StartURange.zextOrTrunc(BitWidth * 2 + 1); 4631 ConstantRange ZExtEndURange = EndURange.zextOrTrunc(BitWidth * 2 + 1); 4632 if (ZExtStartURange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) == 4633 ZExtEndURange) { 4634 APInt Min = APIntOps::umin(StartURange.getUnsignedMin(), 4635 EndURange.getUnsignedMin()); 4636 APInt Max = APIntOps::umax(StartURange.getUnsignedMax(), 4637 EndURange.getUnsignedMax()); 4638 bool IsFullRange = Min.isMinValue() && Max.isMaxValue(); 4639 if (!IsFullRange) 4640 Result = 4641 Result.intersectWith(ConstantRange(Min, Max + 1)); 4642 } 4643 4644 ConstantRange StartSRange = getSignedRange(Start); 4645 ConstantRange EndSRange = 4646 StartSRange.add(MaxBECountRange.multiply(StepSRange)); 4647 4648 // Check for signed overflow. This must be done with ConstantRange 4649 // arithmetic because we could be called from within the ScalarEvolution 4650 // overflow checking code. 4651 ConstantRange SExtStartSRange = StartSRange.sextOrTrunc(BitWidth * 2 + 1); 4652 ConstantRange SExtEndSRange = EndSRange.sextOrTrunc(BitWidth * 2 + 1); 4653 if (SExtStartSRange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) == 4654 SExtEndSRange) { 4655 APInt Min = 4656 APIntOps::smin(StartSRange.getSignedMin(), EndSRange.getSignedMin()); 4657 APInt Max = 4658 APIntOps::smax(StartSRange.getSignedMax(), EndSRange.getSignedMax()); 4659 bool IsFullRange = Min.isMinSignedValue() && Max.isMaxSignedValue(); 4660 if (!IsFullRange) 4661 Result = 4662 Result.intersectWith(ConstantRange(Min, Max + 1)); 4663 } 4664 4665 return Result; 4666 } 4667 4668 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 4669 const SCEV *Step, 4670 const SCEV *MaxBECount, 4671 unsigned BitWidth) { 4672 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 4673 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 4674 4675 struct SelectPattern { 4676 Value *Condition = nullptr; 4677 APInt TrueValue; 4678 APInt FalseValue; 4679 4680 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 4681 const SCEV *S) { 4682 Optional<unsigned> CastOp; 4683 APInt Offset(BitWidth, 0); 4684 4685 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 4686 "Should be!"); 4687 4688 // Peel off a constant offset: 4689 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 4690 // In the future we could consider being smarter here and handle 4691 // {Start+Step,+,Step} too. 
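    // For example (an illustrative input): given
    //   S == (2 + (zext i8 (select %c, i8 3, i8 9) to i32)),
    // the code below peels Offset == 2 and CastOp == scZeroExtend, matches
    // the remaining select, and reconstructs TrueValue == 5 and
    // FalseValue == 11 in the 32-bit type.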
4692 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 4693 return; 4694 4695 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 4696 S = SA->getOperand(1); 4697 } 4698 4699 // Peel off a cast operation 4700 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 4701 CastOp = SCast->getSCEVType(); 4702 S = SCast->getOperand(); 4703 } 4704 4705 using namespace llvm::PatternMatch; 4706 4707 auto *SU = dyn_cast<SCEVUnknown>(S); 4708 const APInt *TrueVal, *FalseVal; 4709 if (!SU || 4710 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 4711 m_APInt(FalseVal)))) { 4712 Condition = nullptr; 4713 return; 4714 } 4715 4716 TrueValue = *TrueVal; 4717 FalseValue = *FalseVal; 4718 4719 // Re-apply the cast we peeled off earlier 4720 if (CastOp.hasValue()) 4721 switch (*CastOp) { 4722 default: 4723 llvm_unreachable("Unknown SCEV cast type!"); 4724 4725 case scTruncate: 4726 TrueValue = TrueValue.trunc(BitWidth); 4727 FalseValue = FalseValue.trunc(BitWidth); 4728 break; 4729 case scZeroExtend: 4730 TrueValue = TrueValue.zext(BitWidth); 4731 FalseValue = FalseValue.zext(BitWidth); 4732 break; 4733 case scSignExtend: 4734 TrueValue = TrueValue.sext(BitWidth); 4735 FalseValue = FalseValue.sext(BitWidth); 4736 break; 4737 } 4738 4739 // Re-apply the constant offset we peeled off earlier 4740 TrueValue += Offset; 4741 FalseValue += Offset; 4742 } 4743 4744 bool isRecognized() { return Condition != nullptr; } 4745 }; 4746 4747 SelectPattern StartPattern(*this, BitWidth, Start); 4748 if (!StartPattern.isRecognized()) 4749 return ConstantRange(BitWidth, /* isFullSet = */ true); 4750 4751 SelectPattern StepPattern(*this, BitWidth, Step); 4752 if (!StepPattern.isRecognized()) 4753 return ConstantRange(BitWidth, /* isFullSet = */ true); 4754 4755 if (StartPattern.Condition != StepPattern.Condition) { 4756 // We don't handle this case today; but we could, by considering four 4757 // possibilities below instead of two. I'm not sure if there are cases where 4758 // that will help over what getRange already does, though. 4759 return ConstantRange(BitWidth, /* isFullSet = */ true); 4760 } 4761 4762 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 4763 // construct arbitrary general SCEV expressions here. This function is called 4764 // from deep in the call stack, and calling getSCEV (on a sext instruction, 4765 // say) can end up caching a suboptimal value. 4766 4767 // FIXME: without the explicit `this` receiver below, MSVC errors out with 4768 // C2352 and C2512 (otherwise it isn't needed). 4769 4770 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 4771 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 4772 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 4773 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 4774 4775 ConstantRange TrueRange = 4776 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 4777 ConstantRange FalseRange = 4778 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 4779 4780 return TrueRange.unionWith(FalseRange); 4781 } 4782 4783 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 4784 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 4785 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 4786 4787 // Return early if there are no flags to propagate to the SCEV. 
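  // For example (illustrative): for "%x = add nsw i32 %a, %b" the candidate
  // flag computed below is FlagNSW, but it is only kept if
  // isSCEVExprNeverPoison can show the flag applies to the SCEV itself and
  // not just to this one instruction; a plain "add" with neither nuw nor
  // nsw always produces FlagAnyWrap.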
4788 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4789 if (BinOp->hasNoUnsignedWrap()) 4790 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 4791 if (BinOp->hasNoSignedWrap()) 4792 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 4793 if (Flags == SCEV::FlagAnyWrap) 4794 return SCEV::FlagAnyWrap; 4795 4796 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 4797 } 4798 4799 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 4800 // Here we check that I is in the header of the innermost loop containing I, 4801 // since we only deal with instructions in the loop header. The actual loop we 4802 // need to check later will come from an add recurrence, but getting that 4803 // requires computing the SCEV of the operands, which can be expensive. This 4804 // check we can do cheaply to rule out some cases early. 4805 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 4806 if (InnermostContainingLoop == nullptr || 4807 InnermostContainingLoop->getHeader() != I->getParent()) 4808 return false; 4809 4810 // Only proceed if we can prove that I does not yield poison. 4811 if (!isKnownNotFullPoison(I)) return false; 4812 4813 // At this point we know that if I is executed, then it does not wrap 4814 // according to at least one of NSW or NUW. If I is not executed, then we do 4815 // not know if the calculation that I represents would wrap. Multiple 4816 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 4817 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 4818 // derived from other instructions that map to the same SCEV. We cannot make 4819 // that guarantee for cases where I is not executed. So we need to find the 4820 // loop that I is considered in relation to and prove that I is executed for 4821 // every iteration of that loop. That implies that the value that I 4822 // calculates does not wrap anywhere in the loop, so then we can apply the 4823 // flags to the SCEV. 4824 // 4825 // We check isLoopInvariant to disambiguate in case we are adding recurrences 4826 // from different loops, so that we know which loop to prove that I is 4827 // executed in. 4828 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 4829 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 4830 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 4831 bool AllOtherOpsLoopInvariant = true; 4832 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 4833 ++OtherOpIndex) { 4834 if (OtherOpIndex != OpIndex) { 4835 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 4836 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 4837 AllOtherOpsLoopInvariant = false; 4838 break; 4839 } 4840 } 4841 } 4842 if (AllOtherOpsLoopInvariant && 4843 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 4844 return true; 4845 } 4846 } 4847 return false; 4848 } 4849 4850 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 4851 // If we know that \c I can never be poison period, then that's enough. 4852 if (isSCEVExprNeverPoison(I)) 4853 return true; 4854 4855 // For an add recurrence specifically, we assume that infinite loops without 4856 // side effects are undefined behavior, and then reason as follows: 4857 // 4858 // If the add recurrence is poison in any iteration, it is poison on all 4859 // future iterations (since incrementing poison yields poison). 
If the result
4860 // of the add recurrence is fed into the loop latch condition and the loop
4861 // does not contain any throws or exiting blocks other than the latch, we now
4862 // have the ability to "choose" whether the backedge is taken or not (by
4863 // choosing a sufficiently evil value for the poison feeding into the branch)
4864 // for every iteration including and after the one in which \p I first became
4865 // poison. There are two possibilities (let K be the iteration in which \p
4866 // I first becomes poison):
4867 //
4868 // 1. In the set of iterations including and after K, the loop body executes
4869 //    no side effects. In this case, executing the backedge an infinite number
4870 //    of times will yield undefined behavior.
4871 //
4872 // 2. In the set of iterations including and after K, the loop body executes
4873 //    at least one side effect. In this case, that specific instance of side
4874 //    effect is control dependent on poison, which also yields undefined
4875 //    behavior.
4876
4877   auto *ExitingBB = L->getExitingBlock();
4878   auto *LatchBB = L->getLoopLatch();
4879   if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
4880     return false;
4881
4882   SmallPtrSet<const Instruction *, 16> Pushed;
4883   SmallVector<const Instruction *, 8> PoisonStack;
4884
4885   // We start by assuming \c I, the post-inc add recurrence, is poison. Only
4886   // things that are known to be fully poison under that assumption go on the
4887   // PoisonStack.
4888   Pushed.insert(I);
4889   PoisonStack.push_back(I);
4890
4891   bool LatchControlDependentOnPoison = false;
4892   while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
4893     const Instruction *Poison = PoisonStack.pop_back_val();
4894
4895     for (auto *PoisonUser : Poison->users()) {
4896       if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
4897         if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
4898           PoisonStack.push_back(cast<Instruction>(PoisonUser));
4899       } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
4900         assert(BI->isConditional() && "Only possibility!");
4901         if (BI->getParent() == LatchBB) {
4902           LatchControlDependentOnPoison = true;
4903           break;
4904         }
4905       }
4906     }
4907   }
4908
4909   return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
4910 }
4911
4912 bool ScalarEvolution::loopHasNoAbnormalExits(const Loop *L) {
4913   auto Itr = LoopHasNoAbnormalExits.find(L);
4914   if (Itr == LoopHasNoAbnormalExits.end()) {
4915     auto NoAbnormalExitInBB = [&](BasicBlock *BB) {
4916       return all_of(*BB, [](Instruction &I) {
4917         return isGuaranteedToTransferExecutionToSuccessor(&I);
4918       });
4919     };
4920
4921     auto InsertPair = LoopHasNoAbnormalExits.insert(
4922         {L, all_of(L->getBlocks(), NoAbnormalExitInBB)});
4923     assert(InsertPair.second && "We just checked!");
4924     Itr = InsertPair.first;
4925   }
4926
4927   return Itr->second;
4928 }
4929
4930 const SCEV *ScalarEvolution::createSCEV(Value *V) {
4931   if (!isSCEVable(V->getType()))
4932     return getUnknown(V);
4933
4934   if (Instruction *I = dyn_cast<Instruction>(V)) {
4935     // Don't attempt to analyze instructions in blocks that aren't
4936     // reachable. Such instructions don't matter, and they aren't required
4937     // to obey basic rules for definitions dominating uses which this
4938     // analysis depends on.
4939 if (!DT.isReachableFromEntry(I->getParent())) 4940 return getUnknown(V); 4941 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 4942 return getConstant(CI); 4943 else if (isa<ConstantPointerNull>(V)) 4944 return getZero(V->getType()); 4945 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 4946 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 4947 else if (!isa<ConstantExpr>(V)) 4948 return getUnknown(V); 4949 4950 Operator *U = cast<Operator>(V); 4951 if (auto BO = MatchBinaryOp(U, DT)) { 4952 switch (BO->Opcode) { 4953 case Instruction::Add: { 4954 // The simple thing to do would be to just call getSCEV on both operands 4955 // and call getAddExpr with the result. However if we're looking at a 4956 // bunch of things all added together, this can be quite inefficient, 4957 // because it leads to N-1 getAddExpr calls for N ultimate operands. 4958 // Instead, gather up all the operands and make a single getAddExpr call. 4959 // LLVM IR canonical form means we need only traverse the left operands. 4960 SmallVector<const SCEV *, 4> AddOps; 4961 do { 4962 if (BO->Op) { 4963 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 4964 AddOps.push_back(OpSCEV); 4965 break; 4966 } 4967 4968 // If a NUW or NSW flag can be applied to the SCEV for this 4969 // addition, then compute the SCEV for this addition by itself 4970 // with a separate call to getAddExpr. We need to do that 4971 // instead of pushing the operands of the addition onto AddOps, 4972 // since the flags are only known to apply to this particular 4973 // addition - they may not apply to other additions that can be 4974 // formed with operands from AddOps. 4975 const SCEV *RHS = getSCEV(BO->RHS); 4976 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 4977 if (Flags != SCEV::FlagAnyWrap) { 4978 const SCEV *LHS = getSCEV(BO->LHS); 4979 if (BO->Opcode == Instruction::Sub) 4980 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 4981 else 4982 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 4983 break; 4984 } 4985 } 4986 4987 if (BO->Opcode == Instruction::Sub) 4988 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 4989 else 4990 AddOps.push_back(getSCEV(BO->RHS)); 4991 4992 auto NewBO = MatchBinaryOp(BO->LHS, DT); 4993 if (!NewBO || (NewBO->Opcode != Instruction::Add && 4994 NewBO->Opcode != Instruction::Sub)) { 4995 AddOps.push_back(getSCEV(BO->LHS)); 4996 break; 4997 } 4998 BO = NewBO; 4999 } while (true); 5000 5001 return getAddExpr(AddOps); 5002 } 5003 5004 case Instruction::Mul: { 5005 SmallVector<const SCEV *, 4> MulOps; 5006 do { 5007 if (BO->Op) { 5008 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5009 MulOps.push_back(OpSCEV); 5010 break; 5011 } 5012 5013 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5014 if (Flags != SCEV::FlagAnyWrap) { 5015 MulOps.push_back( 5016 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 5017 break; 5018 } 5019 } 5020 5021 MulOps.push_back(getSCEV(BO->RHS)); 5022 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5023 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 5024 MulOps.push_back(getSCEV(BO->LHS)); 5025 break; 5026 } 5027 BO = NewBO; 5028 } while (true); 5029 5030 return getMulExpr(MulOps); 5031 } 5032 case Instruction::UDiv: 5033 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5034 case Instruction::Sub: { 5035 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5036 if (BO->Op) 5037 Flags = getNoWrapFlagsFromUB(BO->Op); 5038 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 5039 } 5040 case Instruction::And: 5041 // For an expression 
like x&255 that merely masks off the high bits, 5042 // use zext(trunc(x)) as the SCEV expression. 5043 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5044 if (CI->isNullValue()) 5045 return getSCEV(BO->RHS); 5046 if (CI->isAllOnesValue()) 5047 return getSCEV(BO->LHS); 5048 const APInt &A = CI->getValue(); 5049 5050 // Instcombine's ShrinkDemandedConstant may strip bits out of 5051 // constants, obscuring what would otherwise be a low-bits mask. 5052 // Use computeKnownBits to compute what ShrinkDemandedConstant 5053 // knew about to reconstruct a low-bits mask value. 5054 unsigned LZ = A.countLeadingZeros(); 5055 unsigned TZ = A.countTrailingZeros(); 5056 unsigned BitWidth = A.getBitWidth(); 5057 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 5058 computeKnownBits(BO->LHS, KnownZero, KnownOne, getDataLayout(), 5059 0, &AC, nullptr, &DT); 5060 5061 APInt EffectiveMask = 5062 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 5063 if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) { 5064 const SCEV *MulCount = getConstant(ConstantInt::get( 5065 getContext(), APInt::getOneBitSet(BitWidth, TZ))); 5066 return getMulExpr( 5067 getZeroExtendExpr( 5068 getTruncateExpr( 5069 getUDivExactExpr(getSCEV(BO->LHS), MulCount), 5070 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 5071 BO->LHS->getType()), 5072 MulCount); 5073 } 5074 } 5075 break; 5076 5077 case Instruction::Or: 5078 // If the RHS of the Or is a constant, we may have something like: 5079 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 5080 // optimizations will transparently handle this case. 5081 // 5082 // In order for this transformation to be safe, the LHS must be of the 5083 // form X*(2^n) and the Or constant must be less than 2^n. 5084 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5085 const SCEV *LHS = getSCEV(BO->LHS); 5086 const APInt &CIVal = CI->getValue(); 5087 if (GetMinTrailingZeros(LHS) >= 5088 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 5089 // Build a plain add SCEV. 5090 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 5091 // If the LHS of the add was an addrec and it has no-wrap flags, 5092 // transfer the no-wrap flags, since an or won't introduce a wrap. 5093 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 5094 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 5095 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 5096 OldAR->getNoWrapFlags()); 5097 } 5098 return S; 5099 } 5100 } 5101 break; 5102 5103 case Instruction::Xor: 5104 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5105 // If the RHS of xor is -1, then this is a not operation. 5106 if (CI->isAllOnesValue()) 5107 return getNotSCEV(getSCEV(BO->LHS)); 5108 5109 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 5110 // This is a variant of the check for xor with -1, and it handles 5111 // the case where instcombine has trimmed non-demanded bits out 5112 // of an xor with -1. 
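          // For example (illustrative): "xor (and i32 %x, 255), 255" only
          // flips the low 8 bits, so when the SCEV of the LHS has the
          // zext(trunc) shape built by the And case above, the code below
          // rebuilds it as (zext i8 (not (trunc i32 %x to i8)) to i32).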
5113 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 5114 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 5115 if (LBO->getOpcode() == Instruction::And && 5116 LCI->getValue() == CI->getValue()) 5117 if (const SCEVZeroExtendExpr *Z = 5118 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 5119 Type *UTy = BO->LHS->getType(); 5120 const SCEV *Z0 = Z->getOperand(); 5121 Type *Z0Ty = Z0->getType(); 5122 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 5123 5124 // If C is a low-bits mask, the zero extend is serving to 5125 // mask off the high bits. Complement the operand and 5126 // re-apply the zext. 5127 if (APIntOps::isMask(Z0TySize, CI->getValue())) 5128 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 5129 5130 // If C is a single bit, it may be in the sign-bit position 5131 // before the zero-extend. In this case, represent the xor 5132 // using an add, which is equivalent, and re-apply the zext. 5133 APInt Trunc = CI->getValue().trunc(Z0TySize); 5134 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 5135 Trunc.isSignBit()) 5136 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 5137 UTy); 5138 } 5139 } 5140 break; 5141 5142 case Instruction::Shl: 5143 // Turn shift left of a constant amount into a multiply. 5144 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 5145 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 5146 5147 // If the shift count is not less than the bitwidth, the result of 5148 // the shift is undefined. Don't try to analyze it, because the 5149 // resolution chosen here may differ from the resolution chosen in 5150 // other parts of the compiler. 5151 if (SA->getValue().uge(BitWidth)) 5152 break; 5153 5154 // It is currently not resolved how to interpret NSW for left 5155 // shift by BitWidth - 1, so we avoid applying flags in that 5156 // case. Remove this check (or this comment) once the situation 5157 // is resolved. See 5158 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html 5159 // and http://reviews.llvm.org/D8890 . 5160 auto Flags = SCEV::FlagAnyWrap; 5161 if (BO->Op && SA->getValue().ult(BitWidth - 1)) 5162 Flags = getNoWrapFlagsFromUB(BO->Op); 5163 5164 Constant *X = ConstantInt::get(getContext(), 5165 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 5166 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 5167 } 5168 break; 5169 5170 case Instruction::AShr: 5171 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression. 5172 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) 5173 if (Operator *L = dyn_cast<Operator>(BO->LHS)) 5174 if (L->getOpcode() == Instruction::Shl && 5175 L->getOperand(1) == BO->RHS) { 5176 uint64_t BitWidth = getTypeSizeInBits(BO->LHS->getType()); 5177 5178 // If the shift count is not less than the bitwidth, the result of 5179 // the shift is undefined. Don't try to analyze it, because the 5180 // resolution chosen here may differ from the resolution chosen in 5181 // other parts of the compiler. 
5182           if (CI->getValue().uge(BitWidth))
5183             break;
5184
5185           uint64_t Amt = BitWidth - CI->getZExtValue();
5186           if (Amt == BitWidth)
5187             return getSCEV(L->getOperand(0)); // shift by zero --> noop
5188           return getSignExtendExpr(
5189               getTruncateExpr(getSCEV(L->getOperand(0)),
5190                               IntegerType::get(getContext(), Amt)),
5191               BO->LHS->getType());
5192         }
5193       break;
5194     }
5195   }
5196
5197   switch (U->getOpcode()) {
5198   case Instruction::Trunc:
5199     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
5200
5201   case Instruction::ZExt:
5202     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
5203
5204   case Instruction::SExt:
5205     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
5206
5207   case Instruction::BitCast:
5208     // BitCasts are no-op casts so we just eliminate the cast.
5209     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
5210       return getSCEV(U->getOperand(0));
5211     break;
5212
5213   // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
5214   // lead to pointer expressions which cannot safely be expanded to GEPs,
5215   // because ScalarEvolution doesn't respect the GEP aliasing rules when
5216   // simplifying integer expressions.
5217
5218   case Instruction::GetElementPtr:
5219     return createNodeForGEP(cast<GEPOperator>(U));
5220
5221   case Instruction::PHI:
5222     return createNodeForPHI(cast<PHINode>(U));
5223
5224   case Instruction::Select:
5225     // U can also be a select constant expr, which we let fall through. Since
5226     // createNodeForSelect only works for a condition that is an `ICmpInst`, and
5227     // constant expressions cannot have instructions as operands, we'd have
5228     // returned getUnknown for a select constant expression anyway.
5229     if (isa<Instruction>(U))
5230       return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
5231                                       U->getOperand(1), U->getOperand(2));
5232   }
5233
5234   return getUnknown(V);
5235 }
5236
5237
5238
5239 //===----------------------------------------------------------------------===//
5240 //                   Iteration Count Computation Code
5241 //
5242
5243 unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L) {
5244   if (BasicBlock *ExitingBB = L->getExitingBlock())
5245     return getSmallConstantTripCount(L, ExitingBB);
5246
5247   // No trip count information for multiple exits.
5248   return 0;
5249 }
5250
5251 unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
5252                                                     BasicBlock *ExitingBlock) {
5253   assert(ExitingBlock && "Must pass a non-null exiting block!");
5254   assert(L->isLoopExiting(ExitingBlock) &&
5255          "Exiting block must actually branch out of the loop!");
5256   const SCEVConstant *ExitCount =
5257       dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
5258   if (!ExitCount)
5259     return 0;
5260
5261   ConstantInt *ExitConst = ExitCount->getValue();
5262
5263   // Guard against huge trip counts.
5264   if (ExitConst->getValue().getActiveBits() > 32)
5265     return 0;
5266
5267   // In case of integer overflow, this returns 0, which is correct.
5268   return ((unsigned)ExitConst->getZExtValue()) + 1;
5269 }
5270
5271 unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L) {
5272   if (BasicBlock *ExitingBB = L->getExitingBlock())
5273     return getSmallConstantTripMultiple(L, ExitingBB);
5274
5275   // No trip multiple information for multiple exits.
5276   return 0;
5277 }
5278
5279 /// Returns the largest constant divisor of the trip count of this loop as a
5280 /// normal unsigned value, if possible.
This means that the actual trip count is
5281 /// always a multiple of the returned value (don't forget the trip count could
5282 /// very well be zero as well!).
5283 ///
5284 /// Returns 1 if the trip count is unknown or not guaranteed to be a
5285 /// multiple of a constant (which is also the case if the trip count is simply
5286 /// constant; use getSmallConstantTripCount for that case). It will also return
5287 /// 1 if the trip count is very large (>= 2^32).
5288 ///
5289 /// As explained in the comments for getSmallConstantTripCount, this assumes
5290 /// that control exits the loop via ExitingBlock.
5291 unsigned
5292 ScalarEvolution::getSmallConstantTripMultiple(Loop *L,
5293                                               BasicBlock *ExitingBlock) {
5294   assert(ExitingBlock && "Must pass a non-null exiting block!");
5295   assert(L->isLoopExiting(ExitingBlock) &&
5296          "Exiting block must actually branch out of the loop!");
5297   const SCEV *ExitCount = getExitCount(L, ExitingBlock);
5298   if (ExitCount == getCouldNotCompute())
5299     return 1;
5300
5301   // Get the trip count from the BE count by adding 1.
5302   const SCEV *TCMul = getAddExpr(ExitCount, getOne(ExitCount->getType()));
5303   // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
5304   // to factor simple cases.
5305   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
5306     TCMul = Mul->getOperand(0);
5307
5308   const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
5309   if (!MulC)
5310     return 1;
5311
5312   ConstantInt *Result = MulC->getValue();
5313
5314   // Guard against huge trip counts (this requires checking
5315   // for zero to handle the case where the trip count == -1 and the
5316   // addition wraps).
5317   if (!Result || Result->getValue().getActiveBits() > 32 ||
5318       Result->getValue().getActiveBits() == 0)
5319     return 1;
5320
5321   return (unsigned)Result->getZExtValue();
5322 }
5323
5324 /// Get the expression for the number of loop iterations for which this loop is
5325 /// guaranteed not to exit via ExitingBlock. Otherwise return
5326 /// SCEVCouldNotCompute.
5327 const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
5328   return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
5329 }
5330
5331 const SCEV *
5332 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
5333                                                  SCEVUnionPredicate &Preds) {
5334   return getPredicatedBackedgeTakenInfo(L).getExact(this, &Preds);
5335 }
5336
5337 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
5338   return getBackedgeTakenInfo(L).getExact(this);
5339 }
5340
5341 /// Similar to getBackedgeTakenCount, except it returns the least SCEV value
5342 /// that is known never to be less than the actual backedge taken count.
5343 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
5344   return getBackedgeTakenInfo(L).getMax(this);
5345 }
5346
5347 /// Push PHI nodes in the header of the given loop onto the given Worklist.
5348 static void
5349 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
5350   BasicBlock *Header = L->getHeader();
5351
5352   // Push all Loop-header PHIs onto the Worklist stack.
5353   for (BasicBlock::iterator I = Header->begin();
5354        PHINode *PN = dyn_cast<PHINode>(I); ++I)
5355     Worklist.push_back(PN);
5356 }
5357
5358 const ScalarEvolution::BackedgeTakenInfo &
5359 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
5360   auto &BTI = getBackedgeTakenInfo(L);
5361   if (BTI.hasFullInfo())
5362     return BTI;
5363
5364   auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
5365
5366   if (!Pair.second)
5367     return Pair.first->second;
5368
5369   BackedgeTakenInfo Result =
5370       computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
5371
5372   return PredicatedBackedgeTakenCounts.find(L)->second = Result;
5373 }
5374
5375 const ScalarEvolution::BackedgeTakenInfo &
5376 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
5377   // Initially insert an invalid entry for this loop. If the insertion
5378   // succeeds, proceed to actually compute a backedge-taken count and
5379   // update the value. The temporary CouldNotCompute value tells SCEV
5380   // code elsewhere that it shouldn't attempt to request a new
5381   // backedge-taken count, which could result in infinite recursion.
5382   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
5383       BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
5384   if (!Pair.second)
5385     return Pair.first->second;
5386
5387   // computeBackedgeTakenCount may allocate memory for its result. Inserting it
5388   // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
5389   // must be cleared in this scope.
5390   BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
5391
5392   if (Result.getExact(this) != getCouldNotCompute()) {
5393     assert(isLoopInvariant(Result.getExact(this), L) &&
5394            isLoopInvariant(Result.getMax(this), L) &&
5395            "Computed backedge-taken count isn't loop invariant for loop!");
5396     ++NumTripCountsComputed;
5397   }
5398   else if (Result.getMax(this) == getCouldNotCompute() &&
5399            isa<PHINode>(L->getHeader()->begin())) {
5400     // Only count loops that have phi nodes as not being computable.
5401     ++NumTripCountsNotComputed;
5402   }
5403
5404   // Now that we know more about the trip count for this loop, forget any
5405   // existing SCEV values for PHI nodes in this loop since they are only
5406   // conservative estimates made without the benefit of trip count
5407   // information. This is similar to the code in forgetLoop, except that
5408   // it handles SCEVUnknown PHI nodes specially.
5409   if (Result.hasAnyInfo()) {
5410     SmallVector<Instruction *, 16> Worklist;
5411     PushLoopPHIs(L, Worklist);
5412
5413     SmallPtrSet<Instruction *, 8> Visited;
5414     while (!Worklist.empty()) {
5415       Instruction *I = Worklist.pop_back_val();
5416       if (!Visited.insert(I).second)
5417         continue;
5418
5419       ValueExprMapType::iterator It =
5420           ValueExprMap.find_as(static_cast<Value *>(I));
5421       if (It != ValueExprMap.end()) {
5422         const SCEV *Old = It->second;
5423
5424         // SCEVUnknown for a PHI either means that it has an unrecognized
5425         // structure, or it's a PHI that's in the process of being computed
5426         // by createNodeForPHI. In the former case, additional loop trip
5427         // count information isn't going to change anything. In the latter
5428         // case, createNodeForPHI will perform the necessary updates on its
5429         // own when it gets to that point.
5430         if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
5431           forgetMemoizedResults(Old);
5432           ValueExprMap.erase(It);
5433         }
5434         if (PHINode *PN = dyn_cast<PHINode>(I))
5435           ConstantEvolutionLoopExitValue.erase(PN);
5436       }
5437
5438       PushDefUseChildren(I, Worklist);
5439     }
5440   }
5441
5442   // Re-lookup the insert position, since the call to
5443   // computeBackedgeTakenCount above could result in a
5444   // recursive call to getBackedgeTakenInfo (on a different
5445   // loop), which would invalidate the iterator computed
5446   // earlier.
5447   return BackedgeTakenCounts.find(L)->second = Result;
5448 }
5449
5450 void ScalarEvolution::forgetLoop(const Loop *L) {
5451   // Drop any stored trip count value.
5452   auto RemoveLoopFromBackedgeMap =
5453       [L](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
5454         auto BTCPos = Map.find(L);
5455         if (BTCPos != Map.end()) {
5456           BTCPos->second.clear();
5457           Map.erase(BTCPos);
5458         }
5459       };
5460
5461   RemoveLoopFromBackedgeMap(BackedgeTakenCounts);
5462   RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts);
5463
5464   // Drop information about expressions based on loop-header PHIs.
5465   SmallVector<Instruction *, 16> Worklist;
5466   PushLoopPHIs(L, Worklist);
5467
5468   SmallPtrSet<Instruction *, 8> Visited;
5469   while (!Worklist.empty()) {
5470     Instruction *I = Worklist.pop_back_val();
5471     if (!Visited.insert(I).second)
5472       continue;
5473
5474     ValueExprMapType::iterator It =
5475         ValueExprMap.find_as(static_cast<Value *>(I));
5476     if (It != ValueExprMap.end()) {
5477       forgetMemoizedResults(It->second);
5478       ValueExprMap.erase(It);
5479       if (PHINode *PN = dyn_cast<PHINode>(I))
5480         ConstantEvolutionLoopExitValue.erase(PN);
5481     }
5482
5483     PushDefUseChildren(I, Worklist);
5484   }
5485
5486   // Forget all contained loops too, to avoid dangling entries in the
5487   // ValuesAtScopes map.
5488   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
5489     forgetLoop(*I);
5490
5491   LoopHasNoAbnormalExits.erase(L);
5492 }
5493
5494 void ScalarEvolution::forgetValue(Value *V) {
5495   Instruction *I = dyn_cast<Instruction>(V);
5496   if (!I) return;
5497
5498   // Drop information about expressions based on loop-header PHIs.
5499   SmallVector<Instruction *, 16> Worklist;
5500   Worklist.push_back(I);
5501
5502   SmallPtrSet<Instruction *, 8> Visited;
5503   while (!Worklist.empty()) {
5504     I = Worklist.pop_back_val();
5505     if (!Visited.insert(I).second)
5506       continue;
5507
5508     ValueExprMapType::iterator It =
5509         ValueExprMap.find_as(static_cast<Value *>(I));
5510     if (It != ValueExprMap.end()) {
5511       forgetMemoizedResults(It->second);
5512       ValueExprMap.erase(It);
5513       if (PHINode *PN = dyn_cast<PHINode>(I))
5514         ConstantEvolutionLoopExitValue.erase(PN);
5515     }
5516
5517     PushDefUseChildren(I, Worklist);
5518   }
5519 }
5520
5521 /// Get the exact loop backedge taken count considering all loop exits. A
5522 /// computable result can only be returned for loops with a single exit.
5523 /// Returning the minimum taken count among all exits is incorrect because one
5524 /// of the loop's exit limits may have been skipped. howFarToZero assumes that
5525 /// the limit of each loop test is never skipped. This is a valid assumption as
5526 /// long as the loop exits via that test. For precise results, it is the
5527 /// caller's responsibility to specify the relevant loop exit using
5528 /// getExact(ExitingBlock, SE).
5529 const SCEV * 5530 ScalarEvolution::BackedgeTakenInfo::getExact( 5531 ScalarEvolution *SE, SCEVUnionPredicate *Preds) const { 5532 // If any exits were not computable, the loop is not computable. 5533 if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute(); 5534 5535 // We need exactly one computable exit. 5536 if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute(); 5537 assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info"); 5538 5539 const SCEV *BECount = nullptr; 5540 for (auto &ENT : ExitNotTaken) { 5541 assert(ENT.ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV"); 5542 5543 if (!BECount) 5544 BECount = ENT.ExactNotTaken; 5545 else if (BECount != ENT.ExactNotTaken) 5546 return SE->getCouldNotCompute(); 5547 if (Preds && ENT.getPred()) 5548 Preds->add(ENT.getPred()); 5549 5550 assert((Preds || ENT.hasAlwaysTruePred()) && 5551 "Predicate should be always true!"); 5552 } 5553 5554 assert(BECount && "Invalid not taken count for loop exit"); 5555 return BECount; 5556 } 5557 5558 /// Get the exact not taken count for this loop exit. 5559 const SCEV * 5560 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 5561 ScalarEvolution *SE) const { 5562 for (auto &ENT : ExitNotTaken) 5563 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePred()) 5564 return ENT.ExactNotTaken; 5565 5566 return SE->getCouldNotCompute(); 5567 } 5568 5569 /// getMax - Get the max backedge taken count for the loop. 5570 const SCEV * 5571 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 5572 for (auto &ENT : ExitNotTaken) 5573 if (!ENT.hasAlwaysTruePred()) 5574 return SE->getCouldNotCompute(); 5575 5576 return Max ? Max : SE->getCouldNotCompute(); 5577 } 5578 5579 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 5580 ScalarEvolution *SE) const { 5581 if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S)) 5582 return true; 5583 5584 if (!ExitNotTaken.ExitingBlock) 5585 return false; 5586 5587 for (auto &ENT : ExitNotTaken) 5588 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 5589 SE->hasOperand(ENT.ExactNotTaken, S)) 5590 return true; 5591 5592 return false; 5593 } 5594 5595 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 5596 /// computable exit into a persistent ExitNotTakenInfo array. 5597 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 5598 SmallVectorImpl<EdgeInfo> &ExitCounts, bool Complete, const SCEV *MaxCount) 5599 : Max(MaxCount) { 5600 5601 if (!Complete) 5602 ExitNotTaken.setIncomplete(); 5603 5604 unsigned NumExits = ExitCounts.size(); 5605 if (NumExits == 0) return; 5606 5607 ExitNotTaken.ExitingBlock = ExitCounts[0].ExitBlock; 5608 ExitNotTaken.ExactNotTaken = ExitCounts[0].Taken; 5609 5610 // Determine the number of ExitNotTakenExtras structures that we need. 5611 unsigned ExtraInfoSize = 0; 5612 if (NumExits > 1) 5613 ExtraInfoSize = 1 + std::count_if(std::next(ExitCounts.begin()), 5614 ExitCounts.end(), [](EdgeInfo &Entry) { 5615 return !Entry.Pred.isAlwaysTrue(); 5616 }); 5617 else if (!ExitCounts[0].Pred.isAlwaysTrue()) 5618 ExtraInfoSize = 1; 5619 5620 ExitNotTakenExtras *ENT = nullptr; 5621 5622 // Allocate the ExitNotTakenExtras structures and initialize the first 5623 // element (ExitNotTaken). 
5624   if (ExtraInfoSize > 0) {
5625     ENT = new ExitNotTakenExtras[ExtraInfoSize];
5626     ExitNotTaken.ExtraInfo = &ENT[0];
5627     *ExitNotTaken.getPred() = std::move(ExitCounts[0].Pred);
5628   }
5629
5630   if (NumExits == 1)
5631     return;
5632
5633   assert(ENT && "ExitNotTakenExtras is NULL while having more than one exit");
5634
5635   auto &Exits = ExitNotTaken.ExtraInfo->Exits;
5636
5637   // Handle the rare case of multiple computable exits.
5638   for (unsigned i = 1, PredPos = 1; i < NumExits; ++i) {
5639     ExitNotTakenExtras *Ptr = nullptr;
5640     if (!ExitCounts[i].Pred.isAlwaysTrue()) {
5641       Ptr = &ENT[PredPos++];
5642       Ptr->Pred = std::move(ExitCounts[i].Pred);
5643     }
5644
5645     Exits.emplace_back(ExitCounts[i].ExitBlock, ExitCounts[i].Taken, Ptr);
5646   }
5647 }
5648
5649 /// Invalidate this result and free the ExitNotTakenInfo array.
5650 void ScalarEvolution::BackedgeTakenInfo::clear() {
5651   ExitNotTaken.ExitingBlock = nullptr;
5652   ExitNotTaken.ExactNotTaken = nullptr;
5653   delete[] ExitNotTaken.ExtraInfo;
5654 }
5655
5656 /// Compute the number of times the backedge of the specified loop will execute.
5657 ScalarEvolution::BackedgeTakenInfo
5658 ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
5659                                            bool AllowPredicates) {
5660   SmallVector<BasicBlock *, 8> ExitingBlocks;
5661   L->getExitingBlocks(ExitingBlocks);
5662
5663   SmallVector<EdgeInfo, 4> ExitCounts;
5664   bool CouldComputeBECount = true;
5665   BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
5666   const SCEV *MustExitMaxBECount = nullptr;
5667   const SCEV *MayExitMaxBECount = nullptr;
5668
5669   // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
5670   // and compute maxBECount.
5671   // Do a union of all the predicates here.
5672   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
5673     BasicBlock *ExitBB = ExitingBlocks[i];
5674     ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
5675
5676     assert((AllowPredicates || EL.Pred.isAlwaysTrue()) &&
5677            "Predicated exit limit when predicates are not allowed!");
5678
5679     // 1. For each exit that can be computed, add an entry to ExitCounts.
5680     // CouldComputeBECount is true only if all exits can be computed.
5681     if (EL.Exact == getCouldNotCompute())
5682       // We couldn't compute an exact value for this exit, so
5683       // we won't be able to compute an exact value for the loop.
5684       CouldComputeBECount = false;
5685     else
5686       ExitCounts.emplace_back(EdgeInfo(ExitBB, EL.Exact, EL.Pred));
5687
5688     // 2. Derive the loop's MaxBECount from each exit's max number of
5689     // non-exiting iterations. Partition the loop exits into two kinds:
5690     // LoopMustExits and LoopMayExits.
5691     //
5692     // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it
5693     // is a LoopMayExit. If any computable LoopMustExit is found, then
5694     // MaxBECount is the minimum EL.Max of computable LoopMustExits. Otherwise,
5695     // MaxBECount is conservatively the maximum EL.Max, where CouldNotCompute is
5696     // considered greater than any computable EL.Max.
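    // For example (illustrative): with two exits where exit A dominates the
    // latch with EL.Max == 10 and exit B does not dominate it with
    // EL.Max == 100, MaxBECount is 10. With no computable must-exit, the
    // conservative choice is the umax over the may-exits, or CouldNotCompute
    // if any of them is uncomputable.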
5697 if (EL.Max != getCouldNotCompute() && Latch && 5698 DT.dominates(ExitBB, Latch)) { 5699 if (!MustExitMaxBECount) 5700 MustExitMaxBECount = EL.Max; 5701 else { 5702 MustExitMaxBECount = 5703 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.Max); 5704 } 5705 } else if (MayExitMaxBECount != getCouldNotCompute()) { 5706 if (!MayExitMaxBECount || EL.Max == getCouldNotCompute()) 5707 MayExitMaxBECount = EL.Max; 5708 else { 5709 MayExitMaxBECount = 5710 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.Max); 5711 } 5712 } 5713 } 5714 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 5715 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 5716 return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount); 5717 } 5718 5719 ScalarEvolution::ExitLimit 5720 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 5721 bool AllowPredicates) { 5722 5723 // Okay, we've chosen an exiting block. See what condition causes us to exit 5724 // at this block and remember the exit block and whether all other targets 5725 // lead to the loop header. 5726 bool MustExecuteLoopHeader = true; 5727 BasicBlock *Exit = nullptr; 5728 for (auto *SBB : successors(ExitingBlock)) 5729 if (!L->contains(SBB)) { 5730 if (Exit) // Multiple exit successors. 5731 return getCouldNotCompute(); 5732 Exit = SBB; 5733 } else if (SBB != L->getHeader()) { 5734 MustExecuteLoopHeader = false; 5735 } 5736 5737 // At this point, we know we have a conditional branch that determines whether 5738 // the loop is exited. However, we don't know if the branch is executed each 5739 // time through the loop. If not, then the execution count of the branch will 5740 // not be equal to the trip count of the loop. 5741 // 5742 // Currently we check for this by checking to see if the Exit branch goes to 5743 // the loop header. If so, we know it will always execute the same number of 5744 // times as the loop. We also handle the case where the exit block *is* the 5745 // loop header. This is common for un-rotated loops. 5746 // 5747 // If both of those tests fail, walk up the unique predecessor chain to the 5748 // header, stopping if there is an edge that doesn't exit the loop. If the 5749 // header is reached, the execution count of the branch will be equal to the 5750 // trip count of the loop. 5751 // 5752 // More extensive analysis could be done to handle more cases here. 5753 // 5754 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) { 5755 // The simple checks failed, try climbing the unique predecessor chain 5756 // up to the header. 5757 bool Ok = false; 5758 for (BasicBlock *BB = ExitingBlock; BB; ) { 5759 BasicBlock *Pred = BB->getUniquePredecessor(); 5760 if (!Pred) 5761 return getCouldNotCompute(); 5762 TerminatorInst *PredTerm = Pred->getTerminator(); 5763 for (const BasicBlock *PredSucc : PredTerm->successors()) { 5764 if (PredSucc == BB) 5765 continue; 5766 // If the predecessor has a successor that isn't BB and isn't 5767 // outside the loop, assume the worst. 
5768 if (L->contains(PredSucc)) 5769 return getCouldNotCompute(); 5770 } 5771 if (Pred == L->getHeader()) { 5772 Ok = true; 5773 break; 5774 } 5775 BB = Pred; 5776 } 5777 if (!Ok) 5778 return getCouldNotCompute(); 5779 } 5780 5781 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 5782 TerminatorInst *Term = ExitingBlock->getTerminator(); 5783 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 5784 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 5785 // Proceed to the next level to examine the exit condition expression. 5786 return computeExitLimitFromCond( 5787 L, BI->getCondition(), BI->getSuccessor(0), BI->getSuccessor(1), 5788 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 5789 } 5790 5791 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) 5792 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 5793 /*ControlsExit=*/IsOnlyExit); 5794 5795 return getCouldNotCompute(); 5796 } 5797 5798 ScalarEvolution::ExitLimit 5799 ScalarEvolution::computeExitLimitFromCond(const Loop *L, 5800 Value *ExitCond, 5801 BasicBlock *TBB, 5802 BasicBlock *FBB, 5803 bool ControlsExit, 5804 bool AllowPredicates) { 5805 // Check if the controlling expression for this loop is an And or Or. 5806 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 5807 if (BO->getOpcode() == Instruction::And) { 5808 // Recurse on the operands of the and. 5809 bool EitherMayExit = L->contains(TBB); 5810 ExitLimit EL0 = computeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB, 5811 ControlsExit && !EitherMayExit, 5812 AllowPredicates); 5813 ExitLimit EL1 = computeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB, 5814 ControlsExit && !EitherMayExit, 5815 AllowPredicates); 5816 const SCEV *BECount = getCouldNotCompute(); 5817 const SCEV *MaxBECount = getCouldNotCompute(); 5818 if (EitherMayExit) { 5819 // Both conditions must be true for the loop to continue executing. 5820 // Choose the less conservative count. 5821 if (EL0.Exact == getCouldNotCompute() || 5822 EL1.Exact == getCouldNotCompute()) 5823 BECount = getCouldNotCompute(); 5824 else 5825 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact); 5826 if (EL0.Max == getCouldNotCompute()) 5827 MaxBECount = EL1.Max; 5828 else if (EL1.Max == getCouldNotCompute()) 5829 MaxBECount = EL0.Max; 5830 else 5831 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max); 5832 } else { 5833 // Both conditions must be true at the same time for the loop to exit. 5834 // For now, be conservative. 5835 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 5836 if (EL0.Max == EL1.Max) 5837 MaxBECount = EL0.Max; 5838 if (EL0.Exact == EL1.Exact) 5839 BECount = EL0.Exact; 5840 } 5841 5842 SCEVUnionPredicate NP; 5843 NP.add(&EL0.Pred); 5844 NP.add(&EL1.Pred); 5845 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 5846 // to be more aggressive when computing BECount than when computing 5847 // MaxBECount. In these cases it is possible for EL0.Exact and EL1.Exact 5848 // to match, but for EL0.Max and EL1.Max to not. 5849 if (isa<SCEVCouldNotCompute>(MaxBECount) && 5850 !isa<SCEVCouldNotCompute>(BECount)) 5851 MaxBECount = BECount; 5852 5853 return ExitLimit(BECount, MaxBECount, NP); 5854 } 5855 if (BO->getOpcode() == Instruction::Or) { 5856 // Recurse on the operands of the or. 
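      // For example (illustrative): for a latch branch
      // "br i1 (or %c1, %c2), label %exit, label %header" the loop keeps
      // running only while both %c1 and %c2 are false, so when both operand
      // limits are computable the exact count computed below is their
      // unsigned minimum.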
5857 bool EitherMayExit = L->contains(FBB); 5858 ExitLimit EL0 = computeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB, 5859 ControlsExit && !EitherMayExit, 5860 AllowPredicates); 5861 ExitLimit EL1 = computeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB, 5862 ControlsExit && !EitherMayExit, 5863 AllowPredicates); 5864 const SCEV *BECount = getCouldNotCompute(); 5865 const SCEV *MaxBECount = getCouldNotCompute(); 5866 if (EitherMayExit) { 5867 // Both conditions must be false for the loop to continue executing. 5868 // Choose the less conservative count. 5869 if (EL0.Exact == getCouldNotCompute() || 5870 EL1.Exact == getCouldNotCompute()) 5871 BECount = getCouldNotCompute(); 5872 else 5873 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact); 5874 if (EL0.Max == getCouldNotCompute()) 5875 MaxBECount = EL1.Max; 5876 else if (EL1.Max == getCouldNotCompute()) 5877 MaxBECount = EL0.Max; 5878 else 5879 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max); 5880 } else { 5881 // Both conditions must be false at the same time for the loop to exit. 5882 // For now, be conservative. 5883 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 5884 if (EL0.Max == EL1.Max) 5885 MaxBECount = EL0.Max; 5886 if (EL0.Exact == EL1.Exact) 5887 BECount = EL0.Exact; 5888 } 5889 5890 SCEVUnionPredicate NP; 5891 NP.add(&EL0.Pred); 5892 NP.add(&EL1.Pred); 5893 return ExitLimit(BECount, MaxBECount, NP); 5894 } 5895 } 5896 5897 // With an icmp, it may be feasible to compute an exact backedge-taken count. 5898 // Proceed to the next level to examine the icmp. 5899 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 5900 ExitLimit EL = 5901 computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit); 5902 if (EL.hasFullInfo() || !AllowPredicates) 5903 return EL; 5904 5905 // Try again, but use SCEV predicates this time. 5906 return computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit, 5907 /*AllowPredicates=*/true); 5908 } 5909 5910 // Check for a constant condition. These are normally stripped out by 5911 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 5912 // preserve the CFG and is temporarily leaving constant conditions 5913 // in place. 5914 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 5915 if (L->contains(FBB) == !CI->getZExtValue()) 5916 // The backedge is always taken. 5917 return getCouldNotCompute(); 5918 else 5919 // The backedge is never taken. 5920 return getZero(CI->getType()); 5921 } 5922 5923 // If it's not an integer or pointer comparison then compute it the hard way. 
5924 return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 5925 } 5926 5927 ScalarEvolution::ExitLimit 5928 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 5929 ICmpInst *ExitCond, 5930 BasicBlock *TBB, 5931 BasicBlock *FBB, 5932 bool ControlsExit, 5933 bool AllowPredicates) { 5934 5935 // If the condition was exit on true, convert the condition to exit on false 5936 ICmpInst::Predicate Cond; 5937 if (!L->contains(FBB)) 5938 Cond = ExitCond->getPredicate(); 5939 else 5940 Cond = ExitCond->getInversePredicate(); 5941 5942 // Handle common loops like: for (X = "string"; *X; ++X) 5943 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 5944 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 5945 ExitLimit ItCnt = 5946 computeLoadConstantCompareExitLimit(LI, RHS, L, Cond); 5947 if (ItCnt.hasAnyInfo()) 5948 return ItCnt; 5949 } 5950 5951 ExitLimit ShiftEL = computeShiftCompareExitLimit( 5952 ExitCond->getOperand(0), ExitCond->getOperand(1), L, Cond); 5953 if (ShiftEL.hasAnyInfo()) 5954 return ShiftEL; 5955 5956 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 5957 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 5958 5959 // Try to evaluate any dependencies out of the loop. 5960 LHS = getSCEVAtScope(LHS, L); 5961 RHS = getSCEVAtScope(RHS, L); 5962 5963 // At this point, we would like to compute how many iterations of the 5964 // loop the predicate will return true for these inputs. 5965 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 5966 // If there is a loop-invariant, force it into the RHS. 5967 std::swap(LHS, RHS); 5968 Cond = ICmpInst::getSwappedPredicate(Cond); 5969 } 5970 5971 // Simplify the operands before analyzing them. 5972 (void)SimplifyICmpOperands(Cond, LHS, RHS); 5973 5974 // If we have a comparison of a chrec against a constant, try to use value 5975 // ranges to answer this query. 5976 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 5977 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 5978 if (AddRec->getLoop() == L) { 5979 // Form the constant range. 
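        // For example (illustrative): for a latch test "%iv slt 100" where
        // %iv is {0,+,1}, Cond is slt, so CompRange below is
        // [SINT_MIN, 100); the recurrence first leaves that range at
        // iteration 100, which becomes the backedge-taken count.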
5980 ConstantRange CompRange( 5981 ICmpInst::makeConstantRange(Cond, RHSC->getAPInt())); 5982 5983 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 5984 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 5985 } 5986 5987 switch (Cond) { 5988 case ICmpInst::ICMP_NE: { // while (X != Y) 5989 // Convert to: while (X-Y != 0) 5990 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 5991 AllowPredicates); 5992 if (EL.hasAnyInfo()) return EL; 5993 break; 5994 } 5995 case ICmpInst::ICMP_EQ: { // while (X == Y) 5996 // Convert to: while (X-Y == 0) 5997 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 5998 if (EL.hasAnyInfo()) return EL; 5999 break; 6000 } 6001 case ICmpInst::ICMP_SLT: 6002 case ICmpInst::ICMP_ULT: { // while (X < Y) 6003 bool IsSigned = Cond == ICmpInst::ICMP_SLT; 6004 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 6005 AllowPredicates); 6006 if (EL.hasAnyInfo()) return EL; 6007 break; 6008 } 6009 case ICmpInst::ICMP_SGT: 6010 case ICmpInst::ICMP_UGT: { // while (X > Y) 6011 bool IsSigned = Cond == ICmpInst::ICMP_SGT; 6012 ExitLimit EL = 6013 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 6014 AllowPredicates); 6015 if (EL.hasAnyInfo()) return EL; 6016 break; 6017 } 6018 default: 6019 break; 6020 } 6021 return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 6022 } 6023 6024 ScalarEvolution::ExitLimit 6025 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 6026 SwitchInst *Switch, 6027 BasicBlock *ExitingBlock, 6028 bool ControlsExit) { 6029 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 6030 6031 // Give up if the exit is the default dest of a switch. 6032 if (Switch->getDefaultDest() == ExitingBlock) 6033 return getCouldNotCompute(); 6034 6035 assert(L->contains(Switch->getDefaultDest()) && 6036 "Default case must not exit the loop!"); 6037 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 6038 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 6039 6040 // while (X != Y) --> while (X-Y != 0) 6041 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 6042 if (EL.hasAnyInfo()) 6043 return EL; 6044 6045 return getCouldNotCompute(); 6046 } 6047 6048 static ConstantInt * 6049 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 6050 ScalarEvolution &SE) { 6051 const SCEV *InVal = SE.getConstant(C); 6052 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 6053 assert(isa<SCEVConstant>(Val) && 6054 "Evaluation of SCEV at constant didn't fold correctly?"); 6055 return cast<SCEVConstant>(Val)->getValue(); 6056 } 6057 6058 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 6059 /// compute the backedge execution count. 6060 ScalarEvolution::ExitLimit 6061 ScalarEvolution::computeLoadConstantCompareExitLimit( 6062 LoadInst *LI, 6063 Constant *RHS, 6064 const Loop *L, 6065 ICmpInst::Predicate predicate) { 6066 6067 if (LI->isVolatile()) return getCouldNotCompute(); 6068 6069 // Check to see if the loaded pointer is a getelementptr of a global. 6070 // TODO: Use SCEV instead of manually grubbing with GEPs. 6071 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 6072 if (!GEP) return getCouldNotCompute(); 6073 6074 // Make sure that it is really a constant global we are gepping, with an 6075 // initializer, and make sure the first IDX is really 0. 
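// (Sketch of the expected shape, on hypothetical IR:
// %p = getelementptr [7 x i8], [7 x i8]* @GV, i64 0, i64 %idx
// %v = load i8, i8* %p
// where @GV is a constant global with a definitive initializer.)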
6076 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
6077 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
6078 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
6079 !cast<Constant>(GEP->getOperand(1))->isNullValue())
6080 return getCouldNotCompute();
6081
6082 // Okay, we allow one non-constant index into the GEP instruction.
6083 Value *VarIdx = nullptr;
6084 std::vector<Constant*> Indexes;
6085 unsigned VarIdxNum = 0;
6086 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
6087 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
6088 Indexes.push_back(CI);
6089 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
6090 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
6091 VarIdx = GEP->getOperand(i);
6092 VarIdxNum = i-2;
6093 Indexes.push_back(nullptr);
6094 }
6095
6096 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
6097 if (!VarIdx)
6098 return getCouldNotCompute();
6099
6100 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
6101 // Check to see if X is a loop variant variable value now.
6102 const SCEV *Idx = getSCEV(VarIdx);
6103 Idx = getSCEVAtScope(Idx, L);
6104
6105 // We can only recognize very limited forms of loop index expressions, in
6106 // particular, only affine AddRec's like {C1,+,C2}.
6107 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
6108 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
6109 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
6110 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
6111 return getCouldNotCompute();
6112
6113 unsigned MaxSteps = MaxBruteForceIterations;
6114 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
6115 ConstantInt *ItCst = ConstantInt::get(
6116 cast<IntegerType>(IdxExpr->getType()), IterationNum);
6117 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
6118
6119 // Form the GEP offset.
6120 Indexes[VarIdxNum] = Val;
6121
6122 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
6123 Indexes);
6124 if (!Result) break; // Cannot compute!
6125
6126 // Evaluate the condition for this iteration.
6127 Result = ConstantExpr::getICmp(predicate, Result, RHS);
6128 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
6129 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
6130 ++NumArrayLenItCounts;
6131 return getConstant(ItCst); // Found terminating iteration!
6132 }
6133 }
6134 return getCouldNotCompute();
6135 }
6136
6137 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
6138 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
6139 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
6140 if (!RHS)
6141 return getCouldNotCompute();
6142
6143 const BasicBlock *Latch = L->getLoopLatch();
6144 if (!Latch)
6145 return getCouldNotCompute();
6146
6147 const BasicBlock *Predecessor = L->getLoopPredecessor();
6148 if (!Predecessor)
6149 return getCouldNotCompute();
6150
6151 // Return true if V is of the form "LHS `shift_op` <positive constant>".
6152 // Return LHS in OutLHS and shift_op in OutOpCode.
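// (e.g. `%s = lshr i32 %x, 2` matches, giving OutLHS = %x and
// OutOpCode = Instruction::LShr; a shift by zero is rejected because the
// amount must be strictly positive.)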
6153 auto MatchPositiveShift =
6154 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
6155
6156 using namespace PatternMatch;
6157
6158 ConstantInt *ShiftAmt;
6159 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
6160 OutOpCode = Instruction::LShr;
6161 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
6162 OutOpCode = Instruction::AShr;
6163 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
6164 OutOpCode = Instruction::Shl;
6165 else
6166 return false;
6167
6168 return ShiftAmt->getValue().isStrictlyPositive();
6169 };
6170
6171 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
6172 //
6173 // loop:
6174 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
6175 // %iv.shifted = lshr i32 %iv, <positive constant>
6176 //
6177 // Return true on a successful match. Return the corresponding PHI node (%iv
6178 // above) in PNOut and the opcode of the shift operation in OpCodeOut.
6179 auto MatchShiftRecurrence =
6180 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
6181 Optional<Instruction::BinaryOps> PostShiftOpCode;
6182
6183 {
6184 Instruction::BinaryOps OpC;
6185 Value *V;
6186
6187 // If we encounter a shift instruction, "peel off" the shift operation,
6188 // and remember that we did so. Later when we inspect %iv's backedge
6189 // value, we will make sure that the backedge value uses the same
6190 // operation.
6191 //
6192 // Note: the peeled shift operation does not have to be the same
6193 // instruction as the one feeding into the PHI's backedge value. We only
6194 // really care about it being the same *kind* of shift instruction --
6195 // that's all that is required for our later inferences to hold.
6196 if (MatchPositiveShift(LHS, V, OpC)) {
6197 PostShiftOpCode = OpC;
6198 LHS = V;
6199 }
6200 }
6201
6202 PNOut = dyn_cast<PHINode>(LHS);
6203 if (!PNOut || PNOut->getParent() != L->getHeader())
6204 return false;
6205
6206 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
6207 Value *OpLHS;
6208
6209 return
6210 // The backedge value for the PHI node must be a shift by a positive
6211 // amount
6212 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
6213
6214 // of the PHI node itself
6215 OpLHS == PNOut &&
6216
6217 // and the kind of shift should match the kind of shift we peeled
6218 // off, if any.
6219 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
6220 };
6221
6222 PHINode *PN;
6223 Instruction::BinaryOps OpCode;
6224 if (!MatchShiftRecurrence(LHS, PN, OpCode))
6225 return getCouldNotCompute();
6226
6227 const DataLayout &DL = getDataLayout();
6228
6229 // The key rationale for this optimization is that for some kinds of shift
6230 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
6231 // within a finite number of iterations. If the condition guarding the
6232 // backedge (in the sense that the backedge is taken if the condition is true)
6233 // is false for the value the shift recurrence stabilizes to, then we know
6234 // that the backedge is taken only a finite number of times.
6235
6236 ConstantInt *StableValue = nullptr;
6237 switch (OpCode) {
6238 default:
6239 llvm_unreachable("Impossible case!");
6240
6241 case Instruction::AShr: {
6242 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
6243 // bitwidth(K) iterations.
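// For example, in i8, {-8,ashr,1} yields -8, -4, -2, -1, -1, ... and is
// stable at -1; any non-negative start value stabilizes to 0 instead.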
6244 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 6245 bool KnownZero, KnownOne; 6246 ComputeSignBit(FirstValue, KnownZero, KnownOne, DL, 0, nullptr, 6247 Predecessor->getTerminator(), &DT); 6248 auto *Ty = cast<IntegerType>(RHS->getType()); 6249 if (KnownZero) 6250 StableValue = ConstantInt::get(Ty, 0); 6251 else if (KnownOne) 6252 StableValue = ConstantInt::get(Ty, -1, true); 6253 else 6254 return getCouldNotCompute(); 6255 6256 break; 6257 } 6258 case Instruction::LShr: 6259 case Instruction::Shl: 6260 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 6261 // stabilize to 0 in at most bitwidth(K) iterations. 6262 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 6263 break; 6264 } 6265 6266 auto *Result = 6267 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 6268 assert(Result->getType()->isIntegerTy(1) && 6269 "Otherwise cannot be an operand to a branch instruction"); 6270 6271 if (Result->isZeroValue()) { 6272 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 6273 const SCEV *UpperBound = 6274 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 6275 SCEVUnionPredicate P; 6276 return ExitLimit(getCouldNotCompute(), UpperBound, P); 6277 } 6278 6279 return getCouldNotCompute(); 6280 } 6281 6282 /// Return true if we can constant fold an instruction of the specified type, 6283 /// assuming that all operands were constants. 6284 static bool CanConstantFold(const Instruction *I) { 6285 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 6286 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 6287 isa<LoadInst>(I)) 6288 return true; 6289 6290 if (const CallInst *CI = dyn_cast<CallInst>(I)) 6291 if (const Function *F = CI->getCalledFunction()) 6292 return canConstantFoldCallTo(F); 6293 return false; 6294 } 6295 6296 /// Determine whether this instruction can constant evolve within this loop 6297 /// assuming its operands can all constant evolve. 6298 static bool canConstantEvolve(Instruction *I, const Loop *L) { 6299 // An instruction outside of the loop can't be derived from a loop PHI. 6300 if (!L->contains(I)) return false; 6301 6302 if (isa<PHINode>(I)) { 6303 // We don't currently keep track of the control flow needed to evaluate 6304 // PHIs, so we cannot handle PHIs inside of loops. 6305 return L->getHeader() == I->getParent(); 6306 } 6307 6308 // If we won't be able to constant fold this expression even if the operands 6309 // are constants, bail early. 6310 return CanConstantFold(I); 6311 } 6312 6313 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 6314 /// recursing through each instruction operand until reaching a loop header phi. 6315 static PHINode * 6316 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 6317 DenseMap<Instruction *, PHINode *> &PHIMap) { 6318 6319 // Otherwise, we can evaluate this instruction if all of its operands are 6320 // constant or derived from a PHI node themselves. 6321 PHINode *PHI = nullptr; 6322 for (Value *Op : UseInst->operands()) { 6323 if (isa<Constant>(Op)) continue; 6324 6325 Instruction *OpInst = dyn_cast<Instruction>(Op); 6326 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 6327 6328 PHINode *P = dyn_cast<PHINode>(OpInst); 6329 if (!P) 6330 // If this operand is already visited, reuse the prior result. 6331 // We may have P != PHI if this is the deepest point at which the 6332 // inconsistent paths meet. 
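// (e.g. if two operands of an add reach the same PHI through different
// instruction chains, the second walk reuses the first walk's cached
// answer here.)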
6333 P = PHIMap.lookup(OpInst);
6334 if (!P) {
6335 // Recurse and memoize the results, whether a phi is found or not.
6336 // This recursive call invalidates pointers into PHIMap.
6337 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
6338 PHIMap[OpInst] = P;
6339 }
6340 if (!P)
6341 return nullptr; // Not evolving from PHI
6342 if (PHI && PHI != P)
6343 return nullptr; // Evolving from multiple different PHIs.
6344 PHI = P;
6345 }
6346 // This is an expression evolving from a constant PHI!
6347 return PHI;
6348 }
6349
6350 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
6351 /// in the loop that V is derived from. We allow arbitrary operations along the
6352 /// way, but the operands of an operation must either be constants or a value
6353 /// derived from a constant PHI. If this expression does not fit with these
6354 /// constraints, return null.
6355 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
6356 Instruction *I = dyn_cast<Instruction>(V);
6357 if (!I || !canConstantEvolve(I, L)) return nullptr;
6358
6359 if (PHINode *PN = dyn_cast<PHINode>(I))
6360 return PN;
6361
6362 // Record non-constant instructions contained by the loop.
6363 DenseMap<Instruction *, PHINode *> PHIMap;
6364 return getConstantEvolvingPHIOperands(I, L, PHIMap);
6365 }
6366
6367 /// EvaluateExpression - Given an expression that passes the
6368 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI
6369 /// nodes in the loop have the constant values given in the Vals map. If we
6370 /// can't fold this expression for some reason, return null.
6371 static Constant *EvaluateExpression(Value *V, const Loop *L,
6372 DenseMap<Instruction *, Constant *> &Vals,
6373 const DataLayout &DL,
6374 const TargetLibraryInfo *TLI) {
6375 // Convenient constant check, but redundant for recursive calls.
6376 if (Constant *C = dyn_cast<Constant>(V)) return C;
6377 Instruction *I = dyn_cast<Instruction>(V);
6378 if (!I) return nullptr;
6379
6380 if (Constant *C = Vals.lookup(I)) return C;
6381
6382 // An instruction inside the loop depends on a value outside the loop that we
6383 // weren't given a mapping for, or a value such as a call inside the loop.
6384 if (!canConstantEvolve(I, L)) return nullptr;
6385
6386 // An unmapped PHI can be due to a branch or another loop inside this loop,
6387 // or due to this not being the initial iteration through a loop where we
6388 // couldn't compute the evolution of this particular PHI last time.
6389 if (isa<PHINode>(I)) return nullptr;
6390
6391 std::vector<Constant*> Operands(I->getNumOperands());
6392
6393 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
6394 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
6395 if (!Operand) {
6396 Operands[i] = dyn_cast<Constant>(I->getOperand(i));
6397 if (!Operands[i]) return nullptr;
6398 continue;
6399 }
6400 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
6401 Vals[Operand] = C;
6402 if (!C) return nullptr;
6403 Operands[i] = C;
6404 }
6405
6406 if (CmpInst *CI = dyn_cast<CmpInst>(I))
6407 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
6408 Operands[1], DL, TLI);
6409 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
6410 if (!LI->isVolatile())
6411 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
6412 }
6413 return ConstantFoldInstOperands(I, Operands, DL, TLI);
6414 }
6415
6416
6417 // If every incoming value to PN except the one for BB is a specific Constant,
6418 // return that, else return nullptr.
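// (For a hypothetical `%p = phi i32 [ 7, %a ], [ 7, %b ], [ %x, %latch ]`
// and BB == %latch, this returns i32 7; had the two constants differed, it
// would return nullptr.)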
6419 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 6420 Constant *IncomingVal = nullptr; 6421 6422 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 6423 if (PN->getIncomingBlock(i) == BB) 6424 continue; 6425 6426 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 6427 if (!CurrentVal) 6428 return nullptr; 6429 6430 if (IncomingVal != CurrentVal) { 6431 if (IncomingVal) 6432 return nullptr; 6433 IncomingVal = CurrentVal; 6434 } 6435 } 6436 6437 return IncomingVal; 6438 } 6439 6440 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 6441 /// in the header of its containing loop, we know the loop executes a 6442 /// constant number of times, and the PHI node is just a recurrence 6443 /// involving constants, fold it. 6444 Constant * 6445 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 6446 const APInt &BEs, 6447 const Loop *L) { 6448 auto I = ConstantEvolutionLoopExitValue.find(PN); 6449 if (I != ConstantEvolutionLoopExitValue.end()) 6450 return I->second; 6451 6452 if (BEs.ugt(MaxBruteForceIterations)) 6453 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 6454 6455 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 6456 6457 DenseMap<Instruction *, Constant *> CurrentIterVals; 6458 BasicBlock *Header = L->getHeader(); 6459 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 6460 6461 BasicBlock *Latch = L->getLoopLatch(); 6462 if (!Latch) 6463 return nullptr; 6464 6465 for (auto &I : *Header) { 6466 PHINode *PHI = dyn_cast<PHINode>(&I); 6467 if (!PHI) break; 6468 auto *StartCST = getOtherIncomingValue(PHI, Latch); 6469 if (!StartCST) continue; 6470 CurrentIterVals[PHI] = StartCST; 6471 } 6472 if (!CurrentIterVals.count(PN)) 6473 return RetVal = nullptr; 6474 6475 Value *BEValue = PN->getIncomingValueForBlock(Latch); 6476 6477 // Execute the loop symbolically to determine the exit value. 6478 if (BEs.getActiveBits() >= 32) 6479 return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it! 6480 6481 unsigned NumIterations = BEs.getZExtValue(); // must be in range 6482 unsigned IterationNum = 0; 6483 const DataLayout &DL = getDataLayout(); 6484 for (; ; ++IterationNum) { 6485 if (IterationNum == NumIterations) 6486 return RetVal = CurrentIterVals[PN]; // Got exit value! 6487 6488 // Compute the value of the PHIs for the next iteration. 6489 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 6490 DenseMap<Instruction *, Constant *> NextIterVals; 6491 Constant *NextPHI = 6492 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 6493 if (!NextPHI) 6494 return nullptr; // Couldn't evaluate! 6495 NextIterVals[PN] = NextPHI; 6496 6497 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 6498 6499 // Also evaluate the other PHI nodes. However, we don't get to stop if we 6500 // cease to be able to evaluate one of them or if they stop evolving, 6501 // because that doesn't necessarily prevent us from computing PN. 6502 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 6503 for (const auto &I : CurrentIterVals) { 6504 PHINode *PHI = dyn_cast<PHINode>(I.first); 6505 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 6506 PHIsToCompute.emplace_back(PHI, I.second); 6507 } 6508 // We use two distinct loops because EvaluateExpression may invalidate any 6509 // iterators into CurrentIterVals. 
6510 for (const auto &I : PHIsToCompute) { 6511 PHINode *PHI = I.first; 6512 Constant *&NextPHI = NextIterVals[PHI]; 6513 if (!NextPHI) { // Not already computed. 6514 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 6515 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 6516 } 6517 if (NextPHI != I.second) 6518 StoppedEvolving = false; 6519 } 6520 6521 // If all entries in CurrentIterVals == NextIterVals then we can stop 6522 // iterating, the loop can't continue to change. 6523 if (StoppedEvolving) 6524 return RetVal = CurrentIterVals[PN]; 6525 6526 CurrentIterVals.swap(NextIterVals); 6527 } 6528 } 6529 6530 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, 6531 Value *Cond, 6532 bool ExitWhen) { 6533 PHINode *PN = getConstantEvolvingPHI(Cond, L); 6534 if (!PN) return getCouldNotCompute(); 6535 6536 // If the loop is canonicalized, the PHI will have exactly two entries. 6537 // That's the only form we support here. 6538 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); 6539 6540 DenseMap<Instruction *, Constant *> CurrentIterVals; 6541 BasicBlock *Header = L->getHeader(); 6542 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 6543 6544 BasicBlock *Latch = L->getLoopLatch(); 6545 assert(Latch && "Should follow from NumIncomingValues == 2!"); 6546 6547 for (auto &I : *Header) { 6548 PHINode *PHI = dyn_cast<PHINode>(&I); 6549 if (!PHI) 6550 break; 6551 auto *StartCST = getOtherIncomingValue(PHI, Latch); 6552 if (!StartCST) continue; 6553 CurrentIterVals[PHI] = StartCST; 6554 } 6555 if (!CurrentIterVals.count(PN)) 6556 return getCouldNotCompute(); 6557 6558 // Okay, we find a PHI node that defines the trip count of this loop. Execute 6559 // the loop symbolically to determine when the condition gets a value of 6560 // "ExitWhen". 6561 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 6562 const DataLayout &DL = getDataLayout(); 6563 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){ 6564 auto *CondVal = dyn_cast_or_null<ConstantInt>( 6565 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); 6566 6567 // Couldn't symbolically evaluate. 6568 if (!CondVal) return getCouldNotCompute(); 6569 6570 if (CondVal->getValue() == uint64_t(ExitWhen)) { 6571 ++NumBruteForceTripCountsComputed; 6572 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 6573 } 6574 6575 // Update all the PHI nodes for the next iteration. 6576 DenseMap<Instruction *, Constant *> NextIterVals; 6577 6578 // Create a list of which PHIs we need to compute. We want to do this before 6579 // calling EvaluateExpression on them because that may invalidate iterators 6580 // into CurrentIterVals. 6581 SmallVector<PHINode *, 8> PHIsToCompute; 6582 for (const auto &I : CurrentIterVals) { 6583 PHINode *PHI = dyn_cast<PHINode>(I.first); 6584 if (!PHI || PHI->getParent() != Header) continue; 6585 PHIsToCompute.push_back(PHI); 6586 } 6587 for (PHINode *PHI : PHIsToCompute) { 6588 Constant *&NextPHI = NextIterVals[PHI]; 6589 if (NextPHI) continue; // Already computed! 6590 6591 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 6592 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 6593 } 6594 CurrentIterVals.swap(NextIterVals); 6595 } 6596 6597 // Too many iterations were needed to evaluate. 
6598 return getCouldNotCompute(); 6599 } 6600 6601 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 6602 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 6603 ValuesAtScopes[V]; 6604 // Check to see if we've folded this expression at this loop before. 6605 for (auto &LS : Values) 6606 if (LS.first == L) 6607 return LS.second ? LS.second : V; 6608 6609 Values.emplace_back(L, nullptr); 6610 6611 // Otherwise compute it. 6612 const SCEV *C = computeSCEVAtScope(V, L); 6613 for (auto &LS : reverse(ValuesAtScopes[V])) 6614 if (LS.first == L) { 6615 LS.second = C; 6616 break; 6617 } 6618 return C; 6619 } 6620 6621 /// This builds up a Constant using the ConstantExpr interface. That way, we 6622 /// will return Constants for objects which aren't represented by a 6623 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 6624 /// Returns NULL if the SCEV isn't representable as a Constant. 6625 static Constant *BuildConstantFromSCEV(const SCEV *V) { 6626 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 6627 case scCouldNotCompute: 6628 case scAddRecExpr: 6629 break; 6630 case scConstant: 6631 return cast<SCEVConstant>(V)->getValue(); 6632 case scUnknown: 6633 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 6634 case scSignExtend: { 6635 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 6636 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 6637 return ConstantExpr::getSExt(CastOp, SS->getType()); 6638 break; 6639 } 6640 case scZeroExtend: { 6641 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 6642 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 6643 return ConstantExpr::getZExt(CastOp, SZ->getType()); 6644 break; 6645 } 6646 case scTruncate: { 6647 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 6648 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 6649 return ConstantExpr::getTrunc(CastOp, ST->getType()); 6650 break; 6651 } 6652 case scAddExpr: { 6653 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 6654 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 6655 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 6656 unsigned AS = PTy->getAddressSpace(); 6657 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 6658 C = ConstantExpr::getBitCast(C, DestPtrTy); 6659 } 6660 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 6661 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 6662 if (!C2) return nullptr; 6663 6664 // First pointer! 6665 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 6666 unsigned AS = C2->getType()->getPointerAddressSpace(); 6667 std::swap(C, C2); 6668 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 6669 // The offsets have been converted to bytes. We can add bytes to an 6670 // i8* by GEP with the byte count in the first index. 6671 C = ConstantExpr::getBitCast(C, DestPtrTy); 6672 } 6673 6674 // Don't bother trying to sum two pointers. We probably can't 6675 // statically compute a load that results from it anyway. 
6676 if (C2->getType()->isPointerTy()) 6677 return nullptr; 6678 6679 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 6680 if (PTy->getElementType()->isStructTy()) 6681 C2 = ConstantExpr::getIntegerCast( 6682 C2, Type::getInt32Ty(C->getContext()), true); 6683 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); 6684 } else 6685 C = ConstantExpr::getAdd(C, C2); 6686 } 6687 return C; 6688 } 6689 break; 6690 } 6691 case scMulExpr: { 6692 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 6693 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 6694 // Don't bother with pointers at all. 6695 if (C->getType()->isPointerTy()) return nullptr; 6696 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 6697 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 6698 if (!C2 || C2->getType()->isPointerTy()) return nullptr; 6699 C = ConstantExpr::getMul(C, C2); 6700 } 6701 return C; 6702 } 6703 break; 6704 } 6705 case scUDivExpr: { 6706 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 6707 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 6708 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 6709 if (LHS->getType() == RHS->getType()) 6710 return ConstantExpr::getUDiv(LHS, RHS); 6711 break; 6712 } 6713 case scSMaxExpr: 6714 case scUMaxExpr: 6715 break; // TODO: smax, umax. 6716 } 6717 return nullptr; 6718 } 6719 6720 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 6721 if (isa<SCEVConstant>(V)) return V; 6722 6723 // If this instruction is evolved from a constant-evolving PHI, compute the 6724 // exit value from the loop without using SCEVs. 6725 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 6726 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 6727 const Loop *LI = this->LI[I->getParent()]; 6728 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 6729 if (PHINode *PN = dyn_cast<PHINode>(I)) 6730 if (PN->getParent() == LI->getHeader()) { 6731 // Okay, there is no closed form solution for the PHI node. Check 6732 // to see if the loop that contains it has a known backedge-taken 6733 // count. If so, we may be able to force computation of the exit 6734 // value. 6735 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 6736 if (const SCEVConstant *BTCC = 6737 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 6738 // Okay, we know how many times the containing loop executes. If 6739 // this is a constant evolving PHI node, get the final value at 6740 // the specified iteration number. 6741 Constant *RV = 6742 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI); 6743 if (RV) return getSCEV(RV); 6744 } 6745 } 6746 6747 // Okay, this is an expression that we cannot symbolically evaluate 6748 // into a SCEV. Check to see if it's possible to symbolically evaluate 6749 // the arguments into constants, and if so, try to constant propagate the 6750 // result. This is particularly useful for computing loop exit values. 6751 if (CanConstantFold(I)) { 6752 SmallVector<Constant *, 4> Operands; 6753 bool MadeImprovement = false; 6754 for (Value *Op : I->operands()) { 6755 if (Constant *C = dyn_cast<Constant>(Op)) { 6756 Operands.push_back(C); 6757 continue; 6758 } 6759 6760 // If any of the operands is non-constant and if they are 6761 // non-integer and non-pointer, don't even try to analyze them 6762 // with scev techniques. 
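// (Only integer and pointer types are SCEVable, so e.g. a floating-point or
// vector operand makes us give up here.)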
6763 if (!isSCEVable(Op->getType())) 6764 return V; 6765 6766 const SCEV *OrigV = getSCEV(Op); 6767 const SCEV *OpV = getSCEVAtScope(OrigV, L); 6768 MadeImprovement |= OrigV != OpV; 6769 6770 Constant *C = BuildConstantFromSCEV(OpV); 6771 if (!C) return V; 6772 if (C->getType() != Op->getType()) 6773 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 6774 Op->getType(), 6775 false), 6776 C, Op->getType()); 6777 Operands.push_back(C); 6778 } 6779 6780 // Check to see if getSCEVAtScope actually made an improvement. 6781 if (MadeImprovement) { 6782 Constant *C = nullptr; 6783 const DataLayout &DL = getDataLayout(); 6784 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 6785 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 6786 Operands[1], DL, &TLI); 6787 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 6788 if (!LI->isVolatile()) 6789 C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 6790 } else 6791 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 6792 if (!C) return V; 6793 return getSCEV(C); 6794 } 6795 } 6796 } 6797 6798 // This is some other type of SCEVUnknown, just return it. 6799 return V; 6800 } 6801 6802 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 6803 // Avoid performing the look-up in the common case where the specified 6804 // expression has no loop-variant portions. 6805 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 6806 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 6807 if (OpAtScope != Comm->getOperand(i)) { 6808 // Okay, at least one of these operands is loop variant but might be 6809 // foldable. Build a new instance of the folded commutative expression. 6810 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 6811 Comm->op_begin()+i); 6812 NewOps.push_back(OpAtScope); 6813 6814 for (++i; i != e; ++i) { 6815 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 6816 NewOps.push_back(OpAtScope); 6817 } 6818 if (isa<SCEVAddExpr>(Comm)) 6819 return getAddExpr(NewOps); 6820 if (isa<SCEVMulExpr>(Comm)) 6821 return getMulExpr(NewOps); 6822 if (isa<SCEVSMaxExpr>(Comm)) 6823 return getSMaxExpr(NewOps); 6824 if (isa<SCEVUMaxExpr>(Comm)) 6825 return getUMaxExpr(NewOps); 6826 llvm_unreachable("Unknown commutative SCEV type!"); 6827 } 6828 } 6829 // If we got here, all operands are loop invariant. 6830 return Comm; 6831 } 6832 6833 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 6834 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 6835 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 6836 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 6837 return Div; // must be loop invariant 6838 return getUDivExpr(LHS, RHS); 6839 } 6840 6841 // If this is a loop recurrence for a loop that does not contain L, then we 6842 // are dealing with the final value computed by the loop. 6843 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 6844 // First, attempt to evaluate each operand. 6845 // Avoid performing the look-up in the common case where the specified 6846 // expression has no loop-variant portions. 6847 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 6848 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 6849 if (OpAtScope == AddRec->getOperand(i)) 6850 continue; 6851 6852 // Okay, at least one of these operands is loop variant but might be 6853 // foldable. Build a new instance of the folded commutative expression. 
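// (Illustrative: an addrec whose start is the exit value of some other
// loop's recurrence may fold to a constant at this scope, e.g. {%n,+,1}
// becoming {100,+,1} once %n is known to be 100.)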
6854 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 6855 AddRec->op_begin()+i); 6856 NewOps.push_back(OpAtScope); 6857 for (++i; i != e; ++i) 6858 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 6859 6860 const SCEV *FoldedRec = 6861 getAddRecExpr(NewOps, AddRec->getLoop(), 6862 AddRec->getNoWrapFlags(SCEV::FlagNW)); 6863 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 6864 // The addrec may be folded to a nonrecurrence, for example, if the 6865 // induction variable is multiplied by zero after constant folding. Go 6866 // ahead and return the folded value. 6867 if (!AddRec) 6868 return FoldedRec; 6869 break; 6870 } 6871 6872 // If the scope is outside the addrec's loop, evaluate it by using the 6873 // loop exit value of the addrec. 6874 if (!AddRec->getLoop()->contains(L)) { 6875 // To evaluate this recurrence, we need to know how many times the AddRec 6876 // loop iterates. Compute this now. 6877 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 6878 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 6879 6880 // Then, evaluate the AddRec. 6881 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 6882 } 6883 6884 return AddRec; 6885 } 6886 6887 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 6888 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 6889 if (Op == Cast->getOperand()) 6890 return Cast; // must be loop invariant 6891 return getZeroExtendExpr(Op, Cast->getType()); 6892 } 6893 6894 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 6895 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 6896 if (Op == Cast->getOperand()) 6897 return Cast; // must be loop invariant 6898 return getSignExtendExpr(Op, Cast->getType()); 6899 } 6900 6901 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 6902 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 6903 if (Op == Cast->getOperand()) 6904 return Cast; // must be loop invariant 6905 return getTruncateExpr(Op, Cast->getType()); 6906 } 6907 6908 llvm_unreachable("Unknown SCEV type!"); 6909 } 6910 6911 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 6912 return getSCEVAtScope(getSCEV(V), L); 6913 } 6914 6915 /// Finds the minimum unsigned root of the following equation: 6916 /// 6917 /// A * X = B (mod N) 6918 /// 6919 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 6920 /// A and B isn't important. 6921 /// 6922 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 6923 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B, 6924 ScalarEvolution &SE) { 6925 uint32_t BW = A.getBitWidth(); 6926 assert(BW == B.getBitWidth() && "Bit widths must be the same."); 6927 assert(A != 0 && "A must be non-zero."); 6928 6929 // 1. D = gcd(A, N) 6930 // 6931 // The gcd of A and N may have only one prime factor: 2. The number of 6932 // trailing zeros in A is its multiplicity 6933 uint32_t Mult2 = A.countTrailingZeros(); 6934 // D = 2^Mult2 6935 6936 // 2. Check if B is divisible by D. 6937 // 6938 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 6939 // is not less than multiplicity of this prime factor for D. 6940 if (B.countTrailingZeros() < Mult2) 6941 return SE.getCouldNotCompute(); 6942 6943 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 6944 // modulo (N / D). 6945 // 6946 // (N / D) may need BW+1 bits in its representation. 
Hence, we'll use this
6947 // bit width during computations.
6948 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
6949 APInt Mod(BW + 1, 0);
6950 Mod.setBit(BW - Mult2); // Mod = N / D
6951 APInt I = AD.multiplicativeInverse(Mod);
6952
6953 // 4. Compute the minimum unsigned root of the equation:
6954 // I * (B / D) mod (N / D)
6955 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
6956
6957 // The result is guaranteed to be less than 2^BW so we may truncate it to BW
6958 // bits.
6959 return SE.getConstant(Result.trunc(BW));
6960 }
6961
6962 /// Find the roots of the quadratic equation for the given quadratic chrec
6963 /// {L,+,M,+,N}. This returns either the two roots (which might be the same) or
6964 /// None if the roots cannot be computed as constants.
6965 ///
6966 static Optional<std::pair<const SCEVConstant *,const SCEVConstant *>>
6967 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
6968 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
6969 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
6970 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
6971 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
6972
6973 // We currently can only solve this if the coefficients are constants.
6974 if (!LC || !MC || !NC)
6975 return None;
6976
6977 uint32_t BitWidth = LC->getAPInt().getBitWidth();
6978 const APInt &L = LC->getAPInt();
6979 const APInt &M = MC->getAPInt();
6980 const APInt &N = NC->getAPInt();
6981 APInt Two(BitWidth, 2);
6982 APInt Four(BitWidth, 4);
6983
6984 {
6985 using namespace APIntOps;
6986 const APInt& C = L;
6987 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
6988 // The B coefficient is M-N/2
6989 APInt B(M);
6990 B -= sdiv(N,Two);
6991
6992 // The A coefficient is N/2
6993 APInt A(N.sdiv(Two));
6994
6995 // Compute the B^2-4ac term.
6996 APInt SqrtTerm(B);
6997 SqrtTerm *= B;
6998 SqrtTerm -= Four * (A * C);
6999
7000 if (SqrtTerm.isNegative()) {
7001 // The loop is provably infinite.
7002 return None;
7003 }
7004
7005 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
7006 // integer value or else APInt::sqrt() will assert.
7007 APInt SqrtVal(SqrtTerm.sqrt());
7008
7009 // Compute the two solutions for the quadratic formula.
7010 // The divisions must be performed as signed divisions.
7011 APInt NegB(-B);
7012 APInt TwoA(A << 1);
7013 if (TwoA.isMinValue())
7014 return None;
7015
7016 LLVMContext &Context = SE.getContext();
7017
7018 ConstantInt *Solution1 =
7019 ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
7020 ConstantInt *Solution2 =
7021 ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
7022
7023 return std::make_pair(cast<SCEVConstant>(SE.getConstant(Solution1)),
7024 cast<SCEVConstant>(SE.getConstant(Solution2)));
7025 } // end of block using APIntOps
7026 }
7027
7028 ScalarEvolution::ExitLimit
7029 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
7030 bool AllowPredicates) {
7031
7032 // This is only used for loops with a "x != y" exit test. The exit condition
7033 // is now expressed as a single expression, V = x-y. So the exit test is
7034 // effectively V != 0. We know, and take advantage of, the fact that this
7035 // expression is only used in a comparison-with-zero context.
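// For example, if the source loop was `for (i = 0; i != 10; i += 2)`, then x
// is {0,+,2}, y is 10, and V = x-y is {-10,+,2}; the answer is the smallest
// N with -10 + 2*N == 0 (mod 2^BW), namely N = 5.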
7036
7037 SCEVUnionPredicate P;
7038 // If the value is a constant
7039 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
7040 // If the value is already zero, the branch will execute zero times.
7041 if (C->getValue()->isZero()) return C;
7042 return getCouldNotCompute(); // Otherwise it will loop infinitely.
7043 }
7044
7045 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
7046 if (!AddRec && AllowPredicates)
7047 // Try to make this an AddRec using runtime tests, in the first X
7048 // iterations of this loop, where X is the SCEV expression found by the
7049 // algorithm below.
7050 AddRec = convertSCEVToAddRecWithPredicates(V, L, P);
7051
7052 if (!AddRec || AddRec->getLoop() != L)
7053 return getCouldNotCompute();
7054
7055 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
7056 // the quadratic equation to solve it.
7057 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
7058 if (auto Roots = SolveQuadraticEquation(AddRec, *this)) {
7059 const SCEVConstant *R1 = Roots->first;
7060 const SCEVConstant *R2 = Roots->second;
7061 // Pick the smallest positive root value.
7062 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
7063 CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
7064 if (!CB->getZExtValue())
7065 std::swap(R1, R2); // R1 is the minimum root now.
7066
7067 // We can only use this value if the chrec ends up with an exact zero
7068 // value at this index. When solving for "X*X != 5", for example, we
7069 // should not accept a root of 2.
7070 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
7071 if (Val->isZero())
7072 return ExitLimit(R1, R1, P); // We found a quadratic root!
7073 }
7074 }
7075 return getCouldNotCompute();
7076 }
7077
7078 // Otherwise we can only handle this if it is affine.
7079 if (!AddRec->isAffine())
7080 return getCouldNotCompute();
7081
7082 // If this is an affine expression, the execution count of this branch is
7083 // the minimum unsigned root of the following equation:
7084 //
7085 // Start + Step*N = 0 (mod 2^BW)
7086 //
7087 // equivalent to:
7088 //
7089 // Step*N = -Start (mod 2^BW)
7090 //
7091 // where BW is the common bit width of Start and Step.
7092
7093 // Get the initial value for the loop.
7094 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
7095 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
7096
7097 // For now we handle only constant steps.
7098 //
7099 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
7100 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
7101 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
7102 // We have not yet seen any such cases.
7103 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
7104 if (!StepC || StepC->getValue()->equalsInt(0))
7105 return getCouldNotCompute();
7106
7107 // For positive steps (counting up until unsigned overflow):
7108 // N = -Start/Step (as unsigned)
7109 // For negative steps (counting down to zero):
7110 // N = Start/-Step
7111 // First compute the unsigned distance from zero in the direction of Step.
7112 bool CountDown = StepC->getAPInt().isNegative();
7113 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
7114
7115 // Handle unitary steps, which cannot wrap around.
7116 // 1*N = -Start; -1*N = Start (mod 2^BW), so:
7117 // N = Distance (as unsigned)
7118 if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
7119 ConstantRange CR = getUnsignedRange(Start);
7120 const SCEV *MaxBECount;
7121 if (!CountDown && CR.getUnsignedMin().isMinValue())
7122 // When counting up, the worst starting value is 1, not 0.
7123 MaxBECount = CR.getUnsignedMax().isMinValue()
7124 ? getConstant(APInt::getMinValue(CR.getBitWidth()))
7125 : getConstant(APInt::getMaxValue(CR.getBitWidth()));
7126 else
7127 MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
7128 : -CR.getUnsignedMin());
7129 return ExitLimit(Distance, MaxBECount, P);
7130 }
7131
7132 // As a special case, handle the instance where Step is a positive power of
7133 // two. In this case, determining whether Step divides Distance evenly can be
7134 // done by counting and comparing the number of trailing zeros of Step and
7135 // Distance.
7136 if (!CountDown) {
7137 const APInt &StepV = StepC->getAPInt();
7138 // StepV.isPowerOf2() returns true if StepV is a positive power of two. It
7139 // also returns true if StepV is maximally negative (e.g., INT_MIN), but that
7140 // case is not handled as this code is guarded by !CountDown.
7141 if (StepV.isPowerOf2() &&
7142 GetMinTrailingZeros(Distance) >= StepV.countTrailingZeros()) {
7143 // Here we've constrained the equation to be of the form
7144 //
7145 // 2^(N + k) * Distance' = (StepV == 2^N) * X (mod 2^W) ... (0)
7146 //
7147 // where we're operating on a W bit wide integer domain and k is
7148 // non-negative. The smallest unsigned solution for X is the trip count.
7149 //
7150 // (0) is equivalent to:
7151 //
7152 // 2^(N + k) * Distance' - 2^N * X = L * 2^W
7153 // <=> 2^N(2^k * Distance' - X) = L * 2^(W - N) * 2^N
7154 // <=> 2^k * Distance' - X = L * 2^(W - N)
7155 // <=> 2^k * Distance' = L * 2^(W - N) + X ... (1)
7156 //
7157 // The smallest X satisfying (1) is the unsigned remainder of dividing the
7158 // LHS by 2^(W - N).
7159 //
7160 // <=> X = 2^k * Distance' URem 2^(W - N) ... (2)
7161 //
7162 // E.g. say we're solving
7163 //
7164 // 2 * Val = 2 * X (in i8) ... (3)
7165 //
7166 // then from (2), we get X = Val URem i8 128 (k = 0 in this case).
7167 //
7168 // Note: It is tempting to solve (3) by setting X = Val, but Val is not
7169 // necessarily the smallest unsigned value of X that satisfies (3).
7170 // E.g. if Val is i8 -127 then the smallest value of X that satisfies (3)
7171 // is i8 1, not i8 -127.
7172
7173 const auto *ModuloResult = getUDivExactExpr(Distance, Step);
7174
7175 // Since SCEV does not have a URem node, we construct one using a truncate
7176 // and a zero extend.
7177
7178 unsigned NarrowWidth = StepV.getBitWidth() - StepV.countTrailingZeros();
7179 auto *NarrowTy = IntegerType::get(getContext(), NarrowWidth);
7180 auto *WideTy = Distance->getType();
7181
7182 const SCEV *Limit =
7183 getZeroExtendExpr(getTruncateExpr(ModuloResult, NarrowTy), WideTy);
7184 return ExitLimit(Limit, Limit, P);
7185 }
7186 }
7187
7188 // If the condition controls loop exit (the loop exits only if the expression
7189 // is true) and the addition is no-wrap we can use unsigned divide to
7190 // compute the backedge count. In this case, the step may not divide the
7191 // distance, but we don't care because if the condition is "missed" the loop
7192 // will have undefined behavior due to wrapping.
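// (Sketch: for V = {-8,+,3} with no self-wrap, the value never hits 0
// exactly, so the recurrence would eventually self-wrap -- undefined
// behavior -- and answering Distance /u Step = 8 /u 3 = 2 is as good as any.)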
7193 if (ControlsExit && AddRec->hasNoSelfWrap() && 7194 loopHasNoAbnormalExits(AddRec->getLoop())) { 7195 const SCEV *Exact = 7196 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 7197 return ExitLimit(Exact, Exact, P); 7198 } 7199 7200 // Then, try to solve the above equation provided that Start is constant. 7201 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) { 7202 const SCEV *E = SolveLinEquationWithOverflow( 7203 StepC->getValue()->getValue(), -StartC->getValue()->getValue(), *this); 7204 return ExitLimit(E, E, P); 7205 } 7206 return getCouldNotCompute(); 7207 } 7208 7209 ScalarEvolution::ExitLimit 7210 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 7211 // Loops that look like: while (X == 0) are very strange indeed. We don't 7212 // handle them yet except for the trivial case. This could be expanded in the 7213 // future as needed. 7214 7215 // If the value is a constant, check to see if it is known to be non-zero 7216 // already. If so, the backedge will execute zero times. 7217 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 7218 if (!C->getValue()->isNullValue()) 7219 return getZero(C->getType()); 7220 return getCouldNotCompute(); // Otherwise it will loop infinitely. 7221 } 7222 7223 // We could implement others, but I really doubt anyone writes loops like 7224 // this, and if they did, they would already be constant folded. 7225 return getCouldNotCompute(); 7226 } 7227 7228 std::pair<BasicBlock *, BasicBlock *> 7229 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 7230 // If the block has a unique predecessor, then there is no path from the 7231 // predecessor to the block that does not go through the direct edge 7232 // from the predecessor to the block. 7233 if (BasicBlock *Pred = BB->getSinglePredecessor()) 7234 return {Pred, BB}; 7235 7236 // A loop's header is defined to be a block that dominates the loop. 7237 // If the header has a unique predecessor outside the loop, it must be 7238 // a block that has exactly one successor that can reach the loop. 7239 if (Loop *L = LI.getLoopFor(BB)) 7240 return {L->getLoopPredecessor(), L->getHeader()}; 7241 7242 return {nullptr, nullptr}; 7243 } 7244 7245 /// SCEV structural equivalence is usually sufficient for testing whether two 7246 /// expressions are equal, however for the purposes of looking for a condition 7247 /// guarding a loop, it can be useful to be a little more general, since a 7248 /// front-end may have replicated the controlling expression. 7249 /// 7250 static bool HasSameValue(const SCEV *A, const SCEV *B) { 7251 // Quick check to see if they are the same SCEV. 7252 if (A == B) return true; 7253 7254 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 7255 // Not all instructions that are "identical" compute the same value. For 7256 // instance, two distinct alloca instructions allocating the same type are 7257 // identical and do not read memory; but compute distinct values. 7258 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 7259 }; 7260 7261 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 7262 // two different instructions with the same value. Check for this case. 
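// (e.g. a front-end may emit two identical `add nsw i32 %a, %b` instructions,
// one per copy of the controlling expression; they are distinct SCEVUnknowns
// but compute equal values.)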
7263 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 7264 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 7265 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 7266 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 7267 if (ComputesEqualValues(AI, BI)) 7268 return true; 7269 7270 // Otherwise assume they may have a different value. 7271 return false; 7272 } 7273 7274 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 7275 const SCEV *&LHS, const SCEV *&RHS, 7276 unsigned Depth) { 7277 bool Changed = false; 7278 7279 // If we hit the max recursion limit bail out. 7280 if (Depth >= 3) 7281 return false; 7282 7283 // Canonicalize a constant to the right side. 7284 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 7285 // Check for both operands constant. 7286 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 7287 if (ConstantExpr::getICmp(Pred, 7288 LHSC->getValue(), 7289 RHSC->getValue())->isNullValue()) 7290 goto trivially_false; 7291 else 7292 goto trivially_true; 7293 } 7294 // Otherwise swap the operands to put the constant on the right. 7295 std::swap(LHS, RHS); 7296 Pred = ICmpInst::getSwappedPredicate(Pred); 7297 Changed = true; 7298 } 7299 7300 // If we're comparing an addrec with a value which is loop-invariant in the 7301 // addrec's loop, put the addrec on the left. Also make a dominance check, 7302 // as both operands could be addrecs loop-invariant in each other's loop. 7303 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 7304 const Loop *L = AR->getLoop(); 7305 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 7306 std::swap(LHS, RHS); 7307 Pred = ICmpInst::getSwappedPredicate(Pred); 7308 Changed = true; 7309 } 7310 } 7311 7312 // If there's a constant operand, canonicalize comparisons with boundary 7313 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 7314 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 7315 const APInt &RA = RC->getAPInt(); 7316 switch (Pred) { 7317 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 7318 case ICmpInst::ICMP_EQ: 7319 case ICmpInst::ICMP_NE: 7320 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 
7321 if (!RA) 7322 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 7323 if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 7324 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 7325 ME->getOperand(0)->isAllOnesValue()) { 7326 RHS = AE->getOperand(1); 7327 LHS = ME->getOperand(1); 7328 Changed = true; 7329 } 7330 break; 7331 case ICmpInst::ICMP_UGE: 7332 if ((RA - 1).isMinValue()) { 7333 Pred = ICmpInst::ICMP_NE; 7334 RHS = getConstant(RA - 1); 7335 Changed = true; 7336 break; 7337 } 7338 if (RA.isMaxValue()) { 7339 Pred = ICmpInst::ICMP_EQ; 7340 Changed = true; 7341 break; 7342 } 7343 if (RA.isMinValue()) goto trivially_true; 7344 7345 Pred = ICmpInst::ICMP_UGT; 7346 RHS = getConstant(RA - 1); 7347 Changed = true; 7348 break; 7349 case ICmpInst::ICMP_ULE: 7350 if ((RA + 1).isMaxValue()) { 7351 Pred = ICmpInst::ICMP_NE; 7352 RHS = getConstant(RA + 1); 7353 Changed = true; 7354 break; 7355 } 7356 if (RA.isMinValue()) { 7357 Pred = ICmpInst::ICMP_EQ; 7358 Changed = true; 7359 break; 7360 } 7361 if (RA.isMaxValue()) goto trivially_true; 7362 7363 Pred = ICmpInst::ICMP_ULT; 7364 RHS = getConstant(RA + 1); 7365 Changed = true; 7366 break; 7367 case ICmpInst::ICMP_SGE: 7368 if ((RA - 1).isMinSignedValue()) { 7369 Pred = ICmpInst::ICMP_NE; 7370 RHS = getConstant(RA - 1); 7371 Changed = true; 7372 break; 7373 } 7374 if (RA.isMaxSignedValue()) { 7375 Pred = ICmpInst::ICMP_EQ; 7376 Changed = true; 7377 break; 7378 } 7379 if (RA.isMinSignedValue()) goto trivially_true; 7380 7381 Pred = ICmpInst::ICMP_SGT; 7382 RHS = getConstant(RA - 1); 7383 Changed = true; 7384 break; 7385 case ICmpInst::ICMP_SLE: 7386 if ((RA + 1).isMaxSignedValue()) { 7387 Pred = ICmpInst::ICMP_NE; 7388 RHS = getConstant(RA + 1); 7389 Changed = true; 7390 break; 7391 } 7392 if (RA.isMinSignedValue()) { 7393 Pred = ICmpInst::ICMP_EQ; 7394 Changed = true; 7395 break; 7396 } 7397 if (RA.isMaxSignedValue()) goto trivially_true; 7398 7399 Pred = ICmpInst::ICMP_SLT; 7400 RHS = getConstant(RA + 1); 7401 Changed = true; 7402 break; 7403 case ICmpInst::ICMP_UGT: 7404 if (RA.isMinValue()) { 7405 Pred = ICmpInst::ICMP_NE; 7406 Changed = true; 7407 break; 7408 } 7409 if ((RA + 1).isMaxValue()) { 7410 Pred = ICmpInst::ICMP_EQ; 7411 RHS = getConstant(RA + 1); 7412 Changed = true; 7413 break; 7414 } 7415 if (RA.isMaxValue()) goto trivially_false; 7416 break; 7417 case ICmpInst::ICMP_ULT: 7418 if (RA.isMaxValue()) { 7419 Pred = ICmpInst::ICMP_NE; 7420 Changed = true; 7421 break; 7422 } 7423 if ((RA - 1).isMinValue()) { 7424 Pred = ICmpInst::ICMP_EQ; 7425 RHS = getConstant(RA - 1); 7426 Changed = true; 7427 break; 7428 } 7429 if (RA.isMinValue()) goto trivially_false; 7430 break; 7431 case ICmpInst::ICMP_SGT: 7432 if (RA.isMinSignedValue()) { 7433 Pred = ICmpInst::ICMP_NE; 7434 Changed = true; 7435 break; 7436 } 7437 if ((RA + 1).isMaxSignedValue()) { 7438 Pred = ICmpInst::ICMP_EQ; 7439 RHS = getConstant(RA + 1); 7440 Changed = true; 7441 break; 7442 } 7443 if (RA.isMaxSignedValue()) goto trivially_false; 7444 break; 7445 case ICmpInst::ICMP_SLT: 7446 if (RA.isMaxSignedValue()) { 7447 Pred = ICmpInst::ICMP_NE; 7448 Changed = true; 7449 break; 7450 } 7451 if ((RA - 1).isMinSignedValue()) { 7452 Pred = ICmpInst::ICMP_EQ; 7453 RHS = getConstant(RA - 1); 7454 Changed = true; 7455 break; 7456 } 7457 if (RA.isMinSignedValue()) goto trivially_false; 7458 break; 7459 } 7460 } 7461 7462 // Check for obvious equality. 
7463 if (HasSameValue(LHS, RHS)) { 7464 if (ICmpInst::isTrueWhenEqual(Pred)) 7465 goto trivially_true; 7466 if (ICmpInst::isFalseWhenEqual(Pred)) 7467 goto trivially_false; 7468 } 7469 7470 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 7471 // adding or subtracting 1 from one of the operands. 7472 switch (Pred) { 7473 case ICmpInst::ICMP_SLE: 7474 if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) { 7475 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 7476 SCEV::FlagNSW); 7477 Pred = ICmpInst::ICMP_SLT; 7478 Changed = true; 7479 } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) { 7480 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 7481 SCEV::FlagNSW); 7482 Pred = ICmpInst::ICMP_SLT; 7483 Changed = true; 7484 } 7485 break; 7486 case ICmpInst::ICMP_SGE: 7487 if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) { 7488 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 7489 SCEV::FlagNSW); 7490 Pred = ICmpInst::ICMP_SGT; 7491 Changed = true; 7492 } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) { 7493 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 7494 SCEV::FlagNSW); 7495 Pred = ICmpInst::ICMP_SGT; 7496 Changed = true; 7497 } 7498 break; 7499 case ICmpInst::ICMP_ULE: 7500 if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) { 7501 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 7502 SCEV::FlagNUW); 7503 Pred = ICmpInst::ICMP_ULT; 7504 Changed = true; 7505 } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) { 7506 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 7507 Pred = ICmpInst::ICMP_ULT; 7508 Changed = true; 7509 } 7510 break; 7511 case ICmpInst::ICMP_UGE: 7512 if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) { 7513 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 7514 Pred = ICmpInst::ICMP_UGT; 7515 Changed = true; 7516 } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) { 7517 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 7518 SCEV::FlagNUW); 7519 Pred = ICmpInst::ICMP_UGT; 7520 Changed = true; 7521 } 7522 break; 7523 default: 7524 break; 7525 } 7526 7527 // TODO: More simplifications are possible here. 7528 7529 // Recursively simplify until we either hit a recursion limit or nothing 7530 // changes. 7531 if (Changed) 7532 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 7533 7534 return Changed; 7535 7536 trivially_true: 7537 // Return 0 == 0. 7538 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 7539 Pred = ICmpInst::ICMP_EQ; 7540 return true; 7541 7542 trivially_false: 7543 // Return 0 != 0. 
7544 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 7545 Pred = ICmpInst::ICMP_NE; 7546 return true; 7547 } 7548 7549 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 7550 return getSignedRange(S).getSignedMax().isNegative(); 7551 } 7552 7553 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 7554 return getSignedRange(S).getSignedMin().isStrictlyPositive(); 7555 } 7556 7557 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 7558 return !getSignedRange(S).getSignedMin().isNegative(); 7559 } 7560 7561 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 7562 return !getSignedRange(S).getSignedMax().isStrictlyPositive(); 7563 } 7564 7565 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 7566 return isKnownNegative(S) || isKnownPositive(S); 7567 } 7568 7569 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 7570 const SCEV *LHS, const SCEV *RHS) { 7571 // Canonicalize the inputs first. 7572 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7573 7574 // If LHS or RHS is an addrec, check to see if the condition is true in 7575 // every iteration of the loop. 7576 // If LHS and RHS are both addrec, both conditions must be true in 7577 // every iteration of the loop. 7578 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 7579 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 7580 bool LeftGuarded = false; 7581 bool RightGuarded = false; 7582 if (LAR) { 7583 const Loop *L = LAR->getLoop(); 7584 if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) && 7585 isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) { 7586 if (!RAR) return true; 7587 LeftGuarded = true; 7588 } 7589 } 7590 if (RAR) { 7591 const Loop *L = RAR->getLoop(); 7592 if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) && 7593 isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) { 7594 if (!LAR) return true; 7595 RightGuarded = true; 7596 } 7597 } 7598 if (LeftGuarded && RightGuarded) 7599 return true; 7600 7601 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 7602 return true; 7603 7604 // Otherwise see what can be done with known constant ranges. 7605 return isKnownPredicateViaConstantRanges(Pred, LHS, RHS); 7606 } 7607 7608 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 7609 ICmpInst::Predicate Pred, 7610 bool &Increasing) { 7611 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 7612 7613 #ifndef NDEBUG 7614 // Verify an invariant: inverting the predicate should turn a monotonically 7615 // increasing change to a monotonically decreasing one, and vice versa. 7616 bool IncreasingSwapped; 7617 bool ResultSwapped = isMonotonicPredicateImpl( 7618 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 7619 7620 assert(Result == ResultSwapped && "should be able to analyze both!"); 7621 if (ResultSwapped) 7622 assert(Increasing == !IncreasingSwapped && 7623 "monotonicity should flip as we flip the predicate"); 7624 #endif 7625 7626 return Result; 7627 } 7628 7629 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 7630 ICmpInst::Predicate Pred, 7631 bool &Increasing) { 7632 7633 // A zero step value for LHS means the induction variable is essentially a 7634 // loop invariant value. We don't really depend on the predicate actually 7635 // flipping from false to true (for increasing predicates, and the other way 7636 // around for decreasing predicates), all we care about is that *if* the 7637 // predicate changes then it only changes from false to true. 
7638 // 7639 // A zero step value in itself is not very useful, but there may be places 7640 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 7641 // as general as possible. 7642 7643 switch (Pred) { 7644 default: 7645 return false; // Conservative answer 7646 7647 case ICmpInst::ICMP_UGT: 7648 case ICmpInst::ICMP_UGE: 7649 case ICmpInst::ICMP_ULT: 7650 case ICmpInst::ICMP_ULE: 7651 if (!LHS->hasNoUnsignedWrap()) 7652 return false; 7653 7654 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 7655 return true; 7656 7657 case ICmpInst::ICMP_SGT: 7658 case ICmpInst::ICMP_SGE: 7659 case ICmpInst::ICMP_SLT: 7660 case ICmpInst::ICMP_SLE: { 7661 if (!LHS->hasNoSignedWrap()) 7662 return false; 7663 7664 const SCEV *Step = LHS->getStepRecurrence(*this); 7665 7666 if (isKnownNonNegative(Step)) { 7667 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 7668 return true; 7669 } 7670 7671 if (isKnownNonPositive(Step)) { 7672 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 7673 return true; 7674 } 7675 7676 return false; 7677 } 7678 7679 } 7680 7681 llvm_unreachable("switch has default clause!"); 7682 } 7683 7684 bool ScalarEvolution::isLoopInvariantPredicate( 7685 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 7686 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 7687 const SCEV *&InvariantRHS) { 7688 7689 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 7690 if (!isLoopInvariant(RHS, L)) { 7691 if (!isLoopInvariant(LHS, L)) 7692 return false; 7693 7694 std::swap(LHS, RHS); 7695 Pred = ICmpInst::getSwappedPredicate(Pred); 7696 } 7697 7698 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 7699 if (!ArLHS || ArLHS->getLoop() != L) 7700 return false; 7701 7702 bool Increasing; 7703 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 7704 return false; 7705 7706 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 7707 // true as the loop iterates, and the backedge is control dependent on 7708 // "ArLHS `Pred` RHS" == true then we can reason as follows: 7709 // 7710 // * if the predicate was false in the first iteration then the predicate 7711 // is never evaluated again, since the loop exits without taking the 7712 // backedge. 7713 // * if the predicate was true in the first iteration then it will 7714 // continue to be true for all future iterations since it is 7715 // monotonically increasing. 7716 // 7717 // For both the above possibilities, we can replace the loop varying 7718 // predicate with its value on the first iteration of the loop (which is 7719 // loop invariant). 7720 // 7721 // A similar reasoning applies for a monotonically decreasing predicate, by 7722 // replacing true with false and false with true in the above two bullets. 7723 7724 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 7725 7726 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 7727 return false; 7728 7729 InvariantPred = Pred; 7730 InvariantLHS = ArLHS->getStart(); 7731 InvariantRHS = RHS; 7732 return true; 7733 } 7734 7735 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 7736 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 7737 if (HasSameValue(LHS, RHS)) 7738 return ICmpInst::isTrueWhenEqual(Pred); 7739 7740 // This code is split out from isKnownPredicate because it is called from 7741 // within isLoopEntryGuardedByCond. 
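// For instance, if the unsigned range of LHS is known to be [0, 8) and the
// unsigned range of RHS is known to be [8, 16), then
// makeSatisfyingICmpRegion(ICMP_ULT, [8, 16)) is [0, 8), which contains the
// range of LHS, so LHS u< RHS is known to hold.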
7742 7743 auto CheckRanges = 7744 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 7745 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 7746 .contains(RangeLHS); 7747 }; 7748 7749 // The check at the top of the function catches the case where the values are 7750 // known to be equal. 7751 if (Pred == CmpInst::ICMP_EQ) 7752 return false; 7753 7754 if (Pred == CmpInst::ICMP_NE) 7755 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 7756 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 7757 isKnownNonZero(getMinusSCEV(LHS, RHS)); 7758 7759 if (CmpInst::isSigned(Pred)) 7760 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 7761 7762 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 7763 } 7764 7765 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 7766 const SCEV *LHS, 7767 const SCEV *RHS) { 7768 7769 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 7770 // Return Y via OutY. 7771 auto MatchBinaryAddToConst = 7772 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 7773 SCEV::NoWrapFlags ExpectedFlags) { 7774 const SCEV *NonConstOp, *ConstOp; 7775 SCEV::NoWrapFlags FlagsPresent; 7776 7777 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 7778 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 7779 return false; 7780 7781 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 7782 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 7783 }; 7784 7785 APInt C; 7786 7787 switch (Pred) { 7788 default: 7789 break; 7790 7791 case ICmpInst::ICMP_SGE: 7792 std::swap(LHS, RHS); 7793 case ICmpInst::ICMP_SLE: 7794 // X s<= (X + C)<nsw> if C >= 0 7795 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 7796 return true; 7797 7798 // (X + C)<nsw> s<= X if C <= 0 7799 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 7800 !C.isStrictlyPositive()) 7801 return true; 7802 break; 7803 7804 case ICmpInst::ICMP_SGT: 7805 std::swap(LHS, RHS); 7806 case ICmpInst::ICMP_SLT: 7807 // X s< (X + C)<nsw> if C > 0 7808 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 7809 C.isStrictlyPositive()) 7810 return true; 7811 7812 // (X + C)<nsw> s< X if C < 0 7813 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 7814 return true; 7815 break; 7816 } 7817 7818 return false; 7819 } 7820 7821 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 7822 const SCEV *LHS, 7823 const SCEV *RHS) { 7824 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 7825 return false; 7826 7827 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 7828 // the stack can result in exponential time complexity. 7829 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 7830 7831 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 7832 // 7833 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 7834 // isKnownPredicate. isKnownPredicate is more powerful, but also more 7835 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 7836 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 7837 // use isKnownPredicate later if needed. 
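// For instance, with i8 operands, if RHS is known to lie in [10, 50] then
// RHS is non-negative, and proving 0 s<= LHS together with LHS s< RHS is
// enough to conclude LHS u< RHS: both sides are then known to sit in the
// non-negative half of the signed range, where s< and u< agree.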
7838 return isKnownNonNegative(RHS) && 7839 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) && 7840 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS); 7841 } 7842 7843 bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB, 7844 ICmpInst::Predicate Pred, 7845 const SCEV *LHS, const SCEV *RHS) { 7846 // No need to even try if we know the module has no guards. 7847 if (!HasGuards) 7848 return false; 7849 7850 return any_of(*BB, [&](Instruction &I) { 7851 using namespace llvm::PatternMatch; 7852 7853 Value *Condition; 7854 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>( 7855 m_Value(Condition))) && 7856 isImpliedCond(Pred, LHS, RHS, Condition, false); 7857 }); 7858 } 7859 7860 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is 7861 /// protected by a conditional between LHS and RHS. This is used 7862 /// to eliminate casts. 7863 bool 7864 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, 7865 ICmpInst::Predicate Pred, 7866 const SCEV *LHS, const SCEV *RHS) { 7867 // Interpret a null as meaning no loop, where there is obviously no guard 7868 // (interprocedural conditions notwithstanding). 7869 if (!L) return true; 7870 7871 if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS)) 7872 return true; 7873 7874 BasicBlock *Latch = L->getLoopLatch(); 7875 if (!Latch) 7876 return false; 7877 7878 BranchInst *LoopContinuePredicate = 7879 dyn_cast<BranchInst>(Latch->getTerminator()); 7880 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() && 7881 isImpliedCond(Pred, LHS, RHS, 7882 LoopContinuePredicate->getCondition(), 7883 LoopContinuePredicate->getSuccessor(0) != L->getHeader())) 7884 return true; 7885 7886 // We don't want more than one activation of the following loops on the stack 7887 // -- that can lead to O(n!) time complexity. 7888 if (WalkingBEDominatingConds) 7889 return false; 7890 7891 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true); 7892 7893 // See if we can exploit a trip count to prove the predicate. 7894 const auto &BETakenInfo = getBackedgeTakenInfo(L); 7895 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this); 7896 if (LatchBECount != getCouldNotCompute()) { 7897 // We know that Latch branches back to the loop header exactly 7898 // LatchBECount times. This means the backedge condition at Latch is 7899 // equivalent to "{0,+,1} u< LatchBECount". 7900 Type *Ty = LatchBECount->getType(); 7901 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW); 7902 const SCEV *LoopCounter = 7903 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags); 7904 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter, 7905 LatchBECount)) 7906 return true; 7907 } 7908 7909 // Check conditions due to any @llvm.assume intrinsics. 7910 for (auto &AssumeVH : AC.assumptions()) { 7911 if (!AssumeVH) 7912 continue; 7913 auto *CI = cast<CallInst>(AssumeVH); 7914 if (!DT.dominates(CI, Latch->getTerminator())) 7915 continue; 7916 7917 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 7918 return true; 7919 } 7920 7921 // If the loop is not reachable from the entry block, we risk running into an 7922 // infinite loop as we walk up into the dom tree. These loops do not matter 7923 // anyway, so we just return a conservative answer when we see them.
7924 if (!DT.isReachableFromEntry(L->getHeader())) 7925 return false; 7926 7927 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 7928 return true; 7929 7930 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 7931 DTN != HeaderDTN; DTN = DTN->getIDom()) { 7932 7933 assert(DTN && "should reach the loop header before reaching the root!"); 7934 7935 BasicBlock *BB = DTN->getBlock(); 7936 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 7937 return true; 7938 7939 BasicBlock *PBB = BB->getSinglePredecessor(); 7940 if (!PBB) 7941 continue; 7942 7943 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 7944 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 7945 continue; 7946 7947 Value *Condition = ContinuePredicate->getCondition(); 7948 7949 // If we have an edge `E` within the loop body that dominates the only 7950 // latch, the condition guarding `E` also guards the backedge. This 7951 // reasoning works only for loops with a single latch. 7952 7953 BasicBlockEdge DominatingEdge(PBB, BB); 7954 if (DominatingEdge.isSingleEdge()) { 7955 // We're constructively (and conservatively) enumerating edges within the 7956 // loop body that dominate the latch. The dominator tree better agree 7957 // with us on this: 7958 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 7959 7960 if (isImpliedCond(Pred, LHS, RHS, Condition, 7961 BB != ContinuePredicate->getSuccessor(0))) 7962 return true; 7963 } 7964 } 7965 7966 return false; 7967 } 7968 7969 bool 7970 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 7971 ICmpInst::Predicate Pred, 7972 const SCEV *LHS, const SCEV *RHS) { 7973 // Interpret a null as meaning no loop, where there is obviously no guard 7974 // (interprocedural conditions notwithstanding). 7975 if (!L) return false; 7976 7977 if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS)) 7978 return true; 7979 7980 // Starting at the loop predecessor, climb up the predecessor chain, as long 7981 // as there are predecessors that can be found that have unique successors 7982 // leading to the original header. 7983 for (std::pair<BasicBlock *, BasicBlock *> 7984 Pair(L->getLoopPredecessor(), L->getHeader()); 7985 Pair.first; 7986 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 7987 7988 if (isImpliedViaGuard(Pair.first, Pred, LHS, RHS)) 7989 return true; 7990 7991 BranchInst *LoopEntryPredicate = 7992 dyn_cast<BranchInst>(Pair.first->getTerminator()); 7993 if (!LoopEntryPredicate || 7994 LoopEntryPredicate->isUnconditional()) 7995 continue; 7996 7997 if (isImpliedCond(Pred, LHS, RHS, 7998 LoopEntryPredicate->getCondition(), 7999 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 8000 return true; 8001 } 8002 8003 // Check conditions due to any @llvm.assume intrinsics. 8004 for (auto &AssumeVH : AC.assumptions()) { 8005 if (!AssumeVH) 8006 continue; 8007 auto *CI = cast<CallInst>(AssumeVH); 8008 if (!DT.dominates(CI, L->getHeader())) 8009 continue; 8010 8011 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 8012 return true; 8013 } 8014 8015 return false; 8016 } 8017 8018 namespace { 8019 /// RAII wrapper to prevent recursive application of isImpliedCond. 8020 /// ScalarEvolution's PendingLoopPredicates set must be empty unless we are 8021 /// currently evaluating isImpliedCond. 
8022 struct MarkPendingLoopPredicate { 8023 Value *Cond; 8024 DenseSet<Value*> &LoopPreds; 8025 bool Pending; 8026 8027 MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP) 8028 : Cond(C), LoopPreds(LP) { 8029 Pending = !LoopPreds.insert(Cond).second; 8030 } 8031 ~MarkPendingLoopPredicate() { 8032 if (!Pending) 8033 LoopPreds.erase(Cond); 8034 } 8035 }; 8036 } // end anonymous namespace 8037 8038 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 8039 const SCEV *LHS, const SCEV *RHS, 8040 Value *FoundCondValue, 8041 bool Inverse) { 8042 MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates); 8043 if (Mark.Pending) 8044 return false; 8045 8046 // Recursively handle And and Or conditions. 8047 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) { 8048 if (BO->getOpcode() == Instruction::And) { 8049 if (!Inverse) 8050 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || 8051 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); 8052 } else if (BO->getOpcode() == Instruction::Or) { 8053 if (Inverse) 8054 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || 8055 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); 8056 } 8057 } 8058 8059 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); 8060 if (!ICI) return false; 8061 8062 // Now that we found a conditional branch that dominates the loop or controls 8063 // the loop latch, check to see if it is the comparison we are looking for. 8064 ICmpInst::Predicate FoundPred; 8065 if (Inverse) 8066 FoundPred = ICI->getInversePredicate(); 8067 else 8068 FoundPred = ICI->getPredicate(); 8069 8070 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); 8071 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); 8072 8073 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS); 8074 } 8075 8076 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, 8077 const SCEV *RHS, 8078 ICmpInst::Predicate FoundPred, 8079 const SCEV *FoundLHS, 8080 const SCEV *FoundRHS) { 8081 // Balance the types. 8082 if (getTypeSizeInBits(LHS->getType()) < 8083 getTypeSizeInBits(FoundLHS->getType())) { 8084 if (CmpInst::isSigned(Pred)) { 8085 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 8086 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 8087 } else { 8088 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 8089 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 8090 } 8091 } else if (getTypeSizeInBits(LHS->getType()) > 8092 getTypeSizeInBits(FoundLHS->getType())) { 8093 if (CmpInst::isSigned(FoundPred)) { 8094 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 8095 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 8096 } else { 8097 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 8098 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 8099 } 8100 } 8101 8102 // Canonicalize the query to match the way instcombine will have 8103 // canonicalized the comparison. 8104 if (SimplifyICmpOperands(Pred, LHS, RHS)) 8105 if (LHS == RHS) 8106 return CmpInst::isTrueWhenEqual(Pred); 8107 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 8108 if (FoundLHS == FoundRHS) 8109 return CmpInst::isFalseWhenEqual(FoundPred); 8110 8111 // Check to see if we can make the LHS or RHS match.
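// For instance, if the query is "A u< B" but the guard established
// "B u> A", then LHS == FoundRHS, and swapping one of the two pairs
// (whichever swap keeps a constant on the right-hand side) lines the
// operands up so the predicates can be compared directly.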
8112 if (LHS == FoundRHS || RHS == FoundLHS) { 8113 if (isa<SCEVConstant>(RHS)) { 8114 std::swap(FoundLHS, FoundRHS); 8115 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 8116 } else { 8117 std::swap(LHS, RHS); 8118 Pred = ICmpInst::getSwappedPredicate(Pred); 8119 } 8120 } 8121 8122 // Check whether the found predicate is the same as the desired predicate. 8123 if (FoundPred == Pred) 8124 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 8125 8126 // Check whether swapping the found predicate makes it the same as the 8127 // desired predicate. 8128 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 8129 if (isa<SCEVConstant>(RHS)) 8130 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS); 8131 else 8132 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), 8133 RHS, LHS, FoundLHS, FoundRHS); 8134 } 8135 8136 // Unsigned comparison is the same as signed comparison when both the operands 8137 // are non-negative. 8138 if (CmpInst::isUnsigned(FoundPred) && 8139 CmpInst::getSignedPredicate(FoundPred) == Pred && 8140 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) 8141 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 8142 8143 // Check if we can make progress by sharpening ranges. 8144 if (FoundPred == ICmpInst::ICMP_NE && 8145 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 8146 8147 const SCEVConstant *C = nullptr; 8148 const SCEV *V = nullptr; 8149 8150 if (isa<SCEVConstant>(FoundLHS)) { 8151 C = cast<SCEVConstant>(FoundLHS); 8152 V = FoundRHS; 8153 } else { 8154 C = cast<SCEVConstant>(FoundRHS); 8155 V = FoundLHS; 8156 } 8157 8158 // The guarding predicate tells us that C != V. If the known range 8159 // of V is [C, t), we can sharpen the range to [C + 1, t). The 8160 // range we consider has to correspond to same signedness as the 8161 // predicate we're interested in folding. 8162 8163 APInt Min = ICmpInst::isSigned(Pred) ? 8164 getSignedRange(V).getSignedMin() : getUnsignedRange(V).getUnsignedMin(); 8165 8166 if (Min == C->getAPInt()) { 8167 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 8168 // This is true even if (Min + 1) wraps around -- in case of 8169 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 8170 8171 APInt SharperMin = Min + 1; 8172 8173 switch (Pred) { 8174 case ICmpInst::ICMP_SGE: 8175 case ICmpInst::ICMP_UGE: 8176 // We know V `Pred` SharperMin. If this implies LHS `Pred` 8177 // RHS, we're done. 8178 if (isImpliedCondOperands(Pred, LHS, RHS, V, 8179 getConstant(SharperMin))) 8180 return true; 8181 8182 case ICmpInst::ICMP_SGT: 8183 case ICmpInst::ICMP_UGT: 8184 // We know from the range information that (V `Pred` Min || 8185 // V == Min). We know from the guarding condition that !(V 8186 // == Min). This gives us 8187 // 8188 // V `Pred` Min || V == Min && !(V == Min) 8189 // => V `Pred` Min 8190 // 8191 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 8192 8193 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min))) 8194 return true; 8195 8196 default: 8197 // No change 8198 break; 8199 } 8200 } 8201 } 8202 8203 // Check whether the actual condition is beyond sufficient. 
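// For instance, a guard establishing A == B is more than enough to answer a
// query about A u>= B, and a strict guard such as A u< B is more than enough
// to answer a query about A != B.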
8204 if (FoundPred == ICmpInst::ICMP_EQ) 8205 if (ICmpInst::isTrueWhenEqual(Pred)) 8206 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 8207 return true; 8208 if (Pred == ICmpInst::ICMP_NE) 8209 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 8210 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 8211 return true; 8212 8213 // Otherwise assume the worst. 8214 return false; 8215 } 8216 8217 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 8218 const SCEV *&L, const SCEV *&R, 8219 SCEV::NoWrapFlags &Flags) { 8220 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 8221 if (!AE || AE->getNumOperands() != 2) 8222 return false; 8223 8224 L = AE->getOperand(0); 8225 R = AE->getOperand(1); 8226 Flags = AE->getNoWrapFlags(); 8227 return true; 8228 } 8229 8230 bool ScalarEvolution::computeConstantDifference(const SCEV *Less, 8231 const SCEV *More, 8232 APInt &C) { 8233 // We avoid subtracting expressions here because this function is usually 8234 // fairly deep in the call stack (i.e. is called many times). 8235 8236 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 8237 const auto *LAR = cast<SCEVAddRecExpr>(Less); 8238 const auto *MAR = cast<SCEVAddRecExpr>(More); 8239 8240 if (LAR->getLoop() != MAR->getLoop()) 8241 return false; 8242 8243 // We look at affine expressions only; not for correctness but to keep 8244 // getStepRecurrence cheap. 8245 if (!LAR->isAffine() || !MAR->isAffine()) 8246 return false; 8247 8248 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 8249 return false; 8250 8251 Less = LAR->getStart(); 8252 More = MAR->getStart(); 8253 8254 // fall through 8255 } 8256 8257 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 8258 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 8259 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 8260 C = M - L; 8261 return true; 8262 } 8263 8264 const SCEV *L, *R; 8265 SCEV::NoWrapFlags Flags; 8266 if (splitBinaryAdd(Less, L, R, Flags)) 8267 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 8268 if (R == More) { 8269 C = -(LC->getAPInt()); 8270 return true; 8271 } 8272 8273 if (splitBinaryAdd(More, L, R, Flags)) 8274 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 8275 if (R == Less) { 8276 C = LC->getAPInt(); 8277 return true; 8278 } 8279 8280 return false; 8281 } 8282 8283 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 8284 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 8285 const SCEV *FoundLHS, const SCEV *FoundRHS) { 8286 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 8287 return false; 8288 8289 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 8290 if (!AddRecLHS) 8291 return false; 8292 8293 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 8294 if (!AddRecFoundLHS) 8295 return false; 8296 8297 // We'd like to let SCEV reason about control dependencies, so we constrain 8298 // both the inequalities to be about add recurrences on the same loop. This 8299 // way we can use isLoopEntryGuardedByCond later. 8300 8301 const Loop *L = AddRecFoundLHS->getLoop(); 8302 if (L != AddRecLHS->getLoop()) 8303 return false; 8304 8305 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 8306 // 8307 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 8308 // ... (2) 8309 // 8310 // Informal proof for (2), assuming (1) [*]: 8311 // 8312 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... 
(3)[**] 8313 // 8314 // Then 8315 // 8316 // FoundLHS s< FoundRHS s< INT_MIN - C 8317 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 8318 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 8319 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 8320 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 8321 // <=> FoundLHS + C s< FoundRHS + C 8322 // 8323 // [*]: (1) can be proved by ruling out overflow. 8324 // 8325 // [**]: This can be proved by analyzing all the four possibilities: 8326 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 8327 // (A s>= 0, B s>= 0). 8328 // 8329 // Note: 8330 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 8331 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 8332 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 8333 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 8334 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 8335 // C)". 8336 8337 APInt LDiff, RDiff; 8338 if (!computeConstantDifference(FoundLHS, LHS, LDiff) || 8339 !computeConstantDifference(FoundRHS, RHS, RDiff) || 8340 LDiff != RDiff) 8341 return false; 8342 8343 if (LDiff == 0) 8344 return true; 8345 8346 APInt FoundRHSLimit; 8347 8348 if (Pred == CmpInst::ICMP_ULT) { 8349 FoundRHSLimit = -RDiff; 8350 } else { 8351 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 8352 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - RDiff; 8353 } 8354 8355 // Try to prove (1) or (2), as needed. 8356 return isLoopEntryGuardedByCond(L, Pred, FoundRHS, 8357 getConstant(FoundRHSLimit)); 8358 } 8359 8360 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 8361 const SCEV *LHS, const SCEV *RHS, 8362 const SCEV *FoundLHS, 8363 const SCEV *FoundRHS) { 8364 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 8365 return true; 8366 8367 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 8368 return true; 8369 8370 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 8371 FoundLHS, FoundRHS) || 8372 // ~x < ~y --> x > y 8373 isImpliedCondOperandsHelper(Pred, LHS, RHS, 8374 getNotSCEV(FoundRHS), 8375 getNotSCEV(FoundLHS)); 8376 } 8377 8378 8379 /// If Expr computes ~A, return A else return nullptr 8380 static const SCEV *MatchNotExpr(const SCEV *Expr) { 8381 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 8382 if (!Add || Add->getNumOperands() != 2 || 8383 !Add->getOperand(0)->isAllOnesValue()) 8384 return nullptr; 8385 8386 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 8387 if (!AddRHS || AddRHS->getNumOperands() != 2 || 8388 !AddRHS->getOperand(0)->isAllOnesValue()) 8389 return nullptr; 8390 8391 return AddRHS->getOperand(1); 8392 } 8393 8394 8395 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values? 8396 template<typename MaxExprType> 8397 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr, 8398 const SCEV *Candidate) { 8399 const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr); 8400 if (!MaxExpr) return false; 8401 8402 return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end(); 8403 } 8404 8405 8406 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values? 
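/// Note that SCEV has no first-class min expression: smin(A, B) is
/// represented as ~smax(~A, ~B), where ~X is (-1 + (-1 * X)); that is the
/// shape MatchNotExpr recognizes.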
8407 template<typename MaxExprType> 8408 static bool IsMinConsistingOf(ScalarEvolution &SE, 8409 const SCEV *MaybeMinExpr, 8410 const SCEV *Candidate) { 8411 const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr); 8412 if (!MaybeMaxExpr) 8413 return false; 8414 8415 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate)); 8416 } 8417 8418 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 8419 ICmpInst::Predicate Pred, 8420 const SCEV *LHS, const SCEV *RHS) { 8421 8422 // If both sides are affine addrecs for the same loop, with equal 8423 // steps, and we know the recurrences don't wrap, then we only 8424 // need to check the predicate on the starting values. 8425 8426 if (!ICmpInst::isRelational(Pred)) 8427 return false; 8428 8429 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 8430 if (!LAR) 8431 return false; 8432 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 8433 if (!RAR) 8434 return false; 8435 if (LAR->getLoop() != RAR->getLoop()) 8436 return false; 8437 if (!LAR->isAffine() || !RAR->isAffine()) 8438 return false; 8439 8440 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 8441 return false; 8442 8443 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 8444 SCEV::FlagNSW : SCEV::FlagNUW; 8445 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) 8446 return false; 8447 8448 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); 8449 } 8450 8451 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max 8452 /// expression? 8453 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, 8454 ICmpInst::Predicate Pred, 8455 const SCEV *LHS, const SCEV *RHS) { 8456 switch (Pred) { 8457 default: 8458 return false; 8459 8460 case ICmpInst::ICMP_SGE: 8461 std::swap(LHS, RHS); 8462 // fall through 8463 case ICmpInst::ICMP_SLE: 8464 return 8465 // min(A, ...) <= A 8466 IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) || 8467 // A <= max(A, ...) 8468 IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); 8469 8470 case ICmpInst::ICMP_UGE: 8471 std::swap(LHS, RHS); 8472 // fall through 8473 case ICmpInst::ICMP_ULE: 8474 return 8475 // min(A, ...) <= A 8476 IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) || 8477 // A <= max(A, ...)
8478 IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS); 8479 } 8480 8481 llvm_unreachable("covered switch fell through?!"); 8482 } 8483 8484 bool 8485 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, 8486 const SCEV *LHS, const SCEV *RHS, 8487 const SCEV *FoundLHS, 8488 const SCEV *FoundRHS) { 8489 auto IsKnownPredicateFull = 8490 [this](ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 8491 return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) || 8492 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) || 8493 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) || 8494 isKnownPredicateViaNoOverflow(Pred, LHS, RHS); 8495 }; 8496 8497 switch (Pred) { 8498 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 8499 case ICmpInst::ICMP_EQ: 8500 case ICmpInst::ICMP_NE: 8501 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) 8502 return true; 8503 break; 8504 case ICmpInst::ICMP_SLT: 8505 case ICmpInst::ICMP_SLE: 8506 if (IsKnownPredicateFull(ICmpInst::ICMP_SLE, LHS, FoundLHS) && 8507 IsKnownPredicateFull(ICmpInst::ICMP_SGE, RHS, FoundRHS)) 8508 return true; 8509 break; 8510 case ICmpInst::ICMP_SGT: 8511 case ICmpInst::ICMP_SGE: 8512 if (IsKnownPredicateFull(ICmpInst::ICMP_SGE, LHS, FoundLHS) && 8513 IsKnownPredicateFull(ICmpInst::ICMP_SLE, RHS, FoundRHS)) 8514 return true; 8515 break; 8516 case ICmpInst::ICMP_ULT: 8517 case ICmpInst::ICMP_ULE: 8518 if (IsKnownPredicateFull(ICmpInst::ICMP_ULE, LHS, FoundLHS) && 8519 IsKnownPredicateFull(ICmpInst::ICMP_UGE, RHS, FoundRHS)) 8520 return true; 8521 break; 8522 case ICmpInst::ICMP_UGT: 8523 case ICmpInst::ICMP_UGE: 8524 if (IsKnownPredicateFull(ICmpInst::ICMP_UGE, LHS, FoundLHS) && 8525 IsKnownPredicateFull(ICmpInst::ICMP_ULE, RHS, FoundRHS)) 8526 return true; 8527 break; 8528 } 8529 8530 return false; 8531 } 8532 8533 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, 8534 const SCEV *LHS, 8535 const SCEV *RHS, 8536 const SCEV *FoundLHS, 8537 const SCEV *FoundRHS) { 8538 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS)) 8539 // The restriction on `FoundRHS` can be lifted easily -- it exists only to 8540 // reduce the compile time impact of this optimization. 8541 return false; 8542 8543 const SCEVAddExpr *AddLHS = dyn_cast<SCEVAddExpr>(LHS); 8544 if (!AddLHS || AddLHS->getOperand(1) != FoundLHS || 8545 !isa<SCEVConstant>(AddLHS->getOperand(0))) 8546 return false; 8547 8548 APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt(); 8549 8550 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the 8551 // antecedent "`FoundLHS` `Pred` `FoundRHS`". 8552 ConstantRange FoundLHSRange = 8553 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 8554 8555 // Since `LHS` is `FoundLHS` + `AddLHS->getOperand(0)`, we can compute a range 8556 // for `LHS`: 8557 APInt Addend = cast<SCEVConstant>(AddLHS->getOperand(0))->getAPInt(); 8558 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(Addend)); 8559 8560 // We can also compute the range of values for `LHS` that satisfy the 8561 // consequent, "`LHS` `Pred` `RHS`": 8562 APInt ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 8563 ConstantRange SatisfyingLHSRange = 8564 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 8565 8566 // The antecedent implies the consequent if every value of `LHS` that 8567 // satisfies the antecedent also satisfies the consequent.
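// For instance (i8, Pred = ICMP_ULT): from "FoundLHS u< 8" we get
// FoundLHSRange = [0, 8); with Addend = 4 this gives LHSRange = [4, 12); if
// RHS is 16, the satisfying region is [0, 16), which contains [4, 12), so
// the antecedent implies the consequent.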
8568 return SatisfyingLHSRange.contains(LHSRange); 8569 } 8570 8571 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 8572 bool IsSigned, bool NoWrap) { 8573 if (NoWrap) return false; 8574 8575 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 8576 const SCEV *One = getOne(Stride->getType()); 8577 8578 if (IsSigned) { 8579 APInt MaxRHS = getSignedRange(RHS).getSignedMax(); 8580 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 8581 APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One)) 8582 .getSignedMax(); 8583 8584 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 8585 return (MaxValue - MaxStrideMinusOne).slt(MaxRHS); 8586 } 8587 8588 APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax(); 8589 APInt MaxValue = APInt::getMaxValue(BitWidth); 8590 APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One)) 8591 .getUnsignedMax(); 8592 8593 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 8594 return (MaxValue - MaxStrideMinusOne).ult(MaxRHS); 8595 } 8596 8597 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 8598 bool IsSigned, bool NoWrap) { 8599 if (NoWrap) return false; 8600 8601 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 8602 const SCEV *One = getOne(Stride->getType()); 8603 8604 if (IsSigned) { 8605 APInt MinRHS = getSignedRange(RHS).getSignedMin(); 8606 APInt MinValue = APInt::getSignedMinValue(BitWidth); 8607 APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One)) 8608 .getSignedMax(); 8609 8610 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 8611 return (MinValue + MaxStrideMinusOne).sgt(MinRHS); 8612 } 8613 8614 APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin(); 8615 APInt MinValue = APInt::getMinValue(BitWidth); 8616 APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One)) 8617 .getUnsignedMax(); 8618 8619 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 8620 return (MinValue + MaxStrideMinusOne).ugt(MinRHS); 8621 } 8622 8623 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 8624 bool Equality) { 8625 const SCEV *One = getOne(Step->getType()); 8626 Delta = Equality ? getAddExpr(Delta, Step) 8627 : getAddExpr(Delta, getMinusSCEV(Step, One)); 8628 return getUDivExpr(Delta, Step); 8629 } 8630 8631 ScalarEvolution::ExitLimit 8632 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 8633 const Loop *L, bool IsSigned, 8634 bool ControlsExit, bool AllowPredicates) { 8635 SCEVUnionPredicate P; 8636 // We handle only IV < Invariant 8637 if (!isLoopInvariant(RHS, L)) 8638 return getCouldNotCompute(); 8639 8640 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 8641 if (!IV && AllowPredicates) 8642 // Try to make this an AddRec using runtime tests, in the first X 8643 // iterations of this loop, where X is the SCEV expression found by the 8644 // algorithm below. 8645 IV = convertSCEVToAddRecWithPredicates(LHS, L, P); 8646 8647 // Avoid weird loops 8648 if (!IV || IV->getLoop() != L || !IV->isAffine()) 8649 return getCouldNotCompute(); 8650 8651 bool NoWrap = ControlsExit && 8652 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 8653 8654 const SCEV *Stride = IV->getStepRecurrence(*this); 8655 8656 // Avoid negative or zero stride values 8657 if (!isKnownPositive(Stride)) 8658 return getCouldNotCompute(); 8659 8660 // Avoid proven overflow cases: this will ensure that the backedge taken count 8661 // will not generate any unsigned overflow. 
Relaxed no-overflow conditions 8662 // exploit NoWrapFlags, allowing us to optimize in the presence of 8663 // undefined behavior, as in C. 8664 if (!Stride->isOne() && doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) 8665 return getCouldNotCompute(); 8666 8667 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 8668 : ICmpInst::ICMP_ULT; 8669 const SCEV *Start = IV->getStart(); 8670 const SCEV *End = RHS; 8671 if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) { 8672 const SCEV *Diff = getMinusSCEV(RHS, Start); 8673 // If we have NoWrap set, then we can assume that the increment won't 8674 // overflow, in which case if RHS - Start is a constant, we don't need to 8675 // do a max operation since we can just figure it out statically 8676 if (NoWrap && isa<SCEVConstant>(Diff)) { 8677 if (cast<SCEVConstant>(Diff)->getAPInt().isNegative()) 8678 End = Start; 8679 } else 8680 End = IsSigned ? getSMaxExpr(RHS, Start) 8681 : getUMaxExpr(RHS, Start); 8682 } 8683 8684 const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 8685 8686 APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin() 8687 : getUnsignedRange(Start).getUnsignedMin(); 8688 8689 APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin() 8690 : getUnsignedRange(Stride).getUnsignedMin(); 8691 8692 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 8693 APInt Limit = IsSigned ? APInt::getSignedMaxValue(BitWidth) - (MinStride - 1) 8694 : APInt::getMaxValue(BitWidth) - (MinStride - 1); 8695 8696 // Although End can be a MAX expression we estimate MaxEnd considering only 8697 // the case End = RHS. This is safe because in the other case (End - Start) 8698 // is zero, leading to a zero maximum backedge taken count. 8699 APInt MaxEnd = 8700 IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit) 8701 : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit); 8702 8703 const SCEV *MaxBECount; 8704 if (isa<SCEVConstant>(BECount)) 8705 MaxBECount = BECount; 8706 else 8707 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart), 8708 getConstant(MinStride), false); 8709 8710 if (isa<SCEVCouldNotCompute>(MaxBECount)) 8711 MaxBECount = BECount; 8712 8713 return ExitLimit(BECount, MaxBECount, P); 8714 } 8715 8716 ScalarEvolution::ExitLimit 8717 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 8718 const Loop *L, bool IsSigned, 8719 bool ControlsExit, bool AllowPredicates) { 8720 SCEVUnionPredicate P; 8721 // We handle only IV > Invariant 8722 if (!isLoopInvariant(RHS, L)) 8723 return getCouldNotCompute(); 8724 8725 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 8726 if (!IV && AllowPredicates) 8727 // Try to make this an AddRec using runtime tests, in the first X 8728 // iterations of this loop, where X is the SCEV expression found by the 8729 // algorithm below. 8730 IV = convertSCEVToAddRecWithPredicates(LHS, L, P); 8731 8732 // Avoid weird loops 8733 if (!IV || IV->getLoop() != L || !IV->isAffine()) 8734 return getCouldNotCompute(); 8735 8736 bool NoWrap = ControlsExit && 8737 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 8738 8739 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 8740 8741 // Avoid negative or zero stride values 8742 if (!isKnownPositive(Stride)) 8743 return getCouldNotCompute(); 8744 8745 // Avoid proven overflow cases: this will ensure that the backedge taken count 8746 // will not generate any unsigned overflow. Relaxed no-overflow conditions 8747 // exploit NoWrapFlags, allowing us to optimize in the presence of 8748 // undefined behavior, as in C. 8749 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 8750 return getCouldNotCompute(); 8751 8752 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 8753 : ICmpInst::ICMP_UGT; 8754 8755 const SCEV *Start = IV->getStart(); 8756 const SCEV *End = RHS; 8757 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { 8758 const SCEV *Diff = getMinusSCEV(RHS, Start); 8759 // If we have NoWrap set, then we can assume that the increment won't 8760 // overflow, in which case if RHS - Start is a constant, we don't need to 8761 // do a max operation since we can just figure it out statically 8762 if (NoWrap && isa<SCEVConstant>(Diff)) { 8763 if (!cast<SCEVConstant>(Diff)->getAPInt().isNegative()) 8764 End = Start; 8765 } else 8766 End = IsSigned ? getSMinExpr(RHS, Start) 8767 : getUMinExpr(RHS, Start); 8768 } 8769 8770 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 8771 8772 APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax() 8773 : getUnsignedRange(Start).getUnsignedMax(); 8774 8775 APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin() 8776 : getUnsignedRange(Stride).getUnsignedMin(); 8777 8778 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 8779 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 8780 : APInt::getMinValue(BitWidth) + (MinStride - 1); 8781 8782 // Although End can be a MIN expression we estimate MinEnd considering only 8783 // the case End = RHS. This is safe because in the other case (Start - End) 8784 // is zero, leading to a zero maximum backedge taken count. 8785 APInt MinEnd = 8786 IsSigned ? APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit) 8787 : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit); 8788 8789 8790 const SCEV *MaxBECount = getCouldNotCompute(); 8791 if (isa<SCEVConstant>(BECount)) 8792 MaxBECount = BECount; 8793 else 8794 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 8795 getConstant(MinStride), false); 8796 8797 if (isa<SCEVCouldNotCompute>(MaxBECount)) 8798 MaxBECount = BECount; 8799 8800 return ExitLimit(BECount, MaxBECount, P); 8801 } 8802 8803 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 8804 ScalarEvolution &SE) const { 8805 if (Range.isFullSet()) // Infinite loop. 8806 return SE.getCouldNotCompute(); 8807 8808 // If the start is a non-zero constant, shift the range to simplify things. 8809 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 8810 if (!SC->getValue()->isZero()) { 8811 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 8812 Operands[0] = SE.getZero(SC->getType()); 8813 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 8814 getNoWrapFlags(FlagNW)); 8815 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 8816 return ShiftedAddRec->getNumIterationsInRange( 8817 Range.subtract(SC->getAPInt()), SE); 8818 // This is strange and shouldn't happen. 8819 return SE.getCouldNotCompute(); 8820 } 8821 8822 // The only time we can solve this is when we have all constant indices. 8823 // Otherwise, we cannot determine the overflow conditions.
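// For instance, in the affine case handled below, {0,+,3} leaves
// Range = [0, 10) at ExitVal = (9 + 3) / 3 = 4: iteration 4 produces 12,
// the first value outside the range, while iteration 3 still produces 9.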
8824 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 8825 return SE.getCouldNotCompute(); 8826 8827 // Okay at this point we know that all elements of the chrec are constants and 8828 // that the start element is zero. 8829 8830 // First check to see if the range contains zero. If not, the first 8831 // iteration exits. 8832 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 8833 if (!Range.contains(APInt(BitWidth, 0))) 8834 return SE.getZero(getType()); 8835 8836 if (isAffine()) { 8837 // If this is an affine expression then we have this situation: 8838 // Solve {0,+,A} in Range === Ax in Range 8839 8840 // We know that zero is in the range. If A is positive then we know that 8841 // the upper value of the range must be the first possible exit value. 8842 // If A is negative then the lower of the range is the last possible loop 8843 // value. Also note that we already checked for a full range. 8844 APInt One(BitWidth,1); 8845 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 8846 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower(); 8847 8848 // The exit value should be (End+A)/A. 8849 APInt ExitVal = (End + A).udiv(A); 8850 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 8851 8852 // Evaluate at the exit value. If we really did fall out of the valid 8853 // range, then we computed our trip count, otherwise wrap around or other 8854 // things must have happened. 8855 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 8856 if (Range.contains(Val->getValue())) 8857 return SE.getCouldNotCompute(); // Something strange happened 8858 8859 // Ensure that the previous value is in the range. This is a sanity check. 8860 assert(Range.contains( 8861 EvaluateConstantChrecAtConstant(this, 8862 ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) && 8863 "Linear scev computation is off in a bad way!"); 8864 return SE.getConstant(ExitValue); 8865 } else if (isQuadratic()) { 8866 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the 8867 // quadratic equation to solve it. To do this, we must frame our problem in 8868 // terms of figuring out when zero is crossed, instead of when 8869 // Range.getUpper() is crossed. 8870 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end()); 8871 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); 8872 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), 8873 // getNoWrapFlags(FlagNW) 8874 FlagAnyWrap); 8875 8876 // Next, solve the constructed addrec 8877 if (auto Roots = 8878 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE)) { 8879 const SCEVConstant *R1 = Roots->first; 8880 const SCEVConstant *R2 = Roots->second; 8881 // Pick the smallest positive root value. 8882 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( 8883 ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { 8884 if (!CB->getZExtValue()) 8885 std::swap(R1, R2); // R1 is the minimum root now. 8886 8887 // Make sure the root is not off by one. The returned iteration should 8888 // not be in the range, but the previous one should be. When solving 8889 // for "X*X < 5", for example, we should not return a root of 2. 8890 ConstantInt *R1Val = 8891 EvaluateConstantChrecAtConstant(this, R1->getValue(), SE); 8892 if (Range.contains(R1Val->getValue())) { 8893 // The next iteration must be out of the range... 
8894 ConstantInt *NextVal = 8895 ConstantInt::get(SE.getContext(), R1->getAPInt() + 1); 8896 8897 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 8898 if (!Range.contains(R1Val->getValue())) 8899 return SE.getConstant(NextVal); 8900 return SE.getCouldNotCompute(); // Something strange happened 8901 } 8902 8903 // If R1 was not in the range, then it is a good return value. Make 8904 // sure that R1-1 WAS in the range though, just in case. 8905 ConstantInt *NextVal = 8906 ConstantInt::get(SE.getContext(), R1->getAPInt() - 1); 8907 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 8908 if (Range.contains(R1Val->getValue())) 8909 return R1; 8910 return SE.getCouldNotCompute(); // Something strange happened 8911 } 8912 } 8913 } 8914 8915 return SE.getCouldNotCompute(); 8916 } 8917 8918 namespace { 8919 struct FindUndefs { 8920 bool Found; 8921 FindUndefs() : Found(false) {} 8922 8923 bool follow(const SCEV *S) { 8924 if (const SCEVUnknown *C = dyn_cast<SCEVUnknown>(S)) { 8925 if (isa<UndefValue>(C->getValue())) 8926 Found = true; 8927 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) { 8928 if (isa<UndefValue>(C->getValue())) 8929 Found = true; 8930 } 8931 8932 // Keep looking if we haven't found it yet. 8933 return !Found; 8934 } 8935 bool isDone() const { 8936 // Stop recursion if we have found an undef. 8937 return Found; 8938 } 8939 }; 8940 } 8941 8942 // Return true when S contains at least an undef value. 8943 static inline bool 8944 containsUndefs(const SCEV *S) { 8945 FindUndefs F; 8946 SCEVTraversal<FindUndefs> ST(F); 8947 ST.visitAll(S); 8948 8949 return F.Found; 8950 } 8951 8952 namespace { 8953 // Collect all steps of SCEV expressions. 8954 struct SCEVCollectStrides { 8955 ScalarEvolution &SE; 8956 SmallVectorImpl<const SCEV *> &Strides; 8957 8958 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) 8959 : SE(SE), Strides(S) {} 8960 8961 bool follow(const SCEV *S) { 8962 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 8963 Strides.push_back(AR->getStepRecurrence(SE)); 8964 return true; 8965 } 8966 bool isDone() const { return false; } 8967 }; 8968 8969 // Collect all SCEVUnknown and SCEVMulExpr expressions. 8970 struct SCEVCollectTerms { 8971 SmallVectorImpl<const SCEV *> &Terms; 8972 8973 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) 8974 : Terms(T) {} 8975 8976 bool follow(const SCEV *S) { 8977 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S)) { 8978 if (!containsUndefs(S)) 8979 Terms.push_back(S); 8980 8981 // Stop recursion: once we collected a term, do not walk its operands. 8982 return false; 8983 } 8984 8985 // Keep looking. 8986 return true; 8987 } 8988 bool isDone() const { return false; } 8989 }; 8990 8991 // Check if a SCEV contains an AddRecExpr. 8992 struct SCEVHasAddRec { 8993 bool &ContainsAddRec; 8994 8995 SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) { 8996 ContainsAddRec = false; 8997 } 8998 8999 bool follow(const SCEV *S) { 9000 if (isa<SCEVAddRecExpr>(S)) { 9001 ContainsAddRec = true; 9002 9003 // Stop recursion: once we collected a term, do not walk its operands. 9004 return false; 9005 } 9006 9007 // Keep looking. 9008 return true; 9009 } 9010 bool isDone() const { return false; } 9011 }; 9012 9013 // Find factors that are multiplied with an expression that (possibly as a 9014 // subexpression) contains an AddRecExpr. 
In the expression: 9015 // 9016 // 8 * (100 + %p * %q * (%a + {0, +, 1}_loop)) 9017 // 9018 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)" 9019 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size 9020 // parameters as they form a product with an induction variable. 9021 // 9022 // This collector expects all array size parameters to be in the same MulExpr. 9023 // It might be necessary to later add support for collecting parameters that are 9024 // spread over different nested MulExpr. 9025 struct SCEVCollectAddRecMultiplies { 9026 SmallVectorImpl<const SCEV *> &Terms; 9027 ScalarEvolution &SE; 9028 9029 SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE) 9030 : Terms(T), SE(SE) {} 9031 9032 bool follow(const SCEV *S) { 9033 if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) { 9034 bool HasAddRec = false; 9035 SmallVector<const SCEV *, 0> Operands; 9036 for (auto Op : Mul->operands()) { 9037 if (isa<SCEVUnknown>(Op)) { 9038 Operands.push_back(Op); 9039 } else { 9040 bool ContainsAddRec; 9041 SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec); 9042 visitAll(Op, ContainsAddRecVisitor); 9043 HasAddRec |= ContainsAddRec; 9044 } 9045 } 9046 if (Operands.size() == 0) 9047 return true; 9048 9049 if (!HasAddRec) 9050 return false; 9051 9052 Terms.push_back(SE.getMulExpr(Operands)); 9053 // Stop recursion: once we collected a term, do not walk its operands. 9054 return false; 9055 } 9056 9057 // Keep looking. 9058 return true; 9059 } 9060 bool isDone() const { return false; } 9061 }; 9062 } 9063 9064 /// Find parametric terms in this SCEVAddRecExpr. We first look for parameters 9065 /// in two places: 9066 /// 1) The strides of AddRec expressions. 9067 /// 2) Unknowns that are multiplied with AddRec expressions. 9068 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 9069 SmallVectorImpl<const SCEV *> &Terms) { 9070 SmallVector<const SCEV *, 4> Strides; 9071 SCEVCollectStrides StrideCollector(*this, Strides); 9072 visitAll(Expr, StrideCollector); 9073 9074 DEBUG({ 9075 dbgs() << "Strides:\n"; 9076 for (const SCEV *S : Strides) 9077 dbgs() << *S << "\n"; 9078 }); 9079 9080 for (const SCEV *S : Strides) { 9081 SCEVCollectTerms TermCollector(Terms); 9082 visitAll(S, TermCollector); 9083 } 9084 9085 DEBUG({ 9086 dbgs() << "Terms:\n"; 9087 for (const SCEV *T : Terms) 9088 dbgs() << *T << "\n"; 9089 }); 9090 9091 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 9092 visitAll(Expr, MulCollector); 9093 } 9094 9095 static bool findArrayDimensionsRec(ScalarEvolution &SE, 9096 SmallVectorImpl<const SCEV *> &Terms, 9097 SmallVectorImpl<const SCEV *> &Sizes) { 9098 int Last = Terms.size() - 1; 9099 const SCEV *Step = Terms[Last]; 9100 9101 // End of recursion. 9102 if (Last == 0) { 9103 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 9104 SmallVector<const SCEV *, 2> Qs; 9105 for (const SCEV *Op : M->operands()) 9106 if (!isa<SCEVConstant>(Op)) 9107 Qs.push_back(Op); 9108 9109 Step = SE.getMulExpr(Qs); 9110 } 9111 9112 Sizes.push_back(Step); 9113 return true; 9114 } 9115 9116 for (const SCEV *&Term : Terms) { 9117 // Normalize the terms before the next call to findArrayDimensionsRec. 9118 const SCEV *Q, *R; 9119 SCEVDivision::divide(SE, Term, Step, &Q, &R); 9120 9121 // Bail out when GCD does not evenly divide one of the terms. 9122 if (!R->isZero()) 9123 return false; 9124 9125 Term = Q; 9126 } 9127 9128 // Remove all SCEVConstants.
9129 Terms.erase(std::remove_if(Terms.begin(), Terms.end(), [](const SCEV *E) { 9130 return isa<SCEVConstant>(E); 9131 }), 9132 Terms.end()); 9133 9134 if (Terms.size() > 0) 9135 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 9136 return false; 9137 9138 Sizes.push_back(Step); 9139 return true; 9140 } 9141 9142 // Returns true when S contains at least a SCEVUnknown parameter. 9143 static inline bool 9144 containsParameters(const SCEV *S) { 9145 struct FindParameter { 9146 bool FoundParameter; 9147 FindParameter() : FoundParameter(false) {} 9148 9149 bool follow(const SCEV *S) { 9150 if (isa<SCEVUnknown>(S)) { 9151 FoundParameter = true; 9152 // Stop recursion: we found a parameter. 9153 return false; 9154 } 9155 // Keep looking. 9156 return true; 9157 } 9158 bool isDone() const { 9159 // Stop recursion if we have found a parameter. 9160 return FoundParameter; 9161 } 9162 }; 9163 9164 FindParameter F; 9165 SCEVTraversal<FindParameter> ST(F); 9166 ST.visitAll(S); 9167 9168 return F.FoundParameter; 9169 } 9170 9171 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 9172 static inline bool 9173 containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 9174 for (const SCEV *T : Terms) 9175 if (containsParameters(T)) 9176 return true; 9177 return false; 9178 } 9179 9180 // Return the number of product terms in S. 9181 static inline int numberOfTerms(const SCEV *S) { 9182 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 9183 return Expr->getNumOperands(); 9184 return 1; 9185 } 9186 9187 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 9188 if (isa<SCEVConstant>(T)) 9189 return nullptr; 9190 9191 if (isa<SCEVUnknown>(T)) 9192 return T; 9193 9194 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 9195 SmallVector<const SCEV *, 2> Factors; 9196 for (const SCEV *Op : M->operands()) 9197 if (!isa<SCEVConstant>(Op)) 9198 Factors.push_back(Op); 9199 9200 return SE.getMulExpr(Factors); 9201 } 9202 9203 return T; 9204 } 9205 9206 /// Return the size of an element read or written by Inst. 9207 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 9208 Type *Ty; 9209 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 9210 Ty = Store->getValueOperand()->getType(); 9211 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 9212 Ty = Load->getType(); 9213 else 9214 return nullptr; 9215 9216 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 9217 return getSizeOfExpr(ETy, Ty); 9218 } 9219 9220 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 9221 SmallVectorImpl<const SCEV *> &Sizes, 9222 const SCEV *ElementSize) const { 9223 if (Terms.size() < 1 || !ElementSize) 9224 return; 9225 9226 // Early return when Terms do not contain parameters: we do not delinearize 9227 // non parametric SCEVs. 9228 if (!containsParameters(Terms)) 9229 return; 9230 9231 DEBUG({ 9232 dbgs() << "Terms:\n"; 9233 for (const SCEV *T : Terms) 9234 dbgs() << *T << "\n"; 9235 }); 9236 9237 // Remove duplicates. 9238 std::sort(Terms.begin(), Terms.end()); 9239 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 9240 9241 // Put larger terms first. 9242 std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) { 9243 return numberOfTerms(LHS) > numberOfTerms(RHS); 9244 }); 9245 9246 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 9247 9248 // Try to divide all terms by the element size. If term is not divisible by 9249 // element size, proceed with the original term. 
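// For instance, with ElementSize = 8 (sizeof(double)), a term such as
// (8 * %m * %o) divides down to (%m * %o), while a term like %n is kept
// unchanged because its quotient by 8 would be zero.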
9250 for (const SCEV *&Term : Terms) { 9251 const SCEV *Q, *R; 9252 SCEVDivision::divide(SE, Term, ElementSize, &Q, &R); 9253 if (!Q->isZero()) 9254 Term = Q; 9255 } 9256 9257 SmallVector<const SCEV *, 4> NewTerms; 9258 9259 // Remove constant factors. 9260 for (const SCEV *T : Terms) 9261 if (const SCEV *NewT = removeConstantFactors(SE, T)) 9262 NewTerms.push_back(NewT); 9263 9264 DEBUG({ 9265 dbgs() << "Terms after sorting and removing constant factors:\n"; 9266 for (const SCEV *T : NewTerms) 9267 dbgs() << *T << "\n"; 9268 }); 9269 9270 if (NewTerms.empty() || 9271 !findArrayDimensionsRec(SE, NewTerms, Sizes)) { 9272 Sizes.clear(); 9273 return; 9274 } 9275 9276 // The last element to be pushed into Sizes is the size of an element. 9277 Sizes.push_back(ElementSize); 9278 9279 DEBUG({ 9280 dbgs() << "Sizes:\n"; 9281 for (const SCEV *S : Sizes) 9282 dbgs() << *S << "\n"; 9283 }); 9284 } 9285 9286 void ScalarEvolution::computeAccessFunctions( 9287 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 9288 SmallVectorImpl<const SCEV *> &Sizes) { 9289 9290 // Early exit in case this SCEV is not an affine multivariate function. 9291 if (Sizes.empty()) 9292 return; 9293 9294 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 9295 if (!AR->isAffine()) 9296 return; 9297 9298 const SCEV *Res = Expr; 9299 int Last = Sizes.size() - 1; 9300 for (int i = Last; i >= 0; i--) { 9301 const SCEV *Q, *R; 9302 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 9303 9304 DEBUG({ 9305 dbgs() << "Res: " << *Res << "\n"; 9306 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 9307 dbgs() << "Res divided by Sizes[i]:\n"; 9308 dbgs() << "Quotient: " << *Q << "\n"; 9309 dbgs() << "Remainder: " << *R << "\n"; 9310 }); 9311 9312 Res = Q; 9313 9314 // Do not record the last subscript corresponding to the size of the 9315 // elements in the array. 9316 if (i == Last) { 9317 9318 // Bail out if the remainder is too complex. 9319 if (isa<SCEVAddRecExpr>(R)) { 9320 Subscripts.clear(); 9321 Sizes.clear(); 9322 return; 9323 } 9324 9325 continue; 9326 } 9327 9328 // Record the access function for the current subscript. 9329 Subscripts.push_back(R); 9330 } 9331 9332 // Also push in last position the quotient of the last division: after the 9333 // reversal just below, it becomes the access function of the outermost dimension. 9334 Subscripts.push_back(Res); 9335 9336 std::reverse(Subscripts.begin(), Subscripts.end()); 9337 9338 DEBUG({ 9339 dbgs() << "Subscripts:\n"; 9340 for (const SCEV *S : Subscripts) 9341 dbgs() << *S << "\n"; 9342 }); 9343 } 9344 9345 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 9346 /// sizes of an array access. The delinearization is a pattern matching of 9347 /// subexpressions in the stride and base of a SCEV corresponding to the 9348 /// computation of a GCD (greatest common divisor) of base and stride: the 9349 /// SCEV->delinearize algorithm computes the multiples of the SCEV 9350 /// coefficients. When SCEV->delinearize fails, it leaves Subscripts and 9351 /// Sizes empty.
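///
/// Delinearization proceeds in three steps: collectParametricTerms gathers
/// candidate terms from the strides and multiplies of the expression,
/// findArrayDimensions recovers the array sizes from those terms, and
/// computeAccessFunctions peels off one subscript per recovered size by
/// repeated division.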
9352 /// 9353 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 9354 /// 9355 /// void foo(long n, long m, long o, double A[n][m][o]) { 9356 /// 9357 /// for (long i = 0; i < n; i++) 9358 /// for (long j = 0; j < m; j++) 9359 /// for (long k = 0; k < o; k++) 9360 /// A[i][j][k] = 1.0; 9361 /// } 9362 /// 9363 /// the delinearization input is the following AddRec SCEV: 9364 /// 9365 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 9366 /// 9367 /// From this SCEV, we are able to say that the base offset of the access is %A 9368 /// because it appears as an offset that does not divide any of the strides in 9369 /// the loops: 9370 /// 9371 /// CHECK: Base offset: %A 9372 /// 9373 /// and then SCEV->delinearize determines the size of some of the dimensions of 9374 /// the array as these are the multiples by which the strides are happening: 9375 /// 9376 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 9377 /// 9378 /// Note that the outermost dimension remains of UnknownSize because there are 9379 /// no strides that would help identify the size of the outermost dimension: 9380 /// when the array has been statically allocated, one could compute the size of 9381 /// that dimension by dividing the overall size of the array by the size of the 9382 /// known dimensions: %m * %o * 8. 9383 /// 9384 /// Finally delinearize provides the access functions for the array reference 9385 /// that corresponds to A[i][j][k] of the above C testcase: 9386 /// 9387 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 9388 /// 9389 /// The testcases check the output of DelinearizationPass, a function pass that 9390 /// walks through all loads and stores of a function, asks for the SCEV of each 9391 /// memory access with respect to all enclosing loops, calls SCEV->delinearize 9392 /// on it, and prints the results. 9393 9394 void ScalarEvolution::delinearize(const SCEV *Expr, 9395 SmallVectorImpl<const SCEV *> &Subscripts, 9396 SmallVectorImpl<const SCEV *> &Sizes, 9397 const SCEV *ElementSize) { 9398 // First step: collect parametric terms. 9399 SmallVector<const SCEV *, 4> Terms; 9400 collectParametricTerms(Expr, Terms); 9401 9402 if (Terms.empty()) 9403 return; 9404 9405 // Second step: find subscript sizes. 9406 findArrayDimensions(Terms, Sizes, ElementSize); 9407 9408 if (Sizes.empty()) 9409 return; 9410 9411 // Third step: compute the access functions for each subscript. 9412 computeAccessFunctions(Expr, Subscripts, Sizes); 9413 9414 if (Subscripts.empty()) 9415 return; 9416 9417 DEBUG({ 9418 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 9419 dbgs() << "ArrayDecl[UnknownSize]"; 9420 for (const SCEV *S : Sizes) 9421 dbgs() << "[" << *S << "]"; 9422 9423 dbgs() << "\nArrayRef"; 9424 for (const SCEV *S : Subscripts) 9425 dbgs() << "[" << *S << "]"; 9426 dbgs() << "\n"; 9427 }); 9428 } 9429 9430 //===----------------------------------------------------------------------===// 9431 // SCEVCallbackVH Class Implementation 9432 //===----------------------------------------------------------------------===// 9433 9434 void ScalarEvolution::SCEVCallbackVH::deleted() { 9435 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 9436 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 9437 SE->ConstantEvolutionLoopExitValue.erase(PN); 9438 SE->eraseValueFromMap(getValPtr()); 9439 // this now dangles!
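// (eraseValueFromMap erased the map entry keyed by this callback handle, so
// no member of this object may be touched from here on.)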
9440 } 9441 9442 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 9443 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 9444 9445 // Forget all the expressions associated with users of the old value, 9446 // so that future queries will recompute the expressions using the new 9447 // value. 9448 Value *Old = getValPtr(); 9449 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 9450 SmallPtrSet<User *, 8> Visited; 9451 while (!Worklist.empty()) { 9452 User *U = Worklist.pop_back_val(); 9453 // Deleting the Old value will cause this to dangle. Postpone 9454 // that until everything else is done. 9455 if (U == Old) 9456 continue; 9457 if (!Visited.insert(U).second) 9458 continue; 9459 if (PHINode *PN = dyn_cast<PHINode>(U)) 9460 SE->ConstantEvolutionLoopExitValue.erase(PN); 9461 SE->eraseValueFromMap(U); 9462 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 9463 } 9464 // Delete the Old value. 9465 if (PHINode *PN = dyn_cast<PHINode>(Old)) 9466 SE->ConstantEvolutionLoopExitValue.erase(PN); 9467 SE->eraseValueFromMap(Old); 9468 // this now dangles! 9469 } 9470 9471 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 9472 : CallbackVH(V), SE(se) {} 9473 9474 //===----------------------------------------------------------------------===// 9475 // ScalarEvolution Class Implementation 9476 //===----------------------------------------------------------------------===// 9477 9478 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 9479 AssumptionCache &AC, DominatorTree &DT, 9480 LoopInfo &LI) 9481 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 9482 CouldNotCompute(new SCEVCouldNotCompute()), 9483 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 9484 ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), 9485 FirstUnknown(nullptr) { 9486 9487 // To use guards for proving predicates, we need to scan every instruction in 9488 // relevant basic blocks, and not just terminators. Doing this is a waste of 9489 // time if the IR does not actually contain any calls to 9490 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 9491 // 9492 // This pessimizes the case where a pass that preserves ScalarEvolution wants 9493 // to _add_ guards to the module when there weren't any before, and wants 9494 // ScalarEvolution to optimize based on those guards. For now we prefer to be 9495 // efficient in lieu of being smart in that rather obscure case. 
9496 9497 auto *GuardDecl = F.getParent()->getFunction( 9498 Intrinsic::getName(Intrinsic::experimental_guard)); 9499 HasGuards = GuardDecl && !GuardDecl->use_empty(); 9500 } 9501 9502 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 9503 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 9504 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 9505 ValueExprMap(std::move(Arg.ValueExprMap)), 9506 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 9507 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 9508 PredicatedBackedgeTakenCounts( 9509 std::move(Arg.PredicatedBackedgeTakenCounts)), 9510 ConstantEvolutionLoopExitValue( 9511 std::move(Arg.ConstantEvolutionLoopExitValue)), 9512 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 9513 LoopDispositions(std::move(Arg.LoopDispositions)), 9514 BlockDispositions(std::move(Arg.BlockDispositions)), 9515 UnsignedRanges(std::move(Arg.UnsignedRanges)), 9516 SignedRanges(std::move(Arg.SignedRanges)), 9517 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 9518 UniquePreds(std::move(Arg.UniquePreds)), 9519 SCEVAllocator(std::move(Arg.SCEVAllocator)), 9520 FirstUnknown(Arg.FirstUnknown) { 9521 Arg.FirstUnknown = nullptr; 9522 } 9523 9524 ScalarEvolution::~ScalarEvolution() { 9525 // Iterate through all the SCEVUnknown instances and call their 9526 // destructors, so that they release their references to their values. 9527 for (SCEVUnknown *U = FirstUnknown; U;) { 9528 SCEVUnknown *Tmp = U; 9529 U = U->Next; 9530 Tmp->~SCEVUnknown(); 9531 } 9532 FirstUnknown = nullptr; 9533 9534 ExprValueMap.clear(); 9535 ValueExprMap.clear(); 9536 HasRecMap.clear(); 9537 9538 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 9539 // that a loop had multiple computable exits. 9540 for (auto &BTCI : BackedgeTakenCounts) 9541 BTCI.second.clear(); 9542 for (auto &BTCI : PredicatedBackedgeTakenCounts) 9543 BTCI.second.clear(); 9544 9545 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 9546 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 9547 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 9548 } 9549 9550 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 9551 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 9552 } 9553 9554 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 9555 const Loop *L) { 9556 // Print all inner loops first 9557 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) 9558 PrintLoopInfo(OS, SE, *I); 9559 9560 OS << "Loop "; 9561 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9562 OS << ": "; 9563 9564 SmallVector<BasicBlock *, 8> ExitBlocks; 9565 L->getExitBlocks(ExitBlocks); 9566 if (ExitBlocks.size() != 1) 9567 OS << "<multiple exits> "; 9568 9569 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 9570 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 9571 } else { 9572 OS << "Unpredictable backedge-taken count. "; 9573 } 9574 9575 OS << "\n" 9576 "Loop "; 9577 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9578 OS << ": "; 9579 9580 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 9581 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 9582 } else { 9583 OS << "Unpredictable max backedge-taken count. 
"; 9584 } 9585 9586 OS << "\n" 9587 "Loop "; 9588 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9589 OS << ": "; 9590 9591 SCEVUnionPredicate Pred; 9592 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 9593 if (!isa<SCEVCouldNotCompute>(PBT)) { 9594 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 9595 OS << " Predicates:\n"; 9596 Pred.print(OS, 4); 9597 } else { 9598 OS << "Unpredictable predicated backedge-taken count. "; 9599 } 9600 OS << "\n"; 9601 } 9602 9603 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 9604 switch (LD) { 9605 case ScalarEvolution::LoopVariant: 9606 return "Variant"; 9607 case ScalarEvolution::LoopInvariant: 9608 return "Invariant"; 9609 case ScalarEvolution::LoopComputable: 9610 return "Computable"; 9611 } 9612 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 9613 } 9614 9615 void ScalarEvolution::print(raw_ostream &OS) const { 9616 // ScalarEvolution's implementation of the print method is to print 9617 // out SCEV values of all instructions that are interesting. Doing 9618 // this potentially causes it to create new SCEV objects though, 9619 // which technically conflicts with the const qualifier. This isn't 9620 // observable from outside the class though, so casting away the 9621 // const isn't dangerous. 9622 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 9623 9624 OS << "Classifying expressions for: "; 9625 F.printAsOperand(OS, /*PrintType=*/false); 9626 OS << "\n"; 9627 for (Instruction &I : instructions(F)) 9628 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 9629 OS << I << '\n'; 9630 OS << " --> "; 9631 const SCEV *SV = SE.getSCEV(&I); 9632 SV->print(OS); 9633 if (!isa<SCEVCouldNotCompute>(SV)) { 9634 OS << " U: "; 9635 SE.getUnsignedRange(SV).print(OS); 9636 OS << " S: "; 9637 SE.getSignedRange(SV).print(OS); 9638 } 9639 9640 const Loop *L = LI.getLoopFor(I.getParent()); 9641 9642 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 9643 if (AtUse != SV) { 9644 OS << " --> "; 9645 AtUse->print(OS); 9646 if (!isa<SCEVCouldNotCompute>(AtUse)) { 9647 OS << " U: "; 9648 SE.getUnsignedRange(AtUse).print(OS); 9649 OS << " S: "; 9650 SE.getSignedRange(AtUse).print(OS); 9651 } 9652 } 9653 9654 if (L) { 9655 OS << "\t\t" "Exits: "; 9656 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 9657 if (!SE.isLoopInvariant(ExitValue, L)) { 9658 OS << "<<Unknown>>"; 9659 } else { 9660 OS << *ExitValue; 9661 } 9662 9663 bool First = true; 9664 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 9665 if (First) { 9666 OS << "\t\t" "LoopDispositions: { "; 9667 First = false; 9668 } else { 9669 OS << ", "; 9670 } 9671 9672 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9673 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 9674 } 9675 9676 for (auto *InnerL : depth_first(L)) { 9677 if (InnerL == L) 9678 continue; 9679 if (First) { 9680 OS << "\t\t" "LoopDispositions: { "; 9681 First = false; 9682 } else { 9683 OS << ", "; 9684 } 9685 9686 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9687 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 9688 } 9689 9690 OS << " }"; 9691 } 9692 9693 OS << "\n"; 9694 } 9695 9696 OS << "Determining loop execution counts for: "; 9697 F.printAsOperand(OS, /*PrintType=*/false); 9698 OS << "\n"; 9699 for (LoopInfo::iterator I = LI.begin(), E = LI.end(); I != E; ++I) 9700 PrintLoopInfo(OS, &SE, *I); 9701 } 9702 9703 ScalarEvolution::LoopDisposition 9704 
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 9705 auto &Values = LoopDispositions[S]; 9706 for (auto &V : Values) { 9707 if (V.getPointer() == L) 9708 return V.getInt(); 9709 } 9710 Values.emplace_back(L, LoopVariant); 9711 LoopDisposition D = computeLoopDisposition(S, L); 9712 auto &Values2 = LoopDispositions[S]; 9713 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 9714 if (V.getPointer() == L) { 9715 V.setInt(D); 9716 break; 9717 } 9718 } 9719 return D; 9720 } 9721 9722 ScalarEvolution::LoopDisposition 9723 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 9724 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 9725 case scConstant: 9726 return LoopInvariant; 9727 case scTruncate: 9728 case scZeroExtend: 9729 case scSignExtend: 9730 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 9731 case scAddRecExpr: { 9732 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 9733 9734 // If L is the addrec's loop, it's computable. 9735 if (AR->getLoop() == L) 9736 return LoopComputable; 9737 9738 // Add recurrences are never invariant in the function-body (null loop). 9739 if (!L) 9740 return LoopVariant; 9741 9742 // This recurrence is variant w.r.t. L if L contains AR's loop. 9743 if (L->contains(AR->getLoop())) 9744 return LoopVariant; 9745 9746 // This recurrence is invariant w.r.t. L if AR's loop contains L. 9747 if (AR->getLoop()->contains(L)) 9748 return LoopInvariant; 9749 9750 // This recurrence is variant w.r.t. L if any of its operands 9751 // are variant. 9752 for (auto *Op : AR->operands()) 9753 if (!isLoopInvariant(Op, L)) 9754 return LoopVariant; 9755 9756 // Otherwise it's loop-invariant. 9757 return LoopInvariant; 9758 } 9759 case scAddExpr: 9760 case scMulExpr: 9761 case scUMaxExpr: 9762 case scSMaxExpr: { 9763 bool HasVarying = false; 9764 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 9765 LoopDisposition D = getLoopDisposition(Op, L); 9766 if (D == LoopVariant) 9767 return LoopVariant; 9768 if (D == LoopComputable) 9769 HasVarying = true; 9770 } 9771 return HasVarying ? LoopComputable : LoopInvariant; 9772 } 9773 case scUDivExpr: { 9774 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 9775 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 9776 if (LD == LoopVariant) 9777 return LoopVariant; 9778 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 9779 if (RD == LoopVariant) 9780 return LoopVariant; 9781 return (LD == LoopInvariant && RD == LoopInvariant) ? 9782 LoopInvariant : LoopComputable; 9783 } 9784 case scUnknown: 9785 // All non-instruction values are loop invariant. All instructions are loop 9786 // invariant if they are not contained in the specified loop. 9787 // Instructions are never considered invariant in the function body 9788 // (null loop) because they are defined within the "loop". 9789 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 9790 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 9791 return LoopInvariant; 9792 case scCouldNotCompute: 9793 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 9794 } 9795 llvm_unreachable("Unknown SCEV kind!"); 9796 } 9797 9798 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 9799 return getLoopDisposition(S, L) == LoopInvariant; 9800 } 9801 9802 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 9803 return getLoopDisposition(S, L) == LoopComputable; 9804 } 9805 9806 ScalarEvolution::BlockDisposition 9807 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 9808 auto &Values = BlockDispositions[S]; 9809 for (auto &V : Values) { 9810 if (V.getPointer() == BB) 9811 return V.getInt(); 9812 } 9813 Values.emplace_back(BB, DoesNotDominateBlock); 9814 BlockDisposition D = computeBlockDisposition(S, BB); 9815 auto &Values2 = BlockDispositions[S]; 9816 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 9817 if (V.getPointer() == BB) { 9818 V.setInt(D); 9819 break; 9820 } 9821 } 9822 return D; 9823 } 9824 9825 ScalarEvolution::BlockDisposition 9826 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 9827 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 9828 case scConstant: 9829 return ProperlyDominatesBlock; 9830 case scTruncate: 9831 case scZeroExtend: 9832 case scSignExtend: 9833 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 9834 case scAddRecExpr: { 9835 // This uses a "dominates" query instead of "properly dominates" query 9836 // to test for proper dominance too, because the instruction which 9837 // produces the addrec's value is a PHI, and a PHI effectively properly 9838 // dominates its entire containing block. 9839 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 9840 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 9841 return DoesNotDominateBlock; 9842 } 9843 // FALL THROUGH into SCEVNAryExpr handling. 9844 case scAddExpr: 9845 case scMulExpr: 9846 case scUMaxExpr: 9847 case scSMaxExpr: { 9848 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 9849 bool Proper = true; 9850 for (const SCEV *NAryOp : NAry->operands()) { 9851 BlockDisposition D = getBlockDisposition(NAryOp, BB); 9852 if (D == DoesNotDominateBlock) 9853 return DoesNotDominateBlock; 9854 if (D == DominatesBlock) 9855 Proper = false; 9856 } 9857 return Proper ? ProperlyDominatesBlock : DominatesBlock; 9858 } 9859 case scUDivExpr: { 9860 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 9861 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 9862 BlockDisposition LD = getBlockDisposition(LHS, BB); 9863 if (LD == DoesNotDominateBlock) 9864 return DoesNotDominateBlock; 9865 BlockDisposition RD = getBlockDisposition(RHS, BB); 9866 if (RD == DoesNotDominateBlock) 9867 return DoesNotDominateBlock; 9868 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
9869 ProperlyDominatesBlock : DominatesBlock; 9870 } 9871 case scUnknown: 9872 if (Instruction *I = 9873 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 9874 if (I->getParent() == BB) 9875 return DominatesBlock; 9876 if (DT.properlyDominates(I->getParent(), BB)) 9877 return ProperlyDominatesBlock; 9878 return DoesNotDominateBlock; 9879 } 9880 return ProperlyDominatesBlock; 9881 case scCouldNotCompute: 9882 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 9883 } 9884 llvm_unreachable("Unknown SCEV kind!"); 9885 } 9886 9887 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 9888 return getBlockDisposition(S, BB) >= DominatesBlock; 9889 } 9890 9891 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 9892 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 9893 } 9894 9895 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 9896 // Search for a SCEV expression node within an expression tree. 9897 // Implements SCEVTraversal::Visitor. 9898 struct SCEVSearch { 9899 const SCEV *Node; 9900 bool IsFound; 9901 9902 SCEVSearch(const SCEV *N): Node(N), IsFound(false) {} 9903 9904 bool follow(const SCEV *S) { 9905 IsFound |= (S == Node); 9906 return !IsFound; 9907 } 9908 bool isDone() const { return IsFound; } 9909 }; 9910 9911 SCEVSearch Search(Op); 9912 visitAll(S, Search); 9913 return Search.IsFound; 9914 } 9915 9916 void ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 9917 ValuesAtScopes.erase(S); 9918 LoopDispositions.erase(S); 9919 BlockDispositions.erase(S); 9920 UnsignedRanges.erase(S); 9921 SignedRanges.erase(S); 9922 ExprValueMap.erase(S); 9923 HasRecMap.erase(S); 9924 9925 auto RemoveSCEVFromBackedgeMap = 9926 [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 9927 for (auto I = Map.begin(), E = Map.end(); I != E;) { 9928 BackedgeTakenInfo &BEInfo = I->second; 9929 if (BEInfo.hasOperand(S, this)) { 9930 BEInfo.clear(); 9931 Map.erase(I++); 9932 } else 9933 ++I; 9934 } 9935 }; 9936 9937 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 9938 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 9939 } 9940 9941 typedef DenseMap<const Loop *, std::string> VerifyMap; 9942 9943 /// replaceSubString - Replaces all occurrences of From in Str with To. 9944 static void replaceSubString(std::string &Str, StringRef From, StringRef To) { 9945 size_t Pos = 0; 9946 while ((Pos = Str.find(From, Pos)) != std::string::npos) { 9947 Str.replace(Pos, From.size(), To.data(), To.size()); 9948 Pos += To.size(); 9949 } 9950 } 9951 9952 /// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis. 9953 static void 9954 getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) { 9955 std::string &S = Map[L]; 9956 if (S.empty()) { 9957 raw_string_ostream OS(S); 9958 SE.getBackedgeTakenCount(L)->print(OS); 9959 9960 // false and 0 are semantically equivalent. This can happen in dead loops. 9961 replaceSubString(OS.str(), "false", "0"); 9962 // Remove wrap flags; their use in SCEV is highly fragile. 9963 // FIXME: Remove this when SCEV gets smarter about them. 9964 replaceSubString(OS.str(), "<nw>", ""); 9965 replaceSubString(OS.str(), "<nsw>", ""); 9966 replaceSubString(OS.str(), "<nuw>", ""); 9967 } 9968 9969 for (auto *R : reverse(*L)) 9970 getLoopBackedgeTakenCounts(R, Map, SE); // recurse.
9971 } 9972 9973 void ScalarEvolution::verify() const { 9974 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 9975 9976 // Gather stringified backedge-taken counts for all loops using SCEV's caches. 9977 // FIXME: It would be much better to store actual values instead of strings, 9978 // but SCEV pointers will change if we drop the caches. 9979 VerifyMap BackedgeDumpsOld, BackedgeDumpsNew; 9980 for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I) 9981 getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE); 9982 9983 // Gather stringified backedge-taken counts for all loops using a fresh 9984 // ScalarEvolution object. 9985 ScalarEvolution SE2(F, TLI, AC, DT, LI); 9986 for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I) 9987 getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE2); 9988 9989 // Now compare whether they're the same with and without caches. This allows 9990 // verifying that no pass changed the cache. 9991 assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() && 9992 "New loops suddenly appeared!"); 9993 9994 for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(), 9995 OldE = BackedgeDumpsOld.end(), 9996 NewI = BackedgeDumpsNew.begin(); 9997 OldI != OldE; ++OldI, ++NewI) { 9998 assert(OldI->first == NewI->first && "Loop order changed!"); 9999 10000 // Compare the stringified SCEVs. We don't care if an undef backedge-taken 10001 // count changes. 10002 // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. Such a 10003 // change means that a pass is buggy or SCEV has to learn a new pattern, 10004 // but it is usually not harmful. 10005 if (OldI->second != NewI->second && 10006 OldI->second.find("undef") == std::string::npos && 10007 NewI->second.find("undef") == std::string::npos && 10008 OldI->second != "***COULDNOTCOMPUTE***" && 10009 NewI->second != "***COULDNOTCOMPUTE***") { 10010 dbgs() << "SCEVValidator: SCEV for loop '" 10011 << OldI->first->getHeader()->getName() 10012 << "' changed from '" << OldI->second 10013 << "' to '" << NewI->second << "'!\n"; 10014 std::abort(); 10015 } 10016 } 10017 10018 // TODO: Verify more things.
10019 } 10020 10021 char ScalarEvolutionAnalysis::PassID; 10022 10023 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 10024 AnalysisManager<Function> &AM) { 10025 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 10026 AM.getResult<AssumptionAnalysis>(F), 10027 AM.getResult<DominatorTreeAnalysis>(F), 10028 AM.getResult<LoopAnalysis>(F)); 10029 } 10030 10031 PreservedAnalyses 10032 ScalarEvolutionPrinterPass::run(Function &F, AnalysisManager<Function> &AM) { 10033 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 10034 return PreservedAnalyses::all(); 10035 } 10036 10037 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 10038 "Scalar Evolution Analysis", false, true) 10039 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 10040 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 10041 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 10042 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 10043 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 10044 "Scalar Evolution Analysis", false, true) 10045 char ScalarEvolutionWrapperPass::ID = 0; 10046 10047 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 10048 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 10049 } 10050 10051 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 10052 SE.reset(new ScalarEvolution( 10053 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), 10054 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 10055 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 10056 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 10057 return false; 10058 } 10059 10060 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 10061 10062 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 10063 SE->print(OS); 10064 } 10065 10066 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 10067 if (!VerifySCEV) 10068 return; 10069 10070 SE->verify(); 10071 } 10072 10073 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 10074 AU.setPreservesAll(); 10075 AU.addRequiredTransitive<AssumptionCacheTracker>(); 10076 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 10077 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 10078 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 10079 } 10080 10081 const SCEVPredicate * 10082 ScalarEvolution::getEqualPredicate(const SCEVUnknown *LHS, 10083 const SCEVConstant *RHS) { 10084 FoldingSetNodeID ID; 10085 // Unique this node based on the arguments 10086 ID.AddInteger(SCEVPredicate::P_Equal); 10087 ID.AddPointer(LHS); 10088 ID.AddPointer(RHS); 10089 void *IP = nullptr; 10090 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 10091 return S; 10092 SCEVEqualPredicate *Eq = new (SCEVAllocator) 10093 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 10094 UniquePreds.InsertNode(Eq, IP); 10095 return Eq; 10096 } 10097 10098 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 10099 const SCEVAddRecExpr *AR, 10100 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 10101 FoldingSetNodeID ID; 10102 // Unique this node based on the arguments 10103 ID.AddInteger(SCEVPredicate::P_Wrap); 10104 ID.AddPointer(AR); 10105 ID.AddInteger(AddedFlags); 10106 void *IP = nullptr; 10107 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 10108 return S; 10109 auto *OF = new (SCEVAllocator) 10110 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 10111 
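// Register the new predicate in the folding set so that later queries with
// the same (AR, AddedFlags) key return this same node.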
UniquePreds.InsertNode(OF, IP); 10112 return OF; 10113 } 10114 10115 namespace { 10116 10117 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 10118 public: 10119 // Rewrites \p S in the context of a loop L and the predicate A. 10120 // If Assume is true, rewrite is free to add further predicates to A 10121 // such that the result will be an AddRecExpr. 10122 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 10123 SCEVUnionPredicate &A, bool Assume) { 10124 SCEVPredicateRewriter Rewriter(L, SE, A, Assume); 10125 return Rewriter.visit(S); 10126 } 10127 10128 SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 10129 SCEVUnionPredicate &P, bool Assume) 10130 : SCEVRewriteVisitor(SE), P(P), L(L), Assume(Assume) {} 10131 10132 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 10133 auto ExprPreds = P.getPredicatesForExpr(Expr); 10134 for (auto *Pred : ExprPreds) 10135 if (const auto *IPred = dyn_cast<const SCEVEqualPredicate>(Pred)) 10136 if (IPred->getLHS() == Expr) 10137 return IPred->getRHS(); 10138 10139 return Expr; 10140 } 10141 10142 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 10143 const SCEV *Operand = visit(Expr->getOperand()); 10144 const SCEVAddRecExpr *AR = dyn_cast<const SCEVAddRecExpr>(Operand); 10145 if (AR && AR->getLoop() == L && AR->isAffine()) { 10146 // This couldn't be folded because the operand didn't have the nuw 10147 // flag. Add the nusw flag as an assumption that we could make. 10148 const SCEV *Step = AR->getStepRecurrence(SE); 10149 Type *Ty = Expr->getType(); 10150 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 10151 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 10152 SE.getSignExtendExpr(Step, Ty), L, 10153 AR->getNoWrapFlags()); 10154 } 10155 return SE.getZeroExtendExpr(Operand, Expr->getType()); 10156 } 10157 10158 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 10159 const SCEV *Operand = visit(Expr->getOperand()); 10160 const SCEVAddRecExpr *AR = dyn_cast<const SCEVAddRecExpr>(Operand); 10161 if (AR && AR->getLoop() == L && AR->isAffine()) { 10162 // This couldn't be folded because the operand didn't have the nsw 10163 // flag. Add the nssw flag as an assumption that we could make. 10164 const SCEV *Step = AR->getStepRecurrence(SE); 10165 Type *Ty = Expr->getType(); 10166 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 10167 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 10168 SE.getSignExtendExpr(Step, Ty), L, 10169 AR->getNoWrapFlags()); 10170 } 10171 return SE.getSignExtendExpr(Operand, Expr->getType()); 10172 } 10173 10174 private: 10175 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 10176 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 10177 auto *A = SE.getWrapPredicate(AR, AddedFlags); 10178 if (!Assume) { 10179 // Check if we've already made this assumption. 
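      // Without Assume we may not grow the predicate set; succeed only if the
      // required predicate is already implied by P.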
10180 if (P.implies(A)) 10181 return true; 10182 return false; 10183 } 10184 P.add(A); 10185 return true; 10186 } 10187 10188 SCEVUnionPredicate &P; 10189 const Loop *L; 10190 bool Assume; 10191 }; 10192 } // end anonymous namespace 10193 10194 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 10195 SCEVUnionPredicate &Preds) { 10196 return SCEVPredicateRewriter::rewrite(S, L, *this, Preds, false); 10197 } 10198 10199 const SCEVAddRecExpr * 10200 ScalarEvolution::convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, 10201 SCEVUnionPredicate &Preds) { 10202 SCEVUnionPredicate TransformPreds; 10203 S = SCEVPredicateRewriter::rewrite(S, L, *this, TransformPreds, true); 10204 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 10205 10206 if (!AddRec) 10207 return nullptr; 10208 10209 // Since the transformation was successful, we can now transfer the SCEV 10210 // predicates. 10211 Preds.add(&TransformPreds); 10212 return AddRec; 10213 } 10214 10215 /// SCEV predicates 10216 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 10217 SCEVPredicateKind Kind) 10218 : FastID(ID), Kind(Kind) {} 10219 10220 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 10221 const SCEVUnknown *LHS, 10222 const SCEVConstant *RHS) 10223 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {} 10224 10225 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 10226 const auto *Op = dyn_cast<const SCEVEqualPredicate>(N); 10227 10228 if (!Op) 10229 return false; 10230 10231 return Op->LHS == LHS && Op->RHS == RHS; 10232 } 10233 10234 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 10235 10236 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 10237 10238 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 10239 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 10240 } 10241 10242 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 10243 const SCEVAddRecExpr *AR, 10244 IncrementWrapFlags Flags) 10245 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 10246 10247 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 10248 10249 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 10250 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 10251 10252 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 10253 } 10254 10255 bool SCEVWrapPredicate::isAlwaysTrue() const { 10256 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 10257 IncrementWrapFlags IFlags = Flags; 10258 10259 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 10260 IFlags = clearFlags(IFlags, IncrementNSSW); 10261 10262 return IFlags == IncrementAnyWrap; 10263 } 10264 10265 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 10266 OS.indent(Depth) << *getExpr() << " Added Flags: "; 10267 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 10268 OS << "<nusw>"; 10269 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 10270 OS << "<nssw>"; 10271 OS << "\n"; 10272 } 10273 10274 SCEVWrapPredicate::IncrementWrapFlags 10275 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 10276 ScalarEvolution &SE) { 10277 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 10278 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 10279 10280 // We can safely transfer the NSW flag as NSSW. 
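  // (Signed no-wrap of the entire recurrence subsumes signed no-self-wrap of
  // its increment.)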
if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 10282 ImpliedFlags = IncrementNSSW; 10283 10284 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 10285 // If the increment is non-negative, the SCEV NUW flag will also imply the 10286 // WrapPredicate NUSW flag. 10287 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 10288 if (Step->getValue()->getValue().isNonNegative()) 10289 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 10290 } 10291 10292 return ImpliedFlags; 10293 } 10294 10295 /// Union predicates don't get cached, so create a dummy set ID for them. 10296 SCEVUnionPredicate::SCEVUnionPredicate() 10297 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 10298 10299 bool SCEVUnionPredicate::isAlwaysTrue() const { 10300 return all_of(Preds, 10301 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 10302 } 10303 10304 ArrayRef<const SCEVPredicate *> 10305 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 10306 auto I = SCEVToPreds.find(Expr); 10307 if (I == SCEVToPreds.end()) 10308 return ArrayRef<const SCEVPredicate *>(); 10309 return I->second; 10310 } 10311 10312 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 10313 if (const auto *Set = dyn_cast<const SCEVUnionPredicate>(N)) 10314 return all_of(Set->Preds, 10315 [this](const SCEVPredicate *I) { return this->implies(I); }); 10316 10317 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 10318 if (ScevPredsIt == SCEVToPreds.end()) 10319 return false; 10320 auto &SCEVPreds = ScevPredsIt->second; 10321 10322 return any_of(SCEVPreds, 10323 [N](const SCEVPredicate *I) { return I->implies(N); }); 10324 } 10325 10326 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 10327 10328 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 10329 for (auto Pred : Preds) 10330 Pred->print(OS, Depth); 10331 } 10332 10333 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 10334 if (const auto *Set = dyn_cast<const SCEVUnionPredicate>(N)) { 10335 for (auto Pred : Set->Preds) 10336 add(Pred); 10337 return; 10338 } 10339 10340 if (implies(N)) 10341 return; 10342 10343 const SCEV *Key = N->getExpr(); 10344 assert(Key && "Only SCEVUnionPredicate doesn't have an " 10345 "associated expression!"); 10346 10347 SCEVToPreds[Key].push_back(N); 10348 Preds.push_back(N); 10349 } 10350 10351 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 10352 Loop &L) 10353 : SE(SE), L(L), Generation(0), BackedgeCount(nullptr) {} 10354 10355 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 10356 const SCEV *Expr = SE.getSCEV(V); 10357 RewriteEntry &Entry = RewriteMap[Expr]; 10358 10359 // If we already have an entry and the version matches, return it. 10360 if (Entry.second && Generation == Entry.first) 10361 return Entry.second; 10362 10363 // We found an entry but it's stale. Rewrite the stale entry 10364 // according to the current predicate.
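  // Illustrative: an entry rewritten at generation 3 is reused at generation 5
  // as the starting point and rewritten again with the predicates accumulated
  // since then, rather than starting over from the original expression.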
if (Entry.second) 10366 Expr = Entry.second; 10367 10368 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 10369 Entry = {Generation, NewSCEV}; 10370 10371 return NewSCEV; 10372 } 10373 10374 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 10375 if (!BackedgeCount) { 10376 SCEVUnionPredicate BackedgePred; 10377 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 10378 addPredicate(BackedgePred); 10379 } 10380 return BackedgeCount; 10381 } 10382 10383 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 10384 if (Preds.implies(&Pred)) 10385 return; 10386 Preds.add(&Pred); 10387 updateGeneration(); 10388 } 10389 10390 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 10391 return Preds; 10392 } 10393 10394 void PredicatedScalarEvolution::updateGeneration() { 10395 // If the generation number wrapped, recompute everything. 10396 if (++Generation == 0) { 10397 for (auto &II : RewriteMap) { 10398 const SCEV *Rewritten = II.second.second; 10399 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 10400 } 10401 } 10402 } 10403 10404 void PredicatedScalarEvolution::setNoOverflow( 10405 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 10406 const SCEV *Expr = getSCEV(V); 10407 const auto *AR = cast<SCEVAddRecExpr>(Expr); 10408 10409 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 10410 10411 // Clear the statically implied flags. 10412 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 10413 addPredicate(*SE.getWrapPredicate(AR, Flags)); 10414 10415 auto II = FlagsMap.insert({V, Flags}); 10416 if (!II.second) 10417 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 10418 } 10419 10420 bool PredicatedScalarEvolution::hasNoOverflow( 10421 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 10422 const SCEV *Expr = getSCEV(V); 10423 const auto *AR = cast<SCEVAddRecExpr>(Expr); 10424 10425 Flags = SCEVWrapPredicate::clearFlags( 10426 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 10427 10428 auto II = FlagsMap.find(V); 10429 10430 if (II != FlagsMap.end()) 10431 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 10432 10433 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 10434 } 10435 10436 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 10437 const SCEV *Expr = this->getSCEV(V); 10438 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, Preds); 10439 10440 if (!New) 10441 return nullptr; 10442 10443 updateGeneration(); 10444 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 10445 return New; 10446 } 10447 10448 PredicatedScalarEvolution::PredicatedScalarEvolution( 10449 const PredicatedScalarEvolution &Init) 10450 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 10451 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 10452 for (auto I = Init.FlagsMap.begin(), E = Init.FlagsMap.end(); I != E; ++I) 10453 FlagsMap.insert(*I); 10454 } 10455 10456 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 10457 // For each block. 10458 for (auto *BB : L.getBlocks()) 10459 for (auto &I : *BB) { 10460 if (!SE.isSCEVable(I.getType())) 10461 continue; 10462 10463 auto *Expr = SE.getSCEV(&I); 10464 auto II = RewriteMap.find(Expr); 10465 10466 if (II == RewriteMap.end()) 10467 continue; 10468 10469 // Don't print things that are not interesting.
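      // i.e. entries whose predicated expression is identical to the original.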
10470 if (II->second.second == Expr) 10471 continue; 10472 10473 OS.indent(Depth) << "[PSE]" << I << ":\n"; 10474 OS.indent(Depth + 2) << *Expr << "\n"; 10475 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 10476 } 10477 } 10478