1 //===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file contains the implementation of the scalar evolution analysis 11 // engine, which is used primarily to analyze expressions involving induction 12 // variables in loops. 13 // 14 // There are several aspects to this library. First is the representation of 15 // scalar expressions, which are represented as subclasses of the SCEV class. 16 // These classes are used to represent certain types of subexpressions that we 17 // can handle. We only create one SCEV of a particular shape, so 18 // pointer-comparisons for equality are legal. 19 // 20 // One important aspect of the SCEV objects is that they are never cyclic, even 21 // if there is a cycle in the dataflow for an expression (ie, a PHI node). If 22 // the PHI node is one of the idioms that we can represent (e.g., a polynomial 23 // recurrence) then we represent it directly as a recurrence node, otherwise we 24 // represent it as a SCEVUnknown node. 25 // 26 // In addition to being able to represent expressions of various types, we also 27 // have folders that are used to build the *canonical* representation for a 28 // particular expression. These folders are capable of using a variety of 29 // rewrite rules to simplify the expressions. 30 // 31 // Once the folders are defined, we can implement the more interesting 32 // higher-level code, such as the code that recognizes PHI nodes of various 33 // types, computes the execution count of a loop, etc. 34 // 35 // TODO: We should use these routines and value representations to implement 36 // dependence analysis! 37 // 38 //===----------------------------------------------------------------------===// 39 // 40 // There are several good references for the techniques used in this analysis. 41 // 42 // Chains of recurrences -- a method to expedite the evaluation 43 // of closed-form functions 44 // Olaf Bachmann, Paul S. Wang, Eugene V. Zima 45 // 46 // On computational properties of chains of recurrences 47 // Eugene V. Zima 48 // 49 // Symbolic Evaluation of Chains of Recurrences for Loop Optimization 50 // Robert A. van Engelen 51 // 52 // Efficient Symbolic Analysis for Optimizing Compilers 53 // Robert A. 
van Engelen 54 // 55 // Using the chains of recurrences algebra for data dependence testing and 56 // induction variable substitution 57 // MS Thesis, Johnie Birch 58 // 59 //===----------------------------------------------------------------------===// 60 61 #define DEBUG_TYPE "scalar-evolution" 62 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 63 #include "llvm/Constants.h" 64 #include "llvm/DerivedTypes.h" 65 #include "llvm/GlobalVariable.h" 66 #include "llvm/GlobalAlias.h" 67 #include "llvm/Instructions.h" 68 #include "llvm/LLVMContext.h" 69 #include "llvm/Operator.h" 70 #include "llvm/Analysis/ConstantFolding.h" 71 #include "llvm/Analysis/Dominators.h" 72 #include "llvm/Analysis/InstructionSimplify.h" 73 #include "llvm/Analysis/LoopInfo.h" 74 #include "llvm/Analysis/ValueTracking.h" 75 #include "llvm/Assembly/Writer.h" 76 #include "llvm/Target/TargetData.h" 77 #include "llvm/Support/CommandLine.h" 78 #include "llvm/Support/ConstantRange.h" 79 #include "llvm/Support/Debug.h" 80 #include "llvm/Support/ErrorHandling.h" 81 #include "llvm/Support/GetElementPtrTypeIterator.h" 82 #include "llvm/Support/InstIterator.h" 83 #include "llvm/Support/MathExtras.h" 84 #include "llvm/Support/raw_ostream.h" 85 #include "llvm/ADT/Statistic.h" 86 #include "llvm/ADT/STLExtras.h" 87 #include "llvm/ADT/SmallPtrSet.h" 88 #include <algorithm> 89 using namespace llvm; 90 91 STATISTIC(NumArrayLenItCounts, 92 "Number of trip counts computed with array length"); 93 STATISTIC(NumTripCountsComputed, 94 "Number of loops with predictable loop counts"); 95 STATISTIC(NumTripCountsNotComputed, 96 "Number of loops without predictable loop counts"); 97 STATISTIC(NumBruteForceTripCountsComputed, 98 "Number of loops with trip counts computed by force"); 99 100 static cl::opt<unsigned> 101 MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, 102 cl::desc("Maximum number of iterations SCEV will " 103 "symbolically execute a constant " 104 "derived loop"), 105 cl::init(100)); 106 107 INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution", 108 "Scalar Evolution Analysis", false, true) 109 INITIALIZE_PASS_DEPENDENCY(LoopInfo) 110 INITIALIZE_PASS_DEPENDENCY(DominatorTree) 111 INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution", 112 "Scalar Evolution Analysis", false, true) 113 char ScalarEvolution::ID = 0; 114 115 //===----------------------------------------------------------------------===// 116 // SCEV class definitions 117 //===----------------------------------------------------------------------===// 118 119 //===----------------------------------------------------------------------===// 120 // Implementation of the SCEV class. 
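//
// As an informal illustration of the notation used by this analysis (the
// exact form is produced by SCEV::print below), the canonical induction
// variable of a loop such as
//
//   for (int i = 0; i != n; ++i) { ... }
//
// is the add recurrence {0,+,1}<%header>, where %header names the loop's
// header block, and a derived expression such as 4*i + 7 is {7,+,4}<%header>.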
121 // 122 123 void SCEV::dump() const { 124 print(dbgs()); 125 dbgs() << '\n'; 126 } 127 128 void SCEV::print(raw_ostream &OS) const { 129 switch (getSCEVType()) { 130 case scConstant: 131 WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false); 132 return; 133 case scTruncate: { 134 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this); 135 const SCEV *Op = Trunc->getOperand(); 136 OS << "(trunc " << *Op->getType() << " " << *Op << " to " 137 << *Trunc->getType() << ")"; 138 return; 139 } 140 case scZeroExtend: { 141 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this); 142 const SCEV *Op = ZExt->getOperand(); 143 OS << "(zext " << *Op->getType() << " " << *Op << " to " 144 << *ZExt->getType() << ")"; 145 return; 146 } 147 case scSignExtend: { 148 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this); 149 const SCEV *Op = SExt->getOperand(); 150 OS << "(sext " << *Op->getType() << " " << *Op << " to " 151 << *SExt->getType() << ")"; 152 return; 153 } 154 case scAddRecExpr: { 155 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this); 156 OS << "{" << *AR->getOperand(0); 157 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) 158 OS << ",+," << *AR->getOperand(i); 159 OS << "}<"; 160 if (AR->hasNoUnsignedWrap()) 161 OS << "nuw><"; 162 if (AR->hasNoSignedWrap()) 163 OS << "nsw><"; 164 WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false); 165 OS << ">"; 166 return; 167 } 168 case scAddExpr: 169 case scMulExpr: 170 case scUMaxExpr: 171 case scSMaxExpr: { 172 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this); 173 const char *OpStr = 0; 174 switch (NAry->getSCEVType()) { 175 case scAddExpr: OpStr = " + "; break; 176 case scMulExpr: OpStr = " * "; break; 177 case scUMaxExpr: OpStr = " umax "; break; 178 case scSMaxExpr: OpStr = " smax "; break; 179 } 180 OS << "("; 181 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); 182 I != E; ++I) { 183 OS << **I; 184 if (llvm::next(I) != E) 185 OS << OpStr; 186 } 187 OS << ")"; 188 return; 189 } 190 case scUDivExpr: { 191 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this); 192 OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")"; 193 return; 194 } 195 case scUnknown: { 196 const SCEVUnknown *U = cast<SCEVUnknown>(this); 197 const Type *AllocTy; 198 if (U->isSizeOf(AllocTy)) { 199 OS << "sizeof(" << *AllocTy << ")"; 200 return; 201 } 202 if (U->isAlignOf(AllocTy)) { 203 OS << "alignof(" << *AllocTy << ")"; 204 return; 205 } 206 207 const Type *CTy; 208 Constant *FieldNo; 209 if (U->isOffsetOf(CTy, FieldNo)) { 210 OS << "offsetof(" << *CTy << ", "; 211 WriteAsOperand(OS, FieldNo, false); 212 OS << ")"; 213 return; 214 } 215 216 // Otherwise just print it normally. 
217 WriteAsOperand(OS, U->getValue(), false); 218 return; 219 } 220 case scCouldNotCompute: 221 OS << "***COULDNOTCOMPUTE***"; 222 return; 223 default: break; 224 } 225 llvm_unreachable("Unknown SCEV kind!"); 226 } 227 228 const Type *SCEV::getType() const { 229 switch (getSCEVType()) { 230 case scConstant: 231 return cast<SCEVConstant>(this)->getType(); 232 case scTruncate: 233 case scZeroExtend: 234 case scSignExtend: 235 return cast<SCEVCastExpr>(this)->getType(); 236 case scAddRecExpr: 237 case scMulExpr: 238 case scUMaxExpr: 239 case scSMaxExpr: 240 return cast<SCEVNAryExpr>(this)->getType(); 241 case scAddExpr: 242 return cast<SCEVAddExpr>(this)->getType(); 243 case scUDivExpr: 244 return cast<SCEVUDivExpr>(this)->getType(); 245 case scUnknown: 246 return cast<SCEVUnknown>(this)->getType(); 247 case scCouldNotCompute: 248 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 249 return 0; 250 default: break; 251 } 252 llvm_unreachable("Unknown SCEV kind!"); 253 return 0; 254 } 255 256 bool SCEV::isZero() const { 257 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 258 return SC->getValue()->isZero(); 259 return false; 260 } 261 262 bool SCEV::isOne() const { 263 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 264 return SC->getValue()->isOne(); 265 return false; 266 } 267 268 bool SCEV::isAllOnesValue() const { 269 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 270 return SC->getValue()->isAllOnesValue(); 271 return false; 272 } 273 274 SCEVCouldNotCompute::SCEVCouldNotCompute() : 275 SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {} 276 277 bool SCEVCouldNotCompute::classof(const SCEV *S) { 278 return S->getSCEVType() == scCouldNotCompute; 279 } 280 281 const SCEV *ScalarEvolution::getConstant(ConstantInt *V) { 282 FoldingSetNodeID ID; 283 ID.AddInteger(scConstant); 284 ID.AddPointer(V); 285 void *IP = 0; 286 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 287 SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V); 288 UniqueSCEVs.InsertNode(S, IP); 289 return S; 290 } 291 292 const SCEV *ScalarEvolution::getConstant(const APInt& Val) { 293 return getConstant(ConstantInt::get(getContext(), Val)); 294 } 295 296 const SCEV * 297 ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) { 298 const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); 299 return getConstant(ConstantInt::get(ITy, V, isSigned)); 300 } 301 302 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, 303 unsigned SCEVTy, const SCEV *op, const Type *ty) 304 : SCEV(ID, SCEVTy), Op(op), Ty(ty) {} 305 306 SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, 307 const SCEV *op, const Type *ty) 308 : SCEVCastExpr(ID, scTruncate, op, ty) { 309 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && 310 (Ty->isIntegerTy() || Ty->isPointerTy()) && 311 "Cannot truncate non-integer value!"); 312 } 313 314 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, 315 const SCEV *op, const Type *ty) 316 : SCEVCastExpr(ID, scZeroExtend, op, ty) { 317 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && 318 (Ty->isIntegerTy() || Ty->isPointerTy()) && 319 "Cannot zero extend non-integer value!"); 320 } 321 322 SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, 323 const SCEV *op, const Type *ty) 324 : SCEVCastExpr(ID, scSignExtend, op, ty) { 325 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && 326 
(Ty->isIntegerTy() || Ty->isPointerTy()) && 327 "Cannot sign extend non-integer value!"); 328 } 329 330 void SCEVUnknown::deleted() { 331 // Clear this SCEVUnknown from various maps. 332 SE->forgetMemoizedResults(this); 333 334 // Remove this SCEVUnknown from the uniquing map. 335 SE->UniqueSCEVs.RemoveNode(this); 336 337 // Release the value. 338 setValPtr(0); 339 } 340 341 void SCEVUnknown::allUsesReplacedWith(Value *New) { 342 // Clear this SCEVUnknown from various maps. 343 SE->forgetMemoizedResults(this); 344 345 // Remove this SCEVUnknown from the uniquing map. 346 SE->UniqueSCEVs.RemoveNode(this); 347 348 // Update this SCEVUnknown to point to the new value. This is needed 349 // because there may still be outstanding SCEVs which still point to 350 // this SCEVUnknown. 351 setValPtr(New); 352 } 353 354 bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const { 355 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 356 if (VCE->getOpcode() == Instruction::PtrToInt) 357 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 358 if (CE->getOpcode() == Instruction::GetElementPtr && 359 CE->getOperand(0)->isNullValue() && 360 CE->getNumOperands() == 2) 361 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1))) 362 if (CI->isOne()) { 363 AllocTy = cast<PointerType>(CE->getOperand(0)->getType()) 364 ->getElementType(); 365 return true; 366 } 367 368 return false; 369 } 370 371 bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const { 372 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 373 if (VCE->getOpcode() == Instruction::PtrToInt) 374 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 375 if (CE->getOpcode() == Instruction::GetElementPtr && 376 CE->getOperand(0)->isNullValue()) { 377 const Type *Ty = 378 cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); 379 if (const StructType *STy = dyn_cast<StructType>(Ty)) 380 if (!STy->isPacked() && 381 CE->getNumOperands() == 3 && 382 CE->getOperand(1)->isNullValue()) { 383 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2))) 384 if (CI->isOne() && 385 STy->getNumElements() == 2 && 386 STy->getElementType(0)->isIntegerTy(1)) { 387 AllocTy = STy->getElementType(1); 388 return true; 389 } 390 } 391 } 392 393 return false; 394 } 395 396 bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const { 397 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 398 if (VCE->getOpcode() == Instruction::PtrToInt) 399 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 400 if (CE->getOpcode() == Instruction::GetElementPtr && 401 CE->getNumOperands() == 3 && 402 CE->getOperand(0)->isNullValue() && 403 CE->getOperand(1)->isNullValue()) { 404 const Type *Ty = 405 cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); 406 // Ignore vector types here so that ScalarEvolutionExpander doesn't 407 // emit getelementptrs that index into vectors. 408 if (Ty->isStructTy() || Ty->isArrayTy()) { 409 CTy = Ty; 410 FieldNo = CE->getOperand(2); 411 return true; 412 } 413 } 414 415 return false; 416 } 417 418 //===----------------------------------------------------------------------===// 419 // SCEV Utilities 420 //===----------------------------------------------------------------------===// 421 422 namespace { 423 /// SCEVComplexityCompare - Return true if the complexity of the LHS is less 424 /// than the complexity of the RHS. This comparator is used to canonicalize 425 /// expressions. 
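  ///
  /// For example, constants sort before every other kind of expression (they
  /// have the smallest getSCEVType() value), so the folders below (getAddExpr,
  /// getMulExpr) can assume that any constant operands appear first in a
  /// sorted operand list when folding them together.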
426 class SCEVComplexityCompare { 427 const LoopInfo *const LI; 428 public: 429 explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {} 430 431 // Return true or false if LHS is less than, or at least RHS, respectively. 432 bool operator()(const SCEV *LHS, const SCEV *RHS) const { 433 return compare(LHS, RHS) < 0; 434 } 435 436 // Return negative, zero, or positive, if LHS is less than, equal to, or 437 // greater than RHS, respectively. A three-way result allows recursive 438 // comparisons to be more efficient. 439 int compare(const SCEV *LHS, const SCEV *RHS) const { 440 // Fast-path: SCEVs are uniqued so we can do a quick equality check. 441 if (LHS == RHS) 442 return 0; 443 444 // Primarily, sort the SCEVs by their getSCEVType(). 445 unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType(); 446 if (LType != RType) 447 return (int)LType - (int)RType; 448 449 // Aside from the getSCEVType() ordering, the particular ordering 450 // isn't very important except that it's beneficial to be consistent, 451 // so that (a + b) and (b + a) don't end up as different expressions. 452 switch (LType) { 453 case scUnknown: { 454 const SCEVUnknown *LU = cast<SCEVUnknown>(LHS); 455 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS); 456 457 // Sort SCEVUnknown values with some loose heuristics. TODO: This is 458 // not as complete as it could be. 459 const Value *LV = LU->getValue(), *RV = RU->getValue(); 460 461 // Order pointer values after integer values. This helps SCEVExpander 462 // form GEPs. 463 bool LIsPointer = LV->getType()->isPointerTy(), 464 RIsPointer = RV->getType()->isPointerTy(); 465 if (LIsPointer != RIsPointer) 466 return (int)LIsPointer - (int)RIsPointer; 467 468 // Compare getValueID values. 469 unsigned LID = LV->getValueID(), 470 RID = RV->getValueID(); 471 if (LID != RID) 472 return (int)LID - (int)RID; 473 474 // Sort arguments by their position. 475 if (const Argument *LA = dyn_cast<Argument>(LV)) { 476 const Argument *RA = cast<Argument>(RV); 477 unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo(); 478 return (int)LArgNo - (int)RArgNo; 479 } 480 481 // For instructions, compare their loop depth, and their operand 482 // count. This is pretty loose. 483 if (const Instruction *LInst = dyn_cast<Instruction>(LV)) { 484 const Instruction *RInst = cast<Instruction>(RV); 485 486 // Compare loop depths. 487 const BasicBlock *LParent = LInst->getParent(), 488 *RParent = RInst->getParent(); 489 if (LParent != RParent) { 490 unsigned LDepth = LI->getLoopDepth(LParent), 491 RDepth = LI->getLoopDepth(RParent); 492 if (LDepth != RDepth) 493 return (int)LDepth - (int)RDepth; 494 } 495 496 // Compare the number of operands. 497 unsigned LNumOps = LInst->getNumOperands(), 498 RNumOps = RInst->getNumOperands(); 499 return (int)LNumOps - (int)RNumOps; 500 } 501 502 return 0; 503 } 504 505 case scConstant: { 506 const SCEVConstant *LC = cast<SCEVConstant>(LHS); 507 const SCEVConstant *RC = cast<SCEVConstant>(RHS); 508 509 // Compare constant values. 510 const APInt &LA = LC->getValue()->getValue(); 511 const APInt &RA = RC->getValue()->getValue(); 512 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth(); 513 if (LBitWidth != RBitWidth) 514 return (int)LBitWidth - (int)RBitWidth; 515 return LA.ult(RA) ? -1 : 1; 516 } 517 518 case scAddRecExpr: { 519 const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS); 520 const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS); 521 522 // Compare addrec loop depths. 
523 const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop(); 524 if (LLoop != RLoop) { 525 unsigned LDepth = LLoop->getLoopDepth(), 526 RDepth = RLoop->getLoopDepth(); 527 if (LDepth != RDepth) 528 return (int)LDepth - (int)RDepth; 529 } 530 531 // Addrec complexity grows with operand count. 532 unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands(); 533 if (LNumOps != RNumOps) 534 return (int)LNumOps - (int)RNumOps; 535 536 // Lexicographically compare. 537 for (unsigned i = 0; i != LNumOps; ++i) { 538 long X = compare(LA->getOperand(i), RA->getOperand(i)); 539 if (X != 0) 540 return X; 541 } 542 543 return 0; 544 } 545 546 case scAddExpr: 547 case scMulExpr: 548 case scSMaxExpr: 549 case scUMaxExpr: { 550 const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS); 551 const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS); 552 553 // Lexicographically compare n-ary expressions. 554 unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands(); 555 for (unsigned i = 0; i != LNumOps; ++i) { 556 if (i >= RNumOps) 557 return 1; 558 long X = compare(LC->getOperand(i), RC->getOperand(i)); 559 if (X != 0) 560 return X; 561 } 562 return (int)LNumOps - (int)RNumOps; 563 } 564 565 case scUDivExpr: { 566 const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS); 567 const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS); 568 569 // Lexicographically compare udiv expressions. 570 long X = compare(LC->getLHS(), RC->getLHS()); 571 if (X != 0) 572 return X; 573 return compare(LC->getRHS(), RC->getRHS()); 574 } 575 576 case scTruncate: 577 case scZeroExtend: 578 case scSignExtend: { 579 const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS); 580 const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS); 581 582 // Compare cast expressions by operand. 583 return compare(LC->getOperand(), RC->getOperand()); 584 } 585 586 default: 587 break; 588 } 589 590 llvm_unreachable("Unknown SCEV kind!"); 591 return 0; 592 } 593 }; 594 } 595 596 /// GroupByComplexity - Given a list of SCEV objects, order them by their 597 /// complexity, and group objects of the same complexity together by value. 598 /// When this routine is finished, we know that any duplicates in the vector are 599 /// consecutive and that complexity is monotonically increasing. 600 /// 601 /// Note that we go take special precautions to ensure that we get deterministic 602 /// results from this routine. In other words, we don't want the results of 603 /// this to depend on where the addresses of various SCEV objects happened to 604 /// land in memory. 605 /// 606 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops, 607 LoopInfo *LI) { 608 if (Ops.size() < 2) return; // Noop 609 if (Ops.size() == 2) { 610 // This is the common case, which also happens to be trivially simple. 611 // Special case it. 612 const SCEV *&LHS = Ops[0], *&RHS = Ops[1]; 613 if (SCEVComplexityCompare(LI)(RHS, LHS)) 614 std::swap(LHS, RHS); 615 return; 616 } 617 618 // Do the rough sort by complexity. 619 std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI)); 620 621 // Now that we are sorted by complexity, group elements of the same 622 // complexity. Note that this is, at worst, N^2, but the vector is likely to 623 // be extremely short in practice. Note that we take this approach because we 624 // do not want to depend on the addresses of the objects we are grouping. 
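  //
  // For example, the comparator can report two distinct instructions in the
  // same block with the same value ID and operand count as equally complex,
  // so a stable sort of (%x, %y, %x) need not place the two %x operands next
  // to each other; the pass below swaps duplicates into adjacent positions.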
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
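  //
  // As a concrete illustration for K = 3 (so K! = 6 = 2^1 * 3): T is 1 and
  // the odd factor is 3, so the product It*(It-1)*(It-2) is formed at W+1
  // bits, divided by 2^T (a single bit shift), truncated back to W bits, and
  // finally multiplied by the multiplicative inverse of 3 modulo 2^W.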
699 // 700 // (It's possible to not widen at all by pulling out factors of 2 before 701 // the multiplication; for example, K=2 can be calculated as 702 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires 703 // extra arithmetic, so it's not an obvious win, and it gets 704 // much more complicated for K > 3.) 705 706 // Protection from insane SCEVs; this bound is conservative, 707 // but it probably doesn't matter. 708 if (K > 1000) 709 return SE.getCouldNotCompute(); 710 711 unsigned W = SE.getTypeSizeInBits(ResultTy); 712 713 // Calculate K! / 2^T and T; we divide out the factors of two before 714 // multiplying for calculating K! / 2^T to avoid overflow. 715 // Other overflow doesn't matter because we only care about the bottom 716 // W bits of the result. 717 APInt OddFactorial(W, 1); 718 unsigned T = 1; 719 for (unsigned i = 3; i <= K; ++i) { 720 APInt Mult(W, i); 721 unsigned TwoFactors = Mult.countTrailingZeros(); 722 T += TwoFactors; 723 Mult = Mult.lshr(TwoFactors); 724 OddFactorial *= Mult; 725 } 726 727 // We need at least W + T bits for the multiplication step 728 unsigned CalculationBits = W + T; 729 730 // Calculate 2^T, at width T+W. 731 APInt DivFactor = APInt(CalculationBits, 1).shl(T); 732 733 // Calculate the multiplicative inverse of K! / 2^T; 734 // this multiplication factor will perform the exact division by 735 // K! / 2^T. 736 APInt Mod = APInt::getSignedMinValue(W+1); 737 APInt MultiplyFactor = OddFactorial.zext(W+1); 738 MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod); 739 MultiplyFactor = MultiplyFactor.trunc(W); 740 741 // Calculate the product, at width T+W 742 const IntegerType *CalculationTy = IntegerType::get(SE.getContext(), 743 CalculationBits); 744 const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); 745 for (unsigned i = 1; i != K; ++i) { 746 const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i)); 747 Dividend = SE.getMulExpr(Dividend, 748 SE.getTruncateOrZeroExtend(S, CalculationTy)); 749 } 750 751 // Divide by 2^T 752 const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); 753 754 // Truncate the result, and divide by K! / 2^T. 755 756 return SE.getMulExpr(SE.getConstant(MultiplyFactor), 757 SE.getTruncateOrZeroExtend(DivResult, ResultTy)); 758 } 759 760 /// evaluateAtIteration - Return the value of this chain of recurrences at 761 /// the specified iteration number. We can evaluate this recurrence by 762 /// multiplying each element in the chain by the binomial coefficient 763 /// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as: 764 /// 765 /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3) 766 /// 767 /// where BC(It, k) stands for binomial coefficient. 768 /// 769 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It, 770 ScalarEvolution &SE) const { 771 const SCEV *Result = getStart(); 772 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 773 // The computation is correct in the face of overflow provided that the 774 // multiplication is performed _after_ the evaluation of the binomial 775 // coefficient. 
776 const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType()); 777 if (isa<SCEVCouldNotCompute>(Coeff)) 778 return Coeff; 779 780 Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff)); 781 } 782 return Result; 783 } 784 785 //===----------------------------------------------------------------------===// 786 // SCEV Expression folder implementations 787 //===----------------------------------------------------------------------===// 788 789 const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, 790 const Type *Ty) { 791 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) && 792 "This is not a truncating conversion!"); 793 assert(isSCEVable(Ty) && 794 "This is not a conversion to a SCEVable type!"); 795 Ty = getEffectiveSCEVType(Ty); 796 797 FoldingSetNodeID ID; 798 ID.AddInteger(scTruncate); 799 ID.AddPointer(Op); 800 ID.AddPointer(Ty); 801 void *IP = 0; 802 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 803 804 // Fold if the operand is constant. 805 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 806 return getConstant( 807 cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), 808 getEffectiveSCEVType(Ty)))); 809 810 // trunc(trunc(x)) --> trunc(x) 811 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) 812 return getTruncateExpr(ST->getOperand(), Ty); 813 814 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing 815 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 816 return getTruncateOrSignExtend(SS->getOperand(), Ty); 817 818 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing 819 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 820 return getTruncateOrZeroExtend(SZ->getOperand(), Ty); 821 822 // If the input value is a chrec scev, truncate the chrec's operands. 823 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 824 SmallVector<const SCEV *, 4> Operands; 825 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 826 Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty)); 827 return getAddRecExpr(Operands, AddRec->getLoop()); 828 } 829 830 // As a special case, fold trunc(undef) to undef. We don't want to 831 // know too much about SCEVUnknowns, but this special case is handy 832 // and harmless. 833 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op)) 834 if (isa<UndefValue>(U->getValue())) 835 return getSCEV(UndefValue::get(Ty)); 836 837 // The cast wasn't folded; create an explicit cast node. We can reuse 838 // the existing insert position since if we get here, we won't have 839 // made any changes which would invalidate it. 840 SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), 841 Op, Ty); 842 UniqueSCEVs.InsertNode(S, IP); 843 return S; 844 } 845 846 const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, 847 const Type *Ty) { 848 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 849 "This is not an extending conversion!"); 850 assert(isSCEVable(Ty) && 851 "This is not a conversion to a SCEVable type!"); 852 Ty = getEffectiveSCEVType(Ty); 853 854 // Fold if the operand is constant. 
855 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 856 return getConstant( 857 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), 858 getEffectiveSCEVType(Ty)))); 859 860 // zext(zext(x)) --> zext(x) 861 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 862 return getZeroExtendExpr(SZ->getOperand(), Ty); 863 864 // Before doing any expensive analysis, check to see if we've already 865 // computed a SCEV for this Op and Ty. 866 FoldingSetNodeID ID; 867 ID.AddInteger(scZeroExtend); 868 ID.AddPointer(Op); 869 ID.AddPointer(Ty); 870 void *IP = 0; 871 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 872 873 // If the input value is a chrec scev, and we can prove that the value 874 // did not overflow the old, smaller, value, we can zero extend all of the 875 // operands (often constants). This allows analysis of something like 876 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } 877 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 878 if (AR->isAffine()) { 879 const SCEV *Start = AR->getStart(); 880 const SCEV *Step = AR->getStepRecurrence(*this); 881 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 882 const Loop *L = AR->getLoop(); 883 884 // If we have special knowledge that this addrec won't overflow, 885 // we don't need to do any further analysis. 886 if (AR->hasNoUnsignedWrap()) 887 return getAddRecExpr(getZeroExtendExpr(Start, Ty), 888 getZeroExtendExpr(Step, Ty), 889 L); 890 891 // Check whether the backedge-taken count is SCEVCouldNotCompute. 892 // Note that this serves two purposes: It filters out loops that are 893 // simply not analyzable, and it covers the case where this code is 894 // being called from within backedge-taken count analysis, such that 895 // attempting to ask for the backedge-taken count would likely result 896 // in infinite recursion. In the later case, the analysis code will 897 // cope with a conservative value, and it will take care to purge 898 // that value once it has finished. 899 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); 900 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 901 // Manually compute the final value for AR, checking for 902 // overflow. 903 904 // Check whether the backedge-taken count can be losslessly casted to 905 // the addrec's type. The count is always unsigned. 906 const SCEV *CastedMaxBECount = 907 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 908 const SCEV *RecastedMaxBECount = 909 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 910 if (MaxBECount == RecastedMaxBECount) { 911 const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 912 // Check whether Start+Step*MaxBECount has no unsigned overflow. 913 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step); 914 const SCEV *Add = getAddExpr(Start, ZMul); 915 const SCEV *OperandExtendedAdd = 916 getAddExpr(getZeroExtendExpr(Start, WideTy), 917 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), 918 getZeroExtendExpr(Step, WideTy))); 919 if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) 920 // Return the expression with the addrec on the outside. 921 return getAddRecExpr(getZeroExtendExpr(Start, Ty), 922 getZeroExtendExpr(Step, Ty), 923 L); 924 925 // Similar to above, only this time treat the step value as signed. 926 // This covers loops that count down. 
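        // (For example, a loop like
        //     for (unsigned char X = 100; X != 0; --X) { int Y = X; }
        // has a step of -1 when viewed as a signed value.)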
927 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step); 928 Add = getAddExpr(Start, SMul); 929 OperandExtendedAdd = 930 getAddExpr(getZeroExtendExpr(Start, WideTy), 931 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), 932 getSignExtendExpr(Step, WideTy))); 933 if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) 934 // Return the expression with the addrec on the outside. 935 return getAddRecExpr(getZeroExtendExpr(Start, Ty), 936 getSignExtendExpr(Step, Ty), 937 L); 938 } 939 940 // If the backedge is guarded by a comparison with the pre-inc value 941 // the addrec is safe. Also, if the entry is guarded by a comparison 942 // with the start value and the backedge is guarded by a comparison 943 // with the post-inc value, the addrec is safe. 944 if (isKnownPositive(Step)) { 945 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 946 getUnsignedRange(Step).getUnsignedMax()); 947 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 948 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && 949 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, 950 AR->getPostIncExpr(*this), N))) 951 // Return the expression with the addrec on the outside. 952 return getAddRecExpr(getZeroExtendExpr(Start, Ty), 953 getZeroExtendExpr(Step, Ty), 954 L); 955 } else if (isKnownNegative(Step)) { 956 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 957 getSignedRange(Step).getSignedMin()); 958 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 959 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && 960 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, 961 AR->getPostIncExpr(*this), N))) 962 // Return the expression with the addrec on the outside. 963 return getAddRecExpr(getZeroExtendExpr(Start, Ty), 964 getSignExtendExpr(Step, Ty), 965 L); 966 } 967 } 968 } 969 970 // The cast wasn't folded; create an explicit cast node. 971 // Recompute the insert position, as it may have been invalidated. 972 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 973 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 974 Op, Ty); 975 UniqueSCEVs.InsertNode(S, IP); 976 return S; 977 } 978 979 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, 980 const Type *Ty) { 981 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 982 "This is not an extending conversion!"); 983 assert(isSCEVable(Ty) && 984 "This is not a conversion to a SCEVable type!"); 985 Ty = getEffectiveSCEVType(Ty); 986 987 // Fold if the operand is constant. 988 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 989 return getConstant( 990 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), 991 getEffectiveSCEVType(Ty)))); 992 993 // sext(sext(x)) --> sext(x) 994 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 995 return getSignExtendExpr(SS->getOperand(), Ty); 996 997 // Before doing any expensive analysis, check to see if we've already 998 // computed a SCEV for this Op and Ty. 999 FoldingSetNodeID ID; 1000 ID.AddInteger(scSignExtend); 1001 ID.AddPointer(Op); 1002 ID.AddPointer(Ty); 1003 void *IP = 0; 1004 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1005 1006 // If the input value is a chrec scev, and we can prove that the value 1007 // did not overflow the old, smaller, value, we can sign extend all of the 1008 // operands (often constants). 
This allows analysis of something like 1009 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1010 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1011 if (AR->isAffine()) { 1012 const SCEV *Start = AR->getStart(); 1013 const SCEV *Step = AR->getStepRecurrence(*this); 1014 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1015 const Loop *L = AR->getLoop(); 1016 1017 // If we have special knowledge that this addrec won't overflow, 1018 // we don't need to do any further analysis. 1019 if (AR->hasNoSignedWrap()) 1020 return getAddRecExpr(getSignExtendExpr(Start, Ty), 1021 getSignExtendExpr(Step, Ty), 1022 L); 1023 1024 // Check whether the backedge-taken count is SCEVCouldNotCompute. 1025 // Note that this serves two purposes: It filters out loops that are 1026 // simply not analyzable, and it covers the case where this code is 1027 // being called from within backedge-taken count analysis, such that 1028 // attempting to ask for the backedge-taken count would likely result 1029 // in infinite recursion. In the later case, the analysis code will 1030 // cope with a conservative value, and it will take care to purge 1031 // that value once it has finished. 1032 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); 1033 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1034 // Manually compute the final value for AR, checking for 1035 // overflow. 1036 1037 // Check whether the backedge-taken count can be losslessly casted to 1038 // the addrec's type. The count is always unsigned. 1039 const SCEV *CastedMaxBECount = 1040 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 1041 const SCEV *RecastedMaxBECount = 1042 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 1043 if (MaxBECount == RecastedMaxBECount) { 1044 const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1045 // Check whether Start+Step*MaxBECount has no signed overflow. 1046 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step); 1047 const SCEV *Add = getAddExpr(Start, SMul); 1048 const SCEV *OperandExtendedAdd = 1049 getAddExpr(getSignExtendExpr(Start, WideTy), 1050 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), 1051 getSignExtendExpr(Step, WideTy))); 1052 if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) 1053 // Return the expression with the addrec on the outside. 1054 return getAddRecExpr(getSignExtendExpr(Start, Ty), 1055 getSignExtendExpr(Step, Ty), 1056 L); 1057 1058 // Similar to above, only this time treat the step value as unsigned. 1059 // This covers loops that count up with an unsigned step. 1060 const SCEV *UMul = getMulExpr(CastedMaxBECount, Step); 1061 Add = getAddExpr(Start, UMul); 1062 OperandExtendedAdd = 1063 getAddExpr(getSignExtendExpr(Start, WideTy), 1064 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), 1065 getZeroExtendExpr(Step, WideTy))); 1066 if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) 1067 // Return the expression with the addrec on the outside. 1068 return getAddRecExpr(getSignExtendExpr(Start, Ty), 1069 getZeroExtendExpr(Step, Ty), 1070 L); 1071 } 1072 1073 // If the backedge is guarded by a comparison with the pre-inc value 1074 // the addrec is safe. Also, if the entry is guarded by a comparison 1075 // with the start value and the backedge is guarded by a comparison 1076 // with the post-inc value, the addrec is safe. 
1077 if (isKnownPositive(Step)) { 1078 const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) - 1079 getSignedRange(Step).getSignedMax()); 1080 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) || 1081 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) && 1082 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, 1083 AR->getPostIncExpr(*this), N))) 1084 // Return the expression with the addrec on the outside. 1085 return getAddRecExpr(getSignExtendExpr(Start, Ty), 1086 getSignExtendExpr(Step, Ty), 1087 L); 1088 } else if (isKnownNegative(Step)) { 1089 const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) - 1090 getSignedRange(Step).getSignedMin()); 1091 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) || 1092 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) && 1093 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, 1094 AR->getPostIncExpr(*this), N))) 1095 // Return the expression with the addrec on the outside. 1096 return getAddRecExpr(getSignExtendExpr(Start, Ty), 1097 getSignExtendExpr(Step, Ty), 1098 L); 1099 } 1100 } 1101 } 1102 1103 // The cast wasn't folded; create an explicit cast node. 1104 // Recompute the insert position, as it may have been invalidated. 1105 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1106 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1107 Op, Ty); 1108 UniqueSCEVs.InsertNode(S, IP); 1109 return S; 1110 } 1111 1112 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 1113 /// unspecified bits out to the given type. 1114 /// 1115 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 1116 const Type *Ty) { 1117 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1118 "This is not an extending conversion!"); 1119 assert(isSCEVable(Ty) && 1120 "This is not a conversion to a SCEVable type!"); 1121 Ty = getEffectiveSCEVType(Ty); 1122 1123 // Sign-extend negative constants. 1124 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1125 if (SC->getValue()->getValue().isNegative()) 1126 return getSignExtendExpr(Op, Ty); 1127 1128 // Peel off a truncate cast. 1129 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 1130 const SCEV *NewOp = T->getOperand(); 1131 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 1132 return getAnyExtendExpr(NewOp, Ty); 1133 return getTruncateOrNoop(NewOp, Ty); 1134 } 1135 1136 // Next try a zext cast. If the cast is folded, use it. 1137 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 1138 if (!isa<SCEVZeroExtendExpr>(ZExt)) 1139 return ZExt; 1140 1141 // Next try a sext cast. If the cast is folded, use it. 1142 const SCEV *SExt = getSignExtendExpr(Op, Ty); 1143 if (!isa<SCEVSignExtendExpr>(SExt)) 1144 return SExt; 1145 1146 // Force the cast to be folded into the operands of an addrec. 1147 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 1148 SmallVector<const SCEV *, 4> Ops; 1149 for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end(); 1150 I != E; ++I) 1151 Ops.push_back(getAnyExtendExpr(*I, Ty)); 1152 return getAddRecExpr(Ops, AR->getLoop()); 1153 } 1154 1155 // As a special case, fold anyext(undef) to undef. We don't want to 1156 // know too much about SCEVUnknowns, but this special case is handy 1157 // and harmless. 
1158 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op)) 1159 if (isa<UndefValue>(U->getValue())) 1160 return getSCEV(UndefValue::get(Ty)); 1161 1162 // If the expression is obviously signed, use the sext cast value. 1163 if (isa<SCEVSMaxExpr>(Op)) 1164 return SExt; 1165 1166 // Absent any other information, use the zext cast value. 1167 return ZExt; 1168 } 1169 1170 /// CollectAddOperandsWithScales - Process the given Ops list, which is 1171 /// a list of operands to be added under the given scale, update the given 1172 /// map. This is a helper function for getAddRecExpr. As an example of 1173 /// what it does, given a sequence of operands that would form an add 1174 /// expression like this: 1175 /// 1176 /// m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r) 1177 /// 1178 /// where A and B are constants, update the map with these values: 1179 /// 1180 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 1181 /// 1182 /// and add 13 + A*B*29 to AccumulatedConstant. 1183 /// This will allow getAddRecExpr to produce this: 1184 /// 1185 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 1186 /// 1187 /// This form often exposes folding opportunities that are hidden in 1188 /// the original operand list. 1189 /// 1190 /// Return true iff it appears that any interesting folding opportunities 1191 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 1192 /// the common case where no interesting opportunities are present, and 1193 /// is also used as a check to avoid infinite recursion. 1194 /// 1195 static bool 1196 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 1197 SmallVector<const SCEV *, 8> &NewOps, 1198 APInt &AccumulatedConstant, 1199 const SCEV *const *Ops, size_t NumOperands, 1200 const APInt &Scale, 1201 ScalarEvolution &SE) { 1202 bool Interesting = false; 1203 1204 // Iterate over the add operands. They are sorted, with constants first. 1205 unsigned i = 0; 1206 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 1207 ++i; 1208 // Pull a buried constant out to the outside. 1209 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 1210 Interesting = true; 1211 AccumulatedConstant += Scale * C->getValue()->getValue(); 1212 } 1213 1214 // Next comes everything else. We're especially interested in multiplies 1215 // here, but they're in the middle, so just visit the rest with one loop. 1216 for (; i != NumOperands; ++i) { 1217 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 1218 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 1219 APInt NewScale = 1220 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue(); 1221 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 1222 // A multiplication of a constant with another add; recurse. 1223 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 1224 Interesting |= 1225 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 1226 Add->op_begin(), Add->getNumOperands(), 1227 NewScale, SE); 1228 } else { 1229 // A multiplication of a constant with some other value. Update 1230 // the map. 
1231 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 1232 const SCEV *Key = SE.getMulExpr(MulOps); 1233 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 1234 M.insert(std::make_pair(Key, NewScale)); 1235 if (Pair.second) { 1236 NewOps.push_back(Pair.first->first); 1237 } else { 1238 Pair.first->second += NewScale; 1239 // The map already had an entry for this value, which may indicate 1240 // a folding opportunity. 1241 Interesting = true; 1242 } 1243 } 1244 } else { 1245 // An ordinary operand. Update the map. 1246 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 1247 M.insert(std::make_pair(Ops[i], Scale)); 1248 if (Pair.second) { 1249 NewOps.push_back(Pair.first->first); 1250 } else { 1251 Pair.first->second += Scale; 1252 // The map already had an entry for this value, which may indicate 1253 // a folding opportunity. 1254 Interesting = true; 1255 } 1256 } 1257 } 1258 1259 return Interesting; 1260 } 1261 1262 namespace { 1263 struct APIntCompare { 1264 bool operator()(const APInt &LHS, const APInt &RHS) const { 1265 return LHS.ult(RHS); 1266 } 1267 }; 1268 } 1269 1270 /// getAddExpr - Get a canonical add expression, or something simpler if 1271 /// possible. 1272 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 1273 bool HasNUW, bool HasNSW) { 1274 assert(!Ops.empty() && "Cannot get empty add!"); 1275 if (Ops.size() == 1) return Ops[0]; 1276 #ifndef NDEBUG 1277 const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 1278 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 1279 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 1280 "SCEVAddExpr operand types don't match!"); 1281 #endif 1282 1283 // If HasNSW is true and all the operands are non-negative, infer HasNUW. 1284 if (!HasNUW && HasNSW) { 1285 bool All = true; 1286 for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(), 1287 E = Ops.end(); I != E; ++I) 1288 if (!isKnownNonNegative(*I)) { 1289 All = false; 1290 break; 1291 } 1292 if (All) HasNUW = true; 1293 } 1294 1295 // Sort by complexity, this groups all similar expression types together. 1296 GroupByComplexity(Ops, LI); 1297 1298 // If there are any constants, fold them together. 1299 unsigned Idx = 0; 1300 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 1301 ++Idx; 1302 assert(Idx < Ops.size()); 1303 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 1304 // We found two constants, fold them together! 1305 Ops[0] = getConstant(LHSC->getValue()->getValue() + 1306 RHSC->getValue()->getValue()); 1307 if (Ops.size() == 2) return Ops[0]; 1308 Ops.erase(Ops.begin()+1); // Erase the folded element 1309 LHSC = cast<SCEVConstant>(Ops[0]); 1310 } 1311 1312 // If we are left with a constant zero being added, strip it off. 1313 if (LHSC->getValue()->isZero()) { 1314 Ops.erase(Ops.begin()); 1315 --Idx; 1316 } 1317 1318 if (Ops.size() == 1) return Ops[0]; 1319 } 1320 1321 // Okay, check to see if the same value occurs in the operand list more than 1322 // once. If so, merge them together into an multiply expression. Since we 1323 // sorted the list, these values are required to be adjacent. 1324 const Type *Ty = Ops[0]->getType(); 1325 bool FoundMatch = false; 1326 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 1327 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 1328 // Scan ahead to count how many equal operands there are. 
1329 unsigned Count = 2; 1330 while (i+Count != e && Ops[i+Count] == Ops[i]) 1331 ++Count; 1332 // Merge the values into a multiply. 1333 const SCEV *Scale = getConstant(Ty, Count); 1334 const SCEV *Mul = getMulExpr(Scale, Ops[i]); 1335 if (Ops.size() == Count) 1336 return Mul; 1337 Ops[i] = Mul; 1338 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 1339 --i; e -= Count - 1; 1340 FoundMatch = true; 1341 } 1342 if (FoundMatch) 1343 return getAddExpr(Ops, HasNUW, HasNSW); 1344 1345 // Check for truncates. If all the operands are truncated from the same 1346 // type, see if factoring out the truncate would permit the result to be 1347 // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n) 1348 // if the contents of the resulting outer trunc fold to something simple. 1349 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) { 1350 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]); 1351 const Type *DstType = Trunc->getType(); 1352 const Type *SrcType = Trunc->getOperand()->getType(); 1353 SmallVector<const SCEV *, 8> LargeOps; 1354 bool Ok = true; 1355 // Check all the operands to see if they can be represented in the 1356 // source type of the truncate. 1357 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 1358 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 1359 if (T->getOperand()->getType() != SrcType) { 1360 Ok = false; 1361 break; 1362 } 1363 LargeOps.push_back(T->getOperand()); 1364 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 1365 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 1366 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 1367 SmallVector<const SCEV *, 8> LargeMulOps; 1368 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 1369 if (const SCEVTruncateExpr *T = 1370 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 1371 if (T->getOperand()->getType() != SrcType) { 1372 Ok = false; 1373 break; 1374 } 1375 LargeMulOps.push_back(T->getOperand()); 1376 } else if (const SCEVConstant *C = 1377 dyn_cast<SCEVConstant>(M->getOperand(j))) { 1378 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 1379 } else { 1380 Ok = false; 1381 break; 1382 } 1383 } 1384 if (Ok) 1385 LargeOps.push_back(getMulExpr(LargeMulOps)); 1386 } else { 1387 Ok = false; 1388 break; 1389 } 1390 } 1391 if (Ok) { 1392 // Evaluate the expression in the larger type. 1393 const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW); 1394 // If it folds to something simple, use it. Otherwise, don't. 1395 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 1396 return getTruncateExpr(Fold, DstType); 1397 } 1398 } 1399 1400 // Skip past any other cast SCEVs. 1401 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 1402 ++Idx; 1403 1404 // If there are add operands they would be next. 1405 if (Idx < Ops.size()) { 1406 bool DeletedAdd = false; 1407 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 1408 // If we have an add, expand the add operands onto the end of the operands 1409 // list. 1410 Ops.erase(Ops.begin()+Idx); 1411 Ops.append(Add->op_begin(), Add->op_end()); 1412 DeletedAdd = true; 1413 } 1414 1415 // If we deleted at least one add, we added operands to the end of the list, 1416 // and they are not necessarily sorted. Recurse to resort and resimplify 1417 // any operands we just acquired. 1418 if (DeletedAdd) 1419 return getAddExpr(Ops); 1420 } 1421 1422 // Skip over the add expression until we get to a multiply. 
1423 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 1424 ++Idx; 1425 1426 // Check to see if there are any folding opportunities present with 1427 // operands multiplied by constant values. 1428 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { 1429 uint64_t BitWidth = getTypeSizeInBits(Ty); 1430 DenseMap<const SCEV *, APInt> M; 1431 SmallVector<const SCEV *, 8> NewOps; 1432 APInt AccumulatedConstant(BitWidth, 0); 1433 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 1434 Ops.data(), Ops.size(), 1435 APInt(BitWidth, 1), *this)) { 1436 // Some interesting folding opportunity is present, so its worthwhile to 1437 // re-generate the operands list. Group the operands by constant scale, 1438 // to avoid multiplying by the same constant scale multiple times. 1439 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 1440 for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(), 1441 E = NewOps.end(); I != E; ++I) 1442 MulOpLists[M.find(*I)->second].push_back(*I); 1443 // Re-generate the operands list. 1444 Ops.clear(); 1445 if (AccumulatedConstant != 0) 1446 Ops.push_back(getConstant(AccumulatedConstant)); 1447 for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator 1448 I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I) 1449 if (I->first != 0) 1450 Ops.push_back(getMulExpr(getConstant(I->first), 1451 getAddExpr(I->second))); 1452 if (Ops.empty()) 1453 return getConstant(Ty, 0); 1454 if (Ops.size() == 1) 1455 return Ops[0]; 1456 return getAddExpr(Ops); 1457 } 1458 } 1459 1460 // If we are adding something to a multiply expression, make sure the 1461 // something is not already an operand of the multiply. If so, merge it into 1462 // the multiply. 1463 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 1464 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 1465 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 1466 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 1467 if (isa<SCEVConstant>(MulOpSCEV)) 1468 continue; 1469 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 1470 if (MulOpSCEV == Ops[AddOp]) { 1471 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 1472 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 1473 if (Mul->getNumOperands() != 2) { 1474 // If the multiply has more than two operands, we must get the 1475 // Y*Z term. 1476 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 1477 Mul->op_begin()+MulOp); 1478 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 1479 InnerMul = getMulExpr(MulOps); 1480 } 1481 const SCEV *One = getConstant(Ty, 1); 1482 const SCEV *AddOne = getAddExpr(One, InnerMul); 1483 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV); 1484 if (Ops.size() == 2) return OuterMul; 1485 if (AddOp < Idx) { 1486 Ops.erase(Ops.begin()+AddOp); 1487 Ops.erase(Ops.begin()+Idx-1); 1488 } else { 1489 Ops.erase(Ops.begin()+Idx); 1490 Ops.erase(Ops.begin()+AddOp-1); 1491 } 1492 Ops.push_back(OuterMul); 1493 return getAddExpr(Ops); 1494 } 1495 1496 // Check this multiply against other multiplies being added together. 1497 for (unsigned OtherMulIdx = Idx+1; 1498 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 1499 ++OtherMulIdx) { 1500 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 1501 // If MulOp occurs in OtherMul, we can fold the two multiplies 1502 // together. 
1503 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 1504 OMulOp != e; ++OMulOp) 1505 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 1506 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 1507 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 1508 if (Mul->getNumOperands() != 2) { 1509 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 1510 Mul->op_begin()+MulOp); 1511 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 1512 InnerMul1 = getMulExpr(MulOps); 1513 } 1514 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 1515 if (OtherMul->getNumOperands() != 2) { 1516 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 1517 OtherMul->op_begin()+OMulOp); 1518 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 1519 InnerMul2 = getMulExpr(MulOps); 1520 } 1521 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2); 1522 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); 1523 if (Ops.size() == 2) return OuterMul; 1524 Ops.erase(Ops.begin()+Idx); 1525 Ops.erase(Ops.begin()+OtherMulIdx-1); 1526 Ops.push_back(OuterMul); 1527 return getAddExpr(Ops); 1528 } 1529 } 1530 } 1531 } 1532 1533 // If there are any add recurrences in the operands list, see if any other 1534 // added values are loop invariant. If so, we can fold them into the 1535 // recurrence. 1536 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 1537 ++Idx; 1538 1539 // Scan over all recurrences, trying to fold loop invariants into them. 1540 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 1541 // Scan all of the other operands to this add and add them to the vector if 1542 // they are loop invariant w.r.t. the recurrence. 1543 SmallVector<const SCEV *, 8> LIOps; 1544 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 1545 const Loop *AddRecLoop = AddRec->getLoop(); 1546 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1547 if (isLoopInvariant(Ops[i], AddRecLoop)) { 1548 LIOps.push_back(Ops[i]); 1549 Ops.erase(Ops.begin()+i); 1550 --i; --e; 1551 } 1552 1553 // If we found some loop invariants, fold them into the recurrence. 1554 if (!LIOps.empty()) { 1555 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 1556 LIOps.push_back(AddRec->getStart()); 1557 1558 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 1559 AddRec->op_end()); 1560 AddRecOps[0] = getAddExpr(LIOps); 1561 1562 // Build the new addrec. Propagate the NUW and NSW flags if both the 1563 // outer add and the inner addrec are guaranteed to have no overflow or if 1564 // there is no outer part. 1565 if (Ops.size() != 1) { 1566 HasNUW &= AddRec->hasNoUnsignedWrap(); 1567 HasNSW &= AddRec->hasNoSignedWrap(); 1568 } 1569 1570 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, HasNUW, HasNSW); 1571 1572 // If all of the other operands were loop invariant, we are done. 1573 if (Ops.size() == 1) return NewRec; 1574 1575 // Otherwise, add the folded AddRec by the non-liv parts. 1576 for (unsigned i = 0;; ++i) 1577 if (Ops[i] == AddRec) { 1578 Ops[i] = NewRec; 1579 break; 1580 } 1581 return getAddExpr(Ops); 1582 } 1583 1584 // Okay, if there weren't any loop invariants to be folded, check to see if 1585 // there are multiple AddRec's with the same loop induction variable being 1586 // added together. If so, we can fold them. 
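// For example, {1,+,2}<L> + {3,+,4}<L> folds below to {4,+,6}<L>.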
1587 for (unsigned OtherIdx = Idx+1; 1588 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 1589 ++OtherIdx) 1590 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 1591 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 1592 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 1593 AddRec->op_end()); 1594 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 1595 ++OtherIdx) 1596 if (const SCEVAddRecExpr *OtherAddRec = 1597 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx])) 1598 if (OtherAddRec->getLoop() == AddRecLoop) { 1599 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 1600 i != e; ++i) { 1601 if (i >= AddRecOps.size()) { 1602 AddRecOps.append(OtherAddRec->op_begin()+i, 1603 OtherAddRec->op_end()); 1604 break; 1605 } 1606 AddRecOps[i] = getAddExpr(AddRecOps[i], 1607 OtherAddRec->getOperand(i)); 1608 } 1609 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 1610 } 1611 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop); 1612 return getAddExpr(Ops); 1613 } 1614 1615 // Otherwise couldn't fold anything into this recurrence. Move onto the 1616 // next one. 1617 } 1618 1619 // Okay, it looks like we really DO need an add expr. Check to see if we 1620 // already have one, otherwise create a new one. 1621 FoldingSetNodeID ID; 1622 ID.AddInteger(scAddExpr); 1623 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1624 ID.AddPointer(Ops[i]); 1625 void *IP = 0; 1626 SCEVAddExpr *S = 1627 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1628 if (!S) { 1629 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 1630 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 1631 S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator), 1632 O, Ops.size()); 1633 UniqueSCEVs.InsertNode(S, IP); 1634 } 1635 if (HasNUW) S->setHasNoUnsignedWrap(true); 1636 if (HasNSW) S->setHasNoSignedWrap(true); 1637 return S; 1638 } 1639 1640 /// getMulExpr - Get a canonical multiply expression, or something simpler if 1641 /// possible. 1642 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 1643 bool HasNUW, bool HasNSW) { 1644 assert(!Ops.empty() && "Cannot get empty mul!"); 1645 if (Ops.size() == 1) return Ops[0]; 1646 #ifndef NDEBUG 1647 const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 1648 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 1649 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 1650 "SCEVMulExpr operand types don't match!"); 1651 #endif 1652 1653 // If HasNSW is true and all the operands are non-negative, infer HasNUW. 1654 if (!HasNUW && HasNSW) { 1655 bool All = true; 1656 for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(), 1657 E = Ops.end(); I != E; ++I) 1658 if (!isKnownNonNegative(*I)) { 1659 All = false; 1660 break; 1661 } 1662 if (All) HasNUW = true; 1663 } 1664 1665 // Sort by complexity, this groups all similar expression types together. 1666 GroupByComplexity(Ops, LI); 1667 1668 // If there are any constants, fold them together. 
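// (GroupByComplexity places any SCEVConstant first, so a constant operand,
// if present, is always Ops[0].)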
1669 unsigned Idx = 0; 1670 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 1671 1672 // C1*(C2+V) -> C1*C2 + C1*V 1673 if (Ops.size() == 2) 1674 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 1675 if (Add->getNumOperands() == 2 && 1676 isa<SCEVConstant>(Add->getOperand(0))) 1677 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)), 1678 getMulExpr(LHSC, Add->getOperand(1))); 1679 1680 ++Idx; 1681 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 1682 // We found two constants, fold them together! 1683 ConstantInt *Fold = ConstantInt::get(getContext(), 1684 LHSC->getValue()->getValue() * 1685 RHSC->getValue()->getValue()); 1686 Ops[0] = getConstant(Fold); 1687 Ops.erase(Ops.begin()+1); // Erase the folded element 1688 if (Ops.size() == 1) return Ops[0]; 1689 LHSC = cast<SCEVConstant>(Ops[0]); 1690 } 1691 1692 // If we are left with a constant one being multiplied, strip it off. 1693 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) { 1694 Ops.erase(Ops.begin()); 1695 --Idx; 1696 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 1697 // If we have a multiply of zero, it will always be zero. 1698 return Ops[0]; 1699 } else if (Ops[0]->isAllOnesValue()) { 1700 // If we have a mul by -1 of an add, try distributing the -1 among the 1701 // add operands. 1702 if (Ops.size() == 2) 1703 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 1704 SmallVector<const SCEV *, 4> NewOps; 1705 bool AnyFolded = false; 1706 for (SCEVAddRecExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); 1707 I != E; ++I) { 1708 const SCEV *Mul = getMulExpr(Ops[0], *I); 1709 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 1710 NewOps.push_back(Mul); 1711 } 1712 if (AnyFolded) 1713 return getAddExpr(NewOps); 1714 } 1715 } 1716 1717 if (Ops.size() == 1) 1718 return Ops[0]; 1719 } 1720 1721 // Skip over the add expression until we get to a multiply. 1722 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 1723 ++Idx; 1724 1725 // If there are mul operands inline them all into this expression. 1726 if (Idx < Ops.size()) { 1727 bool DeletedMul = false; 1728 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 1729 // If we have an mul, expand the mul operands onto the end of the operands 1730 // list. 1731 Ops.erase(Ops.begin()+Idx); 1732 Ops.append(Mul->op_begin(), Mul->op_end()); 1733 DeletedMul = true; 1734 } 1735 1736 // If we deleted at least one mul, we added operands to the end of the list, 1737 // and they are not necessarily sorted. Recurse to resort and resimplify 1738 // any operands we just acquired. 1739 if (DeletedMul) 1740 return getMulExpr(Ops); 1741 } 1742 1743 // If there are any add recurrences in the operands list, see if any other 1744 // added values are loop invariant. If so, we can fold them into the 1745 // recurrence. 1746 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 1747 ++Idx; 1748 1749 // Scan over all recurrences, trying to fold loop invariants into them. 1750 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 1751 // Scan all of the other operands to this mul and add them to the vector if 1752 // they are loop invariant w.r.t. the recurrence. 
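// For example, with x and y invariant in L, x * {a,+,b}<L> * y folds below
// into {(x*y)*a,+,(x*y)*b}<L>.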
1753 SmallVector<const SCEV *, 8> LIOps; 1754 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 1755 const Loop *AddRecLoop = AddRec->getLoop(); 1756 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1757 if (isLoopInvariant(Ops[i], AddRecLoop)) { 1758 LIOps.push_back(Ops[i]); 1759 Ops.erase(Ops.begin()+i); 1760 --i; --e; 1761 } 1762 1763 // If we found some loop invariants, fold them into the recurrence. 1764 if (!LIOps.empty()) { 1765 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 1766 SmallVector<const SCEV *, 4> NewOps; 1767 NewOps.reserve(AddRec->getNumOperands()); 1768 const SCEV *Scale = getMulExpr(LIOps); 1769 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 1770 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i))); 1771 1772 // Build the new addrec. Propagate the NUW and NSW flags if both the 1773 // outer mul and the inner addrec are guaranteed to have no overflow. 1774 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, 1775 HasNUW && AddRec->hasNoUnsignedWrap(), 1776 HasNSW && AddRec->hasNoSignedWrap()); 1777 1778 // If all of the other operands were loop invariant, we are done. 1779 if (Ops.size() == 1) return NewRec; 1780 1781 // Otherwise, multiply the folded AddRec by the non-liv parts. 1782 for (unsigned i = 0;; ++i) 1783 if (Ops[i] == AddRec) { 1784 Ops[i] = NewRec; 1785 break; 1786 } 1787 return getMulExpr(Ops); 1788 } 1789 1790 // Okay, if there weren't any loop invariants to be folded, check to see if 1791 // there are multiple AddRec's with the same loop induction variable being 1792 // multiplied together. If so, we can fold them. 1793 for (unsigned OtherIdx = Idx+1; 1794 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 1795 ++OtherIdx) 1796 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 1797 // F * G, where F = {A,+,B}<L> and G = {C,+,D}<L> --> 1798 // {A*C,+,F*D + G*B + B*D}<L> 1799 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 1800 ++OtherIdx) 1801 if (const SCEVAddRecExpr *OtherAddRec = 1802 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx])) 1803 if (OtherAddRec->getLoop() == AddRecLoop) { 1804 const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec; 1805 const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart()); 1806 const SCEV *B = F->getStepRecurrence(*this); 1807 const SCEV *D = G->getStepRecurrence(*this); 1808 const SCEV *NewStep = getAddExpr(getMulExpr(F, D), 1809 getMulExpr(G, B), 1810 getMulExpr(B, D)); 1811 const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep, 1812 F->getLoop()); 1813 if (Ops.size() == 2) return NewAddRec; 1814 Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec); 1815 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 1816 } 1817 return getMulExpr(Ops); 1818 } 1819 1820 // Otherwise couldn't fold anything into this recurrence. Move onto the 1821 // next one. 1822 } 1823 1824 // Okay, it looks like we really DO need an mul expr. Check to see if we 1825 // already have one, otherwise create a new one. 
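// The profile is (scMulExpr, operand pointers), the same profile used when a
// multiply node is first created, so the lookup below finds any structurally
// identical expression already in UniqueSCEVs.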
1826 FoldingSetNodeID ID; 1827 ID.AddInteger(scMulExpr); 1828 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1829 ID.AddPointer(Ops[i]); 1830 void *IP = 0; 1831 SCEVMulExpr *S = 1832 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1833 if (!S) { 1834 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 1835 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 1836 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), 1837 O, Ops.size()); 1838 UniqueSCEVs.InsertNode(S, IP); 1839 } 1840 if (HasNUW) S->setHasNoUnsignedWrap(true); 1841 if (HasNSW) S->setHasNoSignedWrap(true); 1842 return S; 1843 } 1844 1845 /// getUDivExpr - Get a canonical unsigned division expression, or something 1846 /// simpler if possible. 1847 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 1848 const SCEV *RHS) { 1849 assert(getEffectiveSCEVType(LHS->getType()) == 1850 getEffectiveSCEVType(RHS->getType()) && 1851 "SCEVUDivExpr operand types don't match!"); 1852 1853 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 1854 if (RHSC->getValue()->equalsInt(1)) 1855 return LHS; // X udiv 1 --> x 1856 // If the denominator is zero, the result of the udiv is undefined. Don't 1857 // try to analyze it, because the resolution chosen here may differ from 1858 // the resolution chosen in other parts of the compiler. 1859 if (!RHSC->getValue()->isZero()) { 1860 // Determine if the division can be folded into the operands of 1861 // its operands. 1862 // TODO: Generalize this to non-constants by using known-bits information. 1863 const Type *Ty = LHS->getType(); 1864 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros(); 1865 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; 1866 // For non-power-of-two values, effectively round the value up to the 1867 // nearest power of two. 1868 if (!RHSC->getValue()->getValue().isPowerOf2()) 1869 ++MaxShiftAmt; 1870 const IntegerType *ExtTy = 1871 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 1872 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 1873 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 1874 if (const SCEVConstant *Step = 1875 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) 1876 if (!Step->getValue()->getValue() 1877 .urem(RHSC->getValue()->getValue()) && 1878 getZeroExtendExpr(AR, ExtTy) == 1879 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 1880 getZeroExtendExpr(Step, ExtTy), 1881 AR->getLoop())) { 1882 SmallVector<const SCEV *, 4> Operands; 1883 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i) 1884 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS)); 1885 return getAddRecExpr(Operands, AR->getLoop()); 1886 } 1887 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 1888 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 1889 SmallVector<const SCEV *, 4> Operands; 1890 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) 1891 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy)); 1892 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 1893 // Find an operand that's safely divisible. 
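// For example, once the zero-extension check above succeeds, (8*x) udiv 4
// rewrites to 2*x because the constant 8 is evenly divisible by 4.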
1894 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1895 const SCEV *Op = M->getOperand(i);
1896 const SCEV *Div = getUDivExpr(Op, RHSC);
1897 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1898 Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
1899 M->op_end());
1900 Operands[i] = Div;
1901 return getMulExpr(Operands);
1902 }
1903 }
1904 }
1905 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1906 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
1907 SmallVector<const SCEV *, 4> Operands;
1908 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1909 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1910 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1911 Operands.clear();
1912 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1913 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1914 if (isa<SCEVUDivExpr>(Op) ||
1915 getMulExpr(Op, RHS) != A->getOperand(i))
1916 break;
1917 Operands.push_back(Op);
1918 }
1919 if (Operands.size() == A->getNumOperands())
1920 return getAddExpr(Operands);
1921 }
1922 }
1923 
1924 // Fold if both operands are constant.
1925 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1926 Constant *LHSCV = LHSC->getValue();
1927 Constant *RHSCV = RHSC->getValue();
1928 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
1929 RHSCV)));
1930 }
1931 }
1932 }
1933 
1934 FoldingSetNodeID ID;
1935 ID.AddInteger(scUDivExpr);
1936 ID.AddPointer(LHS);
1937 ID.AddPointer(RHS);
1938 void *IP = 0;
1939 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1940 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
1941 LHS, RHS);
1942 UniqueSCEVs.InsertNode(S, IP);
1943 return S;
1944 }
1945 
1946 
1947 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1948 /// Simplify the expression as much as possible.
1949 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1950 const SCEV *Step, const Loop *L,
1951 bool HasNUW, bool HasNSW) {
1952 SmallVector<const SCEV *, 4> Operands;
1953 Operands.push_back(Start);
1954 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1955 if (StepChrec->getLoop() == L) {
1956 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
1957 return getAddRecExpr(Operands, L);
1958 }
1959 
1960 Operands.push_back(Step);
1961 return getAddRecExpr(Operands, L, HasNUW, HasNSW);
1962 }
1963 
1964 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1965 /// Simplify the expression as much as possible.
1966 const SCEV *
1967 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
1968 const Loop *L,
1969 bool HasNUW, bool HasNSW) {
1970 if (Operands.size() == 1) return Operands[0];
1971 #ifndef NDEBUG
1972 const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
1973 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1974 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
1975 "SCEVAddRecExpr operand types don't match!");
1976 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1977 assert(isLoopInvariant(Operands[i], L) &&
1978 "SCEVAddRecExpr operand is not loop-invariant!");
1979 #endif
1980 
1981 if (Operands.back()->isZero()) {
1982 Operands.pop_back();
1983 return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0} --> X
1984 }
1985 
1986 // It's tempting to call getMaxBackedgeTakenCount here and
1987 // use that information to infer NUW and NSW flags.
However, computing a 1988 // BE count requires calling getAddRecExpr, so we may not yet have a 1989 // meaningful BE count at this point (and if we don't, we'd be stuck 1990 // with a SCEVCouldNotCompute as the cached BE count). 1991 1992 // If HasNSW is true and all the operands are non-negative, infer HasNUW. 1993 if (!HasNUW && HasNSW) { 1994 bool All = true; 1995 for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(), 1996 E = Operands.end(); I != E; ++I) 1997 if (!isKnownNonNegative(*I)) { 1998 All = false; 1999 break; 2000 } 2001 if (All) HasNUW = true; 2002 } 2003 2004 // Canonicalize nested AddRecs in by nesting them in order of loop depth. 2005 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 2006 const Loop *NestedLoop = NestedAR->getLoop(); 2007 if (L->contains(NestedLoop) ? 2008 (L->getLoopDepth() < NestedLoop->getLoopDepth()) : 2009 (!NestedLoop->contains(L) && 2010 DT->dominates(L->getHeader(), NestedLoop->getHeader()))) { 2011 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 2012 NestedAR->op_end()); 2013 Operands[0] = NestedAR->getStart(); 2014 // AddRecs require their operands be loop-invariant with respect to their 2015 // loops. Don't perform this transformation if it would break this 2016 // requirement. 2017 bool AllInvariant = true; 2018 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 2019 if (!isLoopInvariant(Operands[i], L)) { 2020 AllInvariant = false; 2021 break; 2022 } 2023 if (AllInvariant) { 2024 NestedOperands[0] = getAddRecExpr(Operands, L); 2025 AllInvariant = true; 2026 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i) 2027 if (!isLoopInvariant(NestedOperands[i], NestedLoop)) { 2028 AllInvariant = false; 2029 break; 2030 } 2031 if (AllInvariant) 2032 // Ok, both add recurrences are valid after the transformation. 2033 return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW); 2034 } 2035 // Reset Operands to its original state. 2036 Operands[0] = NestedAR; 2037 } 2038 } 2039 2040 // Okay, it looks like we really DO need an addrec expr. Check to see if we 2041 // already have one, otherwise create a new one. 
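// Note that the loop pointer is part of the profile below, so {X,+,Y}<L1> and
// {X,+,Y}<L2> remain distinct nodes.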
2042 FoldingSetNodeID ID; 2043 ID.AddInteger(scAddRecExpr); 2044 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 2045 ID.AddPointer(Operands[i]); 2046 ID.AddPointer(L); 2047 void *IP = 0; 2048 SCEVAddRecExpr *S = 2049 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2050 if (!S) { 2051 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 2052 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 2053 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 2054 O, Operands.size(), L); 2055 UniqueSCEVs.InsertNode(S, IP); 2056 } 2057 if (HasNUW) S->setHasNoUnsignedWrap(true); 2058 if (HasNSW) S->setHasNoSignedWrap(true); 2059 return S; 2060 } 2061 2062 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 2063 const SCEV *RHS) { 2064 SmallVector<const SCEV *, 2> Ops; 2065 Ops.push_back(LHS); 2066 Ops.push_back(RHS); 2067 return getSMaxExpr(Ops); 2068 } 2069 2070 const SCEV * 2071 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 2072 assert(!Ops.empty() && "Cannot get empty smax!"); 2073 if (Ops.size() == 1) return Ops[0]; 2074 #ifndef NDEBUG 2075 const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2076 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2077 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2078 "SCEVSMaxExpr operand types don't match!"); 2079 #endif 2080 2081 // Sort by complexity, this groups all similar expression types together. 2082 GroupByComplexity(Ops, LI); 2083 2084 // If there are any constants, fold them together. 2085 unsigned Idx = 0; 2086 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2087 ++Idx; 2088 assert(Idx < Ops.size()); 2089 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2090 // We found two constants, fold them together! 2091 ConstantInt *Fold = ConstantInt::get(getContext(), 2092 APIntOps::smax(LHSC->getValue()->getValue(), 2093 RHSC->getValue()->getValue())); 2094 Ops[0] = getConstant(Fold); 2095 Ops.erase(Ops.begin()+1); // Erase the folded element 2096 if (Ops.size() == 1) return Ops[0]; 2097 LHSC = cast<SCEVConstant>(Ops[0]); 2098 } 2099 2100 // If we are left with a constant minimum-int, strip it off. 2101 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 2102 Ops.erase(Ops.begin()); 2103 --Idx; 2104 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 2105 // If we have an smax with a constant maximum-int, it will always be 2106 // maximum-int. 2107 return Ops[0]; 2108 } 2109 2110 if (Ops.size() == 1) return Ops[0]; 2111 } 2112 2113 // Find the first SMax 2114 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 2115 ++Idx; 2116 2117 // Check to see if one of the operands is an SMax. If so, expand its operands 2118 // onto our operand list, and recurse to simplify. 2119 if (Idx < Ops.size()) { 2120 bool DeletedSMax = false; 2121 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 2122 Ops.erase(Ops.begin()+Idx); 2123 Ops.append(SMax->op_begin(), SMax->op_end()); 2124 DeletedSMax = true; 2125 } 2126 2127 if (DeletedSMax) 2128 return getSMaxExpr(Ops); 2129 } 2130 2131 // Okay, check to see if the same value occurs in the operand list twice. If 2132 // so, delete one. Since we sorted the list, these values are required to 2133 // be adjacent. 
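// For example, smax(x, x, y) reduces to smax(x, y) here, and an operand that
// is provably <= another operand is dropped entirely.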
2134 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 2135 // X smax Y smax Y --> X smax Y 2136 // X smax Y --> X, if X is always greater than Y 2137 if (Ops[i] == Ops[i+1] || 2138 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { 2139 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 2140 --i; --e; 2141 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { 2142 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 2143 --i; --e; 2144 } 2145 2146 if (Ops.size() == 1) return Ops[0]; 2147 2148 assert(!Ops.empty() && "Reduced smax down to nothing!"); 2149 2150 // Okay, it looks like we really DO need an smax expr. Check to see if we 2151 // already have one, otherwise create a new one. 2152 FoldingSetNodeID ID; 2153 ID.AddInteger(scSMaxExpr); 2154 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2155 ID.AddPointer(Ops[i]); 2156 void *IP = 0; 2157 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2158 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2159 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2160 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 2161 O, Ops.size()); 2162 UniqueSCEVs.InsertNode(S, IP); 2163 return S; 2164 } 2165 2166 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 2167 const SCEV *RHS) { 2168 SmallVector<const SCEV *, 2> Ops; 2169 Ops.push_back(LHS); 2170 Ops.push_back(RHS); 2171 return getUMaxExpr(Ops); 2172 } 2173 2174 const SCEV * 2175 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 2176 assert(!Ops.empty() && "Cannot get empty umax!"); 2177 if (Ops.size() == 1) return Ops[0]; 2178 #ifndef NDEBUG 2179 const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2180 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2181 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2182 "SCEVUMaxExpr operand types don't match!"); 2183 #endif 2184 2185 // Sort by complexity, this groups all similar expression types together. 2186 GroupByComplexity(Ops, LI); 2187 2188 // If there are any constants, fold them together. 2189 unsigned Idx = 0; 2190 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2191 ++Idx; 2192 assert(Idx < Ops.size()); 2193 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2194 // We found two constants, fold them together! 2195 ConstantInt *Fold = ConstantInt::get(getContext(), 2196 APIntOps::umax(LHSC->getValue()->getValue(), 2197 RHSC->getValue()->getValue())); 2198 Ops[0] = getConstant(Fold); 2199 Ops.erase(Ops.begin()+1); // Erase the folded element 2200 if (Ops.size() == 1) return Ops[0]; 2201 LHSC = cast<SCEVConstant>(Ops[0]); 2202 } 2203 2204 // If we are left with a constant minimum-int, strip it off. 2205 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 2206 Ops.erase(Ops.begin()); 2207 --Idx; 2208 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 2209 // If we have an umax with a constant maximum-int, it will always be 2210 // maximum-int. 2211 return Ops[0]; 2212 } 2213 2214 if (Ops.size() == 1) return Ops[0]; 2215 } 2216 2217 // Find the first UMax 2218 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 2219 ++Idx; 2220 2221 // Check to see if one of the operands is a UMax. If so, expand its operands 2222 // onto our operand list, and recurse to simplify. 
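// For example, umax(a, umax(b, c)) is flattened to umax(a, b, c) before the
// duplicate and known-predicate folds below.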
2223 if (Idx < Ops.size()) { 2224 bool DeletedUMax = false; 2225 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 2226 Ops.erase(Ops.begin()+Idx); 2227 Ops.append(UMax->op_begin(), UMax->op_end()); 2228 DeletedUMax = true; 2229 } 2230 2231 if (DeletedUMax) 2232 return getUMaxExpr(Ops); 2233 } 2234 2235 // Okay, check to see if the same value occurs in the operand list twice. If 2236 // so, delete one. Since we sorted the list, these values are required to 2237 // be adjacent. 2238 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 2239 // X umax Y umax Y --> X umax Y 2240 // X umax Y --> X, if X is always greater than Y 2241 if (Ops[i] == Ops[i+1] || 2242 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) { 2243 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 2244 --i; --e; 2245 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) { 2246 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 2247 --i; --e; 2248 } 2249 2250 if (Ops.size() == 1) return Ops[0]; 2251 2252 assert(!Ops.empty() && "Reduced umax down to nothing!"); 2253 2254 // Okay, it looks like we really DO need a umax expr. Check to see if we 2255 // already have one, otherwise create a new one. 2256 FoldingSetNodeID ID; 2257 ID.AddInteger(scUMaxExpr); 2258 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2259 ID.AddPointer(Ops[i]); 2260 void *IP = 0; 2261 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2262 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2263 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2264 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), 2265 O, Ops.size()); 2266 UniqueSCEVs.InsertNode(S, IP); 2267 return S; 2268 } 2269 2270 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 2271 const SCEV *RHS) { 2272 // ~smax(~x, ~y) == smin(x, y). 2273 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 2274 } 2275 2276 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 2277 const SCEV *RHS) { 2278 // ~umax(~x, ~y) == umin(x, y) 2279 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 2280 } 2281 2282 const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) { 2283 // If we have TargetData, we can bypass creating a target-independent 2284 // constant expression and then folding it back into a ConstantInt. 2285 // This is just a compile-time optimization. 2286 if (TD) 2287 return getConstant(TD->getIntPtrType(getContext()), 2288 TD->getTypeAllocSize(AllocTy)); 2289 2290 Constant *C = ConstantExpr::getSizeOf(AllocTy); 2291 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2292 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2293 C = Folded; 2294 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy)); 2295 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2296 } 2297 2298 const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) { 2299 Constant *C = ConstantExpr::getAlignOf(AllocTy); 2300 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2301 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2302 C = Folded; 2303 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy)); 2304 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2305 } 2306 2307 const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy, 2308 unsigned FieldNo) { 2309 // If we have TargetData, we can bypass creating a target-independent 2310 // constant expression and then folding it back into a ConstantInt. 
2311 // This is just a compile-time optimization. 2312 if (TD) 2313 return getConstant(TD->getIntPtrType(getContext()), 2314 TD->getStructLayout(STy)->getElementOffset(FieldNo)); 2315 2316 Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo); 2317 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2318 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2319 C = Folded; 2320 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy)); 2321 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2322 } 2323 2324 const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy, 2325 Constant *FieldNo) { 2326 Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo); 2327 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2328 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2329 C = Folded; 2330 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy)); 2331 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2332 } 2333 2334 const SCEV *ScalarEvolution::getUnknown(Value *V) { 2335 // Don't attempt to do anything other than create a SCEVUnknown object 2336 // here. createSCEV only calls getUnknown after checking for all other 2337 // interesting possibilities, and any other code that calls getUnknown 2338 // is doing so in order to hide a value from SCEV canonicalization. 2339 2340 FoldingSetNodeID ID; 2341 ID.AddInteger(scUnknown); 2342 ID.AddPointer(V); 2343 void *IP = 0; 2344 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 2345 assert(cast<SCEVUnknown>(S)->getValue() == V && 2346 "Stale SCEVUnknown in uniquing map!"); 2347 return S; 2348 } 2349 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 2350 FirstUnknown); 2351 FirstUnknown = cast<SCEVUnknown>(S); 2352 UniqueSCEVs.InsertNode(S, IP); 2353 return S; 2354 } 2355 2356 //===----------------------------------------------------------------------===// 2357 // Basic SCEV Analysis and PHI Idiom Recognition Code 2358 // 2359 2360 /// isSCEVable - Test if values of the given type are analyzable within 2361 /// the SCEV framework. This primarily includes integer types, and it 2362 /// can optionally include pointer types if the ScalarEvolution class 2363 /// has access to target-specific information. 2364 bool ScalarEvolution::isSCEVable(const Type *Ty) const { 2365 // Integers and pointers are always SCEVable. 2366 return Ty->isIntegerTy() || Ty->isPointerTy(); 2367 } 2368 2369 /// getTypeSizeInBits - Return the size in bits of the specified type, 2370 /// for which isSCEVable must return true. 2371 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const { 2372 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 2373 2374 // If we have a TargetData, use it! 2375 if (TD) 2376 return TD->getTypeSizeInBits(Ty); 2377 2378 // Integer types have fixed sizes. 2379 if (Ty->isIntegerTy()) 2380 return Ty->getPrimitiveSizeInBits(); 2381 2382 // The only other support type is pointer. Without TargetData, conservatively 2383 // assume pointers are 64-bit. 2384 assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!"); 2385 return 64; 2386 } 2387 2388 /// getEffectiveSCEVType - Return a type with the same bitwidth as 2389 /// the given type and which represents how SCEV will treat the given 2390 /// type, for which isSCEVable must return true. For pointer types, 2391 /// this is the pointer-sized integer type. 
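/// For example, with TargetData describing 64-bit pointers, an i8* value is
/// treated as i64; integer types are returned unchanged.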
2392 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const { 2393 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 2394 2395 if (Ty->isIntegerTy()) 2396 return Ty; 2397 2398 // The only other support type is pointer. 2399 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); 2400 if (TD) return TD->getIntPtrType(getContext()); 2401 2402 // Without TargetData, conservatively assume pointers are 64-bit. 2403 return Type::getInt64Ty(getContext()); 2404 } 2405 2406 const SCEV *ScalarEvolution::getCouldNotCompute() { 2407 return &CouldNotCompute; 2408 } 2409 2410 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the 2411 /// expression and create a new one. 2412 const SCEV *ScalarEvolution::getSCEV(Value *V) { 2413 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 2414 2415 ValueExprMapType::const_iterator I = ValueExprMap.find(V); 2416 if (I != ValueExprMap.end()) return I->second; 2417 const SCEV *S = createSCEV(V); 2418 2419 // The process of creating a SCEV for V may have caused other SCEVs 2420 // to have been created, so it's necessary to insert the new entry 2421 // from scratch, rather than trying to remember the insert position 2422 // above. 2423 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S)); 2424 return S; 2425 } 2426 2427 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V 2428 /// 2429 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) { 2430 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 2431 return getConstant( 2432 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 2433 2434 const Type *Ty = V->getType(); 2435 Ty = getEffectiveSCEVType(Ty); 2436 return getMulExpr(V, 2437 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)))); 2438 } 2439 2440 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V 2441 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 2442 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 2443 return getConstant( 2444 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 2445 2446 const Type *Ty = V->getType(); 2447 Ty = getEffectiveSCEVType(Ty); 2448 const SCEV *AllOnes = 2449 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 2450 return getMinusSCEV(AllOnes, V); 2451 } 2452 2453 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS. 2454 /// 2455 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 2456 bool HasNUW, bool HasNSW) { 2457 // Fast path: X - X --> 0. 2458 if (LHS == RHS) 2459 return getConstant(LHS->getType(), 0); 2460 2461 // X - Y --> X + -Y 2462 return getAddExpr(LHS, getNegativeSCEV(RHS), HasNUW, HasNSW); 2463 } 2464 2465 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the 2466 /// input value to the specified type. If the type must be extended, it is zero 2467 /// extended. 
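/// If the source and destination types have the same bit width, the value is
/// returned unchanged.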
2468 const SCEV * 2469 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) { 2470 const Type *SrcTy = V->getType(); 2471 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2472 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2473 "Cannot truncate or zero extend with non-integer arguments!"); 2474 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2475 return V; // No conversion 2476 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 2477 return getTruncateExpr(V, Ty); 2478 return getZeroExtendExpr(V, Ty); 2479 } 2480 2481 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the 2482 /// input value to the specified type. If the type must be extended, it is sign 2483 /// extended. 2484 const SCEV * 2485 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, 2486 const Type *Ty) { 2487 const Type *SrcTy = V->getType(); 2488 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2489 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2490 "Cannot truncate or zero extend with non-integer arguments!"); 2491 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2492 return V; // No conversion 2493 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 2494 return getTruncateExpr(V, Ty); 2495 return getSignExtendExpr(V, Ty); 2496 } 2497 2498 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the 2499 /// input value to the specified type. If the type must be extended, it is zero 2500 /// extended. The conversion must not be narrowing. 2501 const SCEV * 2502 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) { 2503 const Type *SrcTy = V->getType(); 2504 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2505 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2506 "Cannot noop or zero extend with non-integer arguments!"); 2507 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2508 "getNoopOrZeroExtend cannot truncate!"); 2509 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2510 return V; // No conversion 2511 return getZeroExtendExpr(V, Ty); 2512 } 2513 2514 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the 2515 /// input value to the specified type. If the type must be extended, it is sign 2516 /// extended. The conversion must not be narrowing. 2517 const SCEV * 2518 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) { 2519 const Type *SrcTy = V->getType(); 2520 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2521 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2522 "Cannot noop or sign extend with non-integer arguments!"); 2523 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2524 "getNoopOrSignExtend cannot truncate!"); 2525 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2526 return V; // No conversion 2527 return getSignExtendExpr(V, Ty); 2528 } 2529 2530 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of 2531 /// the input value to the specified type. If the type must be extended, 2532 /// it is extended with unspecified bits. The conversion must not be 2533 /// narrowing. 
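/// Any-extension is appropriate when the caller does not care which bits fill
/// the high part, for example because they will be truncated away again.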
2534 const SCEV *
2535 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2536 const Type *SrcTy = V->getType();
2537 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2538 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2539 "Cannot noop or any extend with non-integer arguments!");
2540 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2541 "getNoopOrAnyExtend cannot truncate!");
2542 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2543 return V; // No conversion
2544 return getAnyExtendExpr(V, Ty);
2545 }
2546 
2547 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2548 /// input value to the specified type. The conversion must not be widening.
2549 const SCEV *
2550 ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2551 const Type *SrcTy = V->getType();
2552 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2553 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2554 "Cannot truncate or noop with non-integer arguments!");
2555 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2556 "getTruncateOrNoop cannot extend!");
2557 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2558 return V; // No conversion
2559 return getTruncateExpr(V, Ty);
2560 }
2561 
2562 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2563 /// the types using zero-extension, and then perform a umax operation
2564 /// with them.
2565 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2566 const SCEV *RHS) {
2567 const SCEV *PromotedLHS = LHS;
2568 const SCEV *PromotedRHS = RHS;
2569 
2570 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2571 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2572 else
2573 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2574 
2575 return getUMaxExpr(PromotedLHS, PromotedRHS);
2576 }
2577 
2578 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
2579 /// the types using zero-extension, and then perform a umin operation
2580 /// with them.
2581 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2582 const SCEV *RHS) {
2583 const SCEV *PromotedLHS = LHS;
2584 const SCEV *PromotedRHS = RHS;
2585 
2586 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2587 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2588 else
2589 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2590 
2591 return getUMinExpr(PromotedLHS, PromotedRHS);
2592 }
2593 
2594 /// PushDefUseChildren - Push users of the given Instruction
2595 /// onto the given Worklist.
2596 static void
2597 PushDefUseChildren(Instruction *I,
2598 SmallVectorImpl<Instruction *> &Worklist) {
2599 // Push the def-use children onto the Worklist stack.
2600 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2601 UI != UE; ++UI)
2602 Worklist.push_back(cast<Instruction>(*UI));
2603 }
2604 
2605 /// ForgetSymbolicName - This looks up computed SCEV values for all
2606 /// instructions that depend on the given instruction and removes them from
2607 /// the ValueExprMap if they reference SymName. This is used during PHI
2608 /// resolution.
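/// createNodeForPHI temporarily maps the PHI to a SCEVUnknown placeholder;
/// once the real recurrence is known, expressions built on the placeholder
/// must be invalidated so they are recomputed against the recurrence.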
2609 void 2610 ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) { 2611 SmallVector<Instruction *, 16> Worklist; 2612 PushDefUseChildren(PN, Worklist); 2613 2614 SmallPtrSet<Instruction *, 8> Visited; 2615 Visited.insert(PN); 2616 while (!Worklist.empty()) { 2617 Instruction *I = Worklist.pop_back_val(); 2618 if (!Visited.insert(I)) continue; 2619 2620 ValueExprMapType::iterator It = 2621 ValueExprMap.find(static_cast<Value *>(I)); 2622 if (It != ValueExprMap.end()) { 2623 const SCEV *Old = It->second; 2624 2625 // Short-circuit the def-use traversal if the symbolic name 2626 // ceases to appear in expressions. 2627 if (Old != SymName && !hasOperand(Old, SymName)) 2628 continue; 2629 2630 // SCEVUnknown for a PHI either means that it has an unrecognized 2631 // structure, it's a PHI that's in the progress of being computed 2632 // by createNodeForPHI, or it's a single-value PHI. In the first case, 2633 // additional loop trip count information isn't going to change anything. 2634 // In the second case, createNodeForPHI will perform the necessary 2635 // updates on its own when it gets to that point. In the third, we do 2636 // want to forget the SCEVUnknown. 2637 if (!isa<PHINode>(I) || 2638 !isa<SCEVUnknown>(Old) || 2639 (I != PN && Old == SymName)) { 2640 forgetMemoizedResults(Old); 2641 ValueExprMap.erase(It); 2642 } 2643 } 2644 2645 PushDefUseChildren(I, Worklist); 2646 } 2647 } 2648 2649 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in 2650 /// a loop header, making it a potential recurrence, or it doesn't. 2651 /// 2652 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 2653 if (const Loop *L = LI->getLoopFor(PN->getParent())) 2654 if (L->getHeader() == PN->getParent()) { 2655 // The loop may have multiple entrances or multiple exits; we can analyze 2656 // this phi as an addrec if it has a unique entry value and a unique 2657 // backedge value. 2658 Value *BEValueV = 0, *StartValueV = 0; 2659 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 2660 Value *V = PN->getIncomingValue(i); 2661 if (L->contains(PN->getIncomingBlock(i))) { 2662 if (!BEValueV) { 2663 BEValueV = V; 2664 } else if (BEValueV != V) { 2665 BEValueV = 0; 2666 break; 2667 } 2668 } else if (!StartValueV) { 2669 StartValueV = V; 2670 } else if (StartValueV != V) { 2671 StartValueV = 0; 2672 break; 2673 } 2674 } 2675 if (BEValueV && StartValueV) { 2676 // While we are analyzing this PHI node, handle its value symbolically. 2677 const SCEV *SymbolicName = getUnknown(PN); 2678 assert(ValueExprMap.find(PN) == ValueExprMap.end() && 2679 "PHI node already processed?"); 2680 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName)); 2681 2682 // Using this symbolic name for the PHI, analyze the value coming around 2683 // the back-edge. 2684 const SCEV *BEValue = getSCEV(BEValueV); 2685 2686 // NOTE: If BEValue is loop invariant, we know that the PHI node just 2687 // has a special value for the first iteration of the loop. 2688 2689 // If the value coming around the backedge is an add with the symbolic 2690 // value we just inserted, then we found a simple induction variable! 2691 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 2692 // If there is a single occurrence of the symbolic value, replace it 2693 // with a recurrence. 
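// For example, for i = phi(start, i + step), BEValue is
// (SymbolicName + step); the leftover operand 'step' becomes Accum below and
// the PHI becomes the recurrence {start,+,step}.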
2694 unsigned FoundIndex = Add->getNumOperands(); 2695 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 2696 if (Add->getOperand(i) == SymbolicName) 2697 if (FoundIndex == e) { 2698 FoundIndex = i; 2699 break; 2700 } 2701 2702 if (FoundIndex != Add->getNumOperands()) { 2703 // Create an add with everything but the specified operand. 2704 SmallVector<const SCEV *, 8> Ops; 2705 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 2706 if (i != FoundIndex) 2707 Ops.push_back(Add->getOperand(i)); 2708 const SCEV *Accum = getAddExpr(Ops); 2709 2710 // This is not a valid addrec if the step amount is varying each 2711 // loop iteration, but is not itself an addrec in this loop. 2712 if (isLoopInvariant(Accum, L) || 2713 (isa<SCEVAddRecExpr>(Accum) && 2714 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 2715 bool HasNUW = false; 2716 bool HasNSW = false; 2717 2718 // If the increment doesn't overflow, then neither the addrec nor 2719 // the post-increment will overflow. 2720 if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) { 2721 if (OBO->hasNoUnsignedWrap()) 2722 HasNUW = true; 2723 if (OBO->hasNoSignedWrap()) 2724 HasNSW = true; 2725 } else if (const GEPOperator *GEP = 2726 dyn_cast<GEPOperator>(BEValueV)) { 2727 // If the increment is a GEP, then we know it won't perform an 2728 // unsigned overflow, because the address space cannot be 2729 // wrapped around. 2730 HasNUW |= GEP->isInBounds(); 2731 } 2732 2733 const SCEV *StartVal = getSCEV(StartValueV); 2734 const SCEV *PHISCEV = 2735 getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW); 2736 2737 // Since the no-wrap flags are on the increment, they apply to the 2738 // post-incremented value as well. 2739 if (isLoopInvariant(Accum, L)) 2740 (void)getAddRecExpr(getAddExpr(StartVal, Accum), 2741 Accum, L, HasNUW, HasNSW); 2742 2743 // Okay, for the entire analysis of this edge we assumed the PHI 2744 // to be symbolic. We now need to go back and purge all of the 2745 // entries for the scalars that use the symbolic expression. 2746 ForgetSymbolicName(PN, SymbolicName); 2747 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 2748 return PHISCEV; 2749 } 2750 } 2751 } else if (const SCEVAddRecExpr *AddRec = 2752 dyn_cast<SCEVAddRecExpr>(BEValue)) { 2753 // Otherwise, this could be a loop like this: 2754 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 2755 // In this case, j = {1,+,1} and BEValue is j. 2756 // Because the other in-value of i (0) fits the evolution of BEValue 2757 // i really is an addrec evolution. 2758 if (AddRec->getLoop() == L && AddRec->isAffine()) { 2759 const SCEV *StartVal = getSCEV(StartValueV); 2760 2761 // If StartVal = j.start - j.stride, we can use StartVal as the 2762 // initial step of the addrec evolution. 2763 if (StartVal == getMinusSCEV(AddRec->getOperand(0), 2764 AddRec->getOperand(1))) { 2765 const SCEV *PHISCEV = 2766 getAddRecExpr(StartVal, AddRec->getOperand(1), L); 2767 2768 // Okay, for the entire analysis of this edge we assumed the PHI 2769 // to be symbolic. We now need to go back and purge all of the 2770 // entries for the scalars that use the symbolic expression. 2771 ForgetSymbolicName(PN, SymbolicName); 2772 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 2773 return PHISCEV; 2774 } 2775 } 2776 } 2777 } 2778 } 2779 2780 // If the PHI has a single incoming value, follow that value, unless the 2781 // PHI's incoming blocks are in a different loop, in which case doing so 2782 // risks breaking LCSSA form. 
Instcombine would normally zap these, but 2783 // it doesn't have DominatorTree information, so it may miss cases. 2784 if (Value *V = SimplifyInstruction(PN, TD, DT)) 2785 if (LI->replacementPreservesLCSSAForm(PN, V)) 2786 return getSCEV(V); 2787 2788 // If it's not a loop phi, we can't handle it yet. 2789 return getUnknown(PN); 2790 } 2791 2792 /// createNodeForGEP - Expand GEP instructions into add and multiply 2793 /// operations. This allows them to be analyzed by regular SCEV code. 2794 /// 2795 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 2796 2797 // Don't blindly transfer the inbounds flag from the GEP instruction to the 2798 // Add expression, because the Instruction may be guarded by control flow 2799 // and the no-overflow bits may not be valid for the expression in any 2800 // context. 2801 2802 const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType()); 2803 Value *Base = GEP->getOperand(0); 2804 // Don't attempt to analyze GEPs over unsized objects. 2805 if (!cast<PointerType>(Base->getType())->getElementType()->isSized()) 2806 return getUnknown(GEP); 2807 const SCEV *TotalOffset = getConstant(IntPtrTy, 0); 2808 gep_type_iterator GTI = gep_type_begin(GEP); 2809 for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()), 2810 E = GEP->op_end(); 2811 I != E; ++I) { 2812 Value *Index = *I; 2813 // Compute the (potentially symbolic) offset in bytes for this index. 2814 if (const StructType *STy = dyn_cast<StructType>(*GTI++)) { 2815 // For a struct, add the member offset. 2816 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue(); 2817 const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo); 2818 2819 // Add the field offset to the running total offset. 2820 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 2821 } else { 2822 // For an array, add the element offset, explicitly scaled. 2823 const SCEV *ElementSize = getSizeOfExpr(*GTI); 2824 const SCEV *IndexS = getSCEV(Index); 2825 // Getelementptr indices are signed. 2826 IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy); 2827 2828 // Multiply the index by the element size to compute the element offset. 2829 const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize); 2830 2831 // Add the element offset to the running total offset. 2832 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 2833 } 2834 } 2835 2836 // Get the SCEV for the GEP base. 2837 const SCEV *BaseS = getSCEV(Base); 2838 2839 // Add the total offset from all the GEP indices to the base. 2840 return getAddExpr(BaseS, TotalOffset); 2841 } 2842 2843 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is 2844 /// guaranteed to end in (at every loop iteration). It is, at the same time, 2845 /// the minimum number of times S is divisible by 2. For example, given {4,+,8} 2846 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S. 2847 uint32_t 2848 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 2849 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 2850 return C->getValue()->getValue().countTrailingZeros(); 2851 2852 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 2853 return std::min(GetMinTrailingZeros(T->getOperand()), 2854 (uint32_t)getTypeSizeInBits(T->getType())); 2855 2856 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 2857 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 2858 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? 
2859 getTypeSizeInBits(E->getType()) : OpRes; 2860 } 2861 2862 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 2863 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 2864 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? 2865 getTypeSizeInBits(E->getType()) : OpRes; 2866 } 2867 2868 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 2869 // The result is the min of all operands results. 2870 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 2871 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 2872 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 2873 return MinOpRes; 2874 } 2875 2876 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 2877 // The result is the sum of all operands results. 2878 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 2879 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 2880 for (unsigned i = 1, e = M->getNumOperands(); 2881 SumOpRes != BitWidth && i != e; ++i) 2882 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), 2883 BitWidth); 2884 return SumOpRes; 2885 } 2886 2887 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 2888 // The result is the min of all operands results. 2889 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 2890 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 2891 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 2892 return MinOpRes; 2893 } 2894 2895 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 2896 // The result is the min of all operands results. 2897 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 2898 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 2899 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 2900 return MinOpRes; 2901 } 2902 2903 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 2904 // The result is the min of all operands results. 2905 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 2906 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 2907 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 2908 return MinOpRes; 2909 } 2910 2911 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2912 // For a SCEVUnknown, ask ValueTracking. 2913 unsigned BitWidth = getTypeSizeInBits(U->getType()); 2914 APInt Mask = APInt::getAllOnesValue(BitWidth); 2915 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 2916 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones); 2917 return Zeros.countTrailingOnes(); 2918 } 2919 2920 // SCEVUDivExpr 2921 return 0; 2922 } 2923 2924 /// getUnsignedRange - Determine the unsigned range for a particular SCEV. 2925 /// 2926 ConstantRange 2927 ScalarEvolution::getUnsignedRange(const SCEV *S) { 2928 // See if we've computed this range already. 2929 DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S); 2930 if (I != UnsignedRanges.end()) 2931 return I->second; 2932 2933 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 2934 return setUnsignedRange(C, ConstantRange(C->getValue()->getValue())); 2935 2936 unsigned BitWidth = getTypeSizeInBits(S->getType()); 2937 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 2938 2939 // If the value has known zeros, the maximum unsigned value will have those 2940 // known zeros as well. 
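// For example, if S is known to be a multiple of 8 (at least three trailing
// zero bits), the range below excludes everything above the largest multiple
// of 8 that fits in the type.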
2941 uint32_t TZ = GetMinTrailingZeros(S); 2942 if (TZ != 0) 2943 ConservativeResult = 2944 ConstantRange(APInt::getMinValue(BitWidth), 2945 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 2946 2947 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 2948 ConstantRange X = getUnsignedRange(Add->getOperand(0)); 2949 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 2950 X = X.add(getUnsignedRange(Add->getOperand(i))); 2951 return setUnsignedRange(Add, ConservativeResult.intersectWith(X)); 2952 } 2953 2954 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 2955 ConstantRange X = getUnsignedRange(Mul->getOperand(0)); 2956 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 2957 X = X.multiply(getUnsignedRange(Mul->getOperand(i))); 2958 return setUnsignedRange(Mul, ConservativeResult.intersectWith(X)); 2959 } 2960 2961 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 2962 ConstantRange X = getUnsignedRange(SMax->getOperand(0)); 2963 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 2964 X = X.smax(getUnsignedRange(SMax->getOperand(i))); 2965 return setUnsignedRange(SMax, ConservativeResult.intersectWith(X)); 2966 } 2967 2968 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 2969 ConstantRange X = getUnsignedRange(UMax->getOperand(0)); 2970 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 2971 X = X.umax(getUnsignedRange(UMax->getOperand(i))); 2972 return setUnsignedRange(UMax, ConservativeResult.intersectWith(X)); 2973 } 2974 2975 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 2976 ConstantRange X = getUnsignedRange(UDiv->getLHS()); 2977 ConstantRange Y = getUnsignedRange(UDiv->getRHS()); 2978 return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y))); 2979 } 2980 2981 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 2982 ConstantRange X = getUnsignedRange(ZExt->getOperand()); 2983 return setUnsignedRange(ZExt, 2984 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 2985 } 2986 2987 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 2988 ConstantRange X = getUnsignedRange(SExt->getOperand()); 2989 return setUnsignedRange(SExt, 2990 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 2991 } 2992 2993 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 2994 ConstantRange X = getUnsignedRange(Trunc->getOperand()); 2995 return setUnsignedRange(Trunc, 2996 ConservativeResult.intersectWith(X.truncate(BitWidth))); 2997 } 2998 2999 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 3000 // If there's no unsigned wrap, the value will never be less than its 3001 // initial value. 
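// Illustrative example (added commentary): an addrec such as {5,+,1}<nuw>
// can never unsigned-wrap back below its start, so the intersection below
// yields the wrapped range [5, 0), i.e. the value is always at least 5.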
3002 if (AddRec->hasNoUnsignedWrap()) 3003 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 3004 if (!C->getValue()->isZero()) 3005 ConservativeResult = 3006 ConservativeResult.intersectWith( 3007 ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0))); 3008 3009 // TODO: non-affine addrec 3010 if (AddRec->isAffine()) { 3011 const Type *Ty = AddRec->getType(); 3012 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 3013 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 3014 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 3015 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); 3016 3017 const SCEV *Start = AddRec->getStart(); 3018 const SCEV *Step = AddRec->getStepRecurrence(*this); 3019 3020 ConstantRange StartRange = getUnsignedRange(Start); 3021 ConstantRange StepRange = getSignedRange(Step); 3022 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); 3023 ConstantRange EndRange = 3024 StartRange.add(MaxBECountRange.multiply(StepRange)); 3025 3026 // Check for overflow. This must be done with ConstantRange arithmetic 3027 // because we could be called from within the ScalarEvolution overflow 3028 // checking code. 3029 ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1); 3030 ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1); 3031 ConstantRange ExtMaxBECountRange = 3032 MaxBECountRange.zextOrTrunc(BitWidth*2+1); 3033 ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1); 3034 if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) != 3035 ExtEndRange) 3036 return setUnsignedRange(AddRec, ConservativeResult); 3037 3038 APInt Min = APIntOps::umin(StartRange.getUnsignedMin(), 3039 EndRange.getUnsignedMin()); 3040 APInt Max = APIntOps::umax(StartRange.getUnsignedMax(), 3041 EndRange.getUnsignedMax()); 3042 if (Min.isMinValue() && Max.isMaxValue()) 3043 return setUnsignedRange(AddRec, ConservativeResult); 3044 return setUnsignedRange(AddRec, 3045 ConservativeResult.intersectWith(ConstantRange(Min, Max+1))); 3046 } 3047 } 3048 3049 return setUnsignedRange(AddRec, ConservativeResult); 3050 } 3051 3052 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 3053 // For a SCEVUnknown, ask ValueTracking. 3054 APInt Mask = APInt::getAllOnesValue(BitWidth); 3055 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 3056 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD); 3057 if (Ones == ~Zeros + 1) 3058 return setUnsignedRange(U, ConservativeResult); 3059 return setUnsignedRange(U, 3060 ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1))); 3061 } 3062 3063 return setUnsignedRange(S, ConservativeResult); 3064 } 3065 3066 /// getSignedRange - Determine the signed range for a particular SCEV. 3067 /// 3068 ConstantRange 3069 ScalarEvolution::getSignedRange(const SCEV *S) { 3070 DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S); 3071 if (I != SignedRanges.end()) 3072 return I->second; 3073 3074 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 3075 return setSignedRange(C, ConstantRange(C->getValue()->getValue())); 3076 3077 unsigned BitWidth = getTypeSizeInBits(S->getType()); 3078 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 3079 3080 // If the value has known zeros, the maximum signed value will have those 3081 // known zeros as well. 
3082 uint32_t TZ = GetMinTrailingZeros(S); 3083 if (TZ != 0) 3084 ConservativeResult = 3085 ConstantRange(APInt::getSignedMinValue(BitWidth), 3086 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 3087 3088 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 3089 ConstantRange X = getSignedRange(Add->getOperand(0)); 3090 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 3091 X = X.add(getSignedRange(Add->getOperand(i))); 3092 return setSignedRange(Add, ConservativeResult.intersectWith(X)); 3093 } 3094 3095 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 3096 ConstantRange X = getSignedRange(Mul->getOperand(0)); 3097 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 3098 X = X.multiply(getSignedRange(Mul->getOperand(i))); 3099 return setSignedRange(Mul, ConservativeResult.intersectWith(X)); 3100 } 3101 3102 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 3103 ConstantRange X = getSignedRange(SMax->getOperand(0)); 3104 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 3105 X = X.smax(getSignedRange(SMax->getOperand(i))); 3106 return setSignedRange(SMax, ConservativeResult.intersectWith(X)); 3107 } 3108 3109 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 3110 ConstantRange X = getSignedRange(UMax->getOperand(0)); 3111 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 3112 X = X.umax(getSignedRange(UMax->getOperand(i))); 3113 return setSignedRange(UMax, ConservativeResult.intersectWith(X)); 3114 } 3115 3116 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 3117 ConstantRange X = getSignedRange(UDiv->getLHS()); 3118 ConstantRange Y = getSignedRange(UDiv->getRHS()); 3119 return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y))); 3120 } 3121 3122 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 3123 ConstantRange X = getSignedRange(ZExt->getOperand()); 3124 return setSignedRange(ZExt, 3125 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 3126 } 3127 3128 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 3129 ConstantRange X = getSignedRange(SExt->getOperand()); 3130 return setSignedRange(SExt, 3131 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 3132 } 3133 3134 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 3135 ConstantRange X = getSignedRange(Trunc->getOperand()); 3136 return setSignedRange(Trunc, 3137 ConservativeResult.intersectWith(X.truncate(BitWidth))); 3138 } 3139 3140 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 3141 // If there's no signed wrap, and all the operands have the same sign or 3142 // zero, the value won't ever change sign. 
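// Illustrative example (added commentary): for {1,+,2}<nsw> every operand is
// known non-negative, so the intersection below restricts the range to
// [0, INT_MIN), i.e. the non-negative values of the type.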
3143 if (AddRec->hasNoSignedWrap()) { 3144 bool AllNonNeg = true; 3145 bool AllNonPos = true; 3146 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 3147 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 3148 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 3149 } 3150 if (AllNonNeg) 3151 ConservativeResult = ConservativeResult.intersectWith( 3152 ConstantRange(APInt(BitWidth, 0), 3153 APInt::getSignedMinValue(BitWidth))); 3154 else if (AllNonPos) 3155 ConservativeResult = ConservativeResult.intersectWith( 3156 ConstantRange(APInt::getSignedMinValue(BitWidth), 3157 APInt(BitWidth, 1))); 3158 } 3159 3160 // TODO: non-affine addrec 3161 if (AddRec->isAffine()) { 3162 const Type *Ty = AddRec->getType(); 3163 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 3164 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 3165 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 3166 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); 3167 3168 const SCEV *Start = AddRec->getStart(); 3169 const SCEV *Step = AddRec->getStepRecurrence(*this); 3170 3171 ConstantRange StartRange = getSignedRange(Start); 3172 ConstantRange StepRange = getSignedRange(Step); 3173 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); 3174 ConstantRange EndRange = 3175 StartRange.add(MaxBECountRange.multiply(StepRange)); 3176 3177 // Check for overflow. This must be done with ConstantRange arithmetic 3178 // because we could be called from within the ScalarEvolution overflow 3179 // checking code. 3180 ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1); 3181 ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1); 3182 ConstantRange ExtMaxBECountRange = 3183 MaxBECountRange.zextOrTrunc(BitWidth*2+1); 3184 ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1); 3185 if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) != 3186 ExtEndRange) 3187 return setSignedRange(AddRec, ConservativeResult); 3188 3189 APInt Min = APIntOps::smin(StartRange.getSignedMin(), 3190 EndRange.getSignedMin()); 3191 APInt Max = APIntOps::smax(StartRange.getSignedMax(), 3192 EndRange.getSignedMax()); 3193 if (Min.isMinSignedValue() && Max.isMaxSignedValue()) 3194 return setSignedRange(AddRec, ConservativeResult); 3195 return setSignedRange(AddRec, 3196 ConservativeResult.intersectWith(ConstantRange(Min, Max+1))); 3197 } 3198 } 3199 3200 return setSignedRange(AddRec, ConservativeResult); 3201 } 3202 3203 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 3204 // For a SCEVUnknown, ask ValueTracking. 3205 if (!U->getValue()->getType()->isIntegerTy() && !TD) 3206 return setSignedRange(U, ConservativeResult); 3207 unsigned NS = ComputeNumSignBits(U->getValue(), TD); 3208 if (NS == 1) 3209 return setSignedRange(U, ConservativeResult); 3210 return setSignedRange(U, ConservativeResult.intersectWith( 3211 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 3212 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1))); 3213 } 3214 3215 return setSignedRange(S, ConservativeResult); 3216 } 3217 3218 /// createSCEV - We know that there is no SCEV for the specified value. 3219 /// Analyze the expression. 
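/// (Illustrative note, added for exposition: for an instruction such as
/// 'add i32 %a, %b' this dispatches on the opcode and builds
/// getAddExpr(getSCEV(%a), getSCEV(%b)); anything that cannot be modeled
/// falls back to getUnknown.)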
3220 /// 3221 const SCEV *ScalarEvolution::createSCEV(Value *V) { 3222 if (!isSCEVable(V->getType())) 3223 return getUnknown(V); 3224 3225 unsigned Opcode = Instruction::UserOp1; 3226 if (Instruction *I = dyn_cast<Instruction>(V)) { 3227 Opcode = I->getOpcode(); 3228 3229 // Don't attempt to analyze instructions in blocks that aren't 3230 // reachable. Such instructions don't matter, and they aren't required 3231 // to obey basic rules for definitions dominating uses which this 3232 // analysis depends on. 3233 if (!DT->isReachableFromEntry(I->getParent())) 3234 return getUnknown(V); 3235 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) 3236 Opcode = CE->getOpcode(); 3237 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 3238 return getConstant(CI); 3239 else if (isa<ConstantPointerNull>(V)) 3240 return getConstant(V->getType(), 0); 3241 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 3242 return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee()); 3243 else 3244 return getUnknown(V); 3245 3246 Operator *U = cast<Operator>(V); 3247 switch (Opcode) { 3248 case Instruction::Add: { 3249 // The simple thing to do would be to just call getSCEV on both operands 3250 // and call getAddExpr with the result. However if we're looking at a 3251 // bunch of things all added together, this can be quite inefficient, 3252 // because it leads to N-1 getAddExpr calls for N ultimate operands. 3253 // Instead, gather up all the operands and make a single getAddExpr call. 3254 // LLVM IR canonical form means we need only traverse the left operands. 3255 SmallVector<const SCEV *, 4> AddOps; 3256 AddOps.push_back(getSCEV(U->getOperand(1))); 3257 for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) { 3258 unsigned Opcode = Op->getValueID() - Value::InstructionVal; 3259 if (Opcode != Instruction::Add && Opcode != Instruction::Sub) 3260 break; 3261 U = cast<Operator>(Op); 3262 const SCEV *Op1 = getSCEV(U->getOperand(1)); 3263 if (Opcode == Instruction::Sub) 3264 AddOps.push_back(getNegativeSCEV(Op1)); 3265 else 3266 AddOps.push_back(Op1); 3267 } 3268 AddOps.push_back(getSCEV(U->getOperand(0))); 3269 return getAddExpr(AddOps); 3270 } 3271 case Instruction::Mul: { 3272 // See the Add code above. 3273 SmallVector<const SCEV *, 4> MulOps; 3274 MulOps.push_back(getSCEV(U->getOperand(1))); 3275 for (Value *Op = U->getOperand(0); 3276 Op->getValueID() == Instruction::Mul + Value::InstructionVal; 3277 Op = U->getOperand(0)) { 3278 U = cast<Operator>(Op); 3279 MulOps.push_back(getSCEV(U->getOperand(1))); 3280 } 3281 MulOps.push_back(getSCEV(U->getOperand(0))); 3282 return getMulExpr(MulOps); 3283 } 3284 case Instruction::UDiv: 3285 return getUDivExpr(getSCEV(U->getOperand(0)), 3286 getSCEV(U->getOperand(1))); 3287 case Instruction::Sub: 3288 return getMinusSCEV(getSCEV(U->getOperand(0)), 3289 getSCEV(U->getOperand(1))); 3290 case Instruction::And: 3291 // For an expression like x&255 that merely masks off the high bits, 3292 // use zext(trunc(x)) as the SCEV expression. 3293 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3294 if (CI->isNullValue()) 3295 return getSCEV(U->getOperand(1)); 3296 if (CI->isAllOnesValue()) 3297 return getSCEV(U->getOperand(0)); 3298 const APInt &A = CI->getValue(); 3299 3300 // Instcombine's ShrinkDemandedConstant may strip bits out of 3301 // constants, obscuring what would otherwise be a low-bits mask. 3302 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant 3303 // knew about to reconstruct a low-bits mask value. 
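// Illustrative example (added commentary): if %x is known to have its low
// bit clear, instcombine may have rewritten '%x & 255' as '%x & 254'; the
// known-zero bits recovered below let the expression still be modeled as
// zext(trunc(%x to i8) to i32).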
3304 unsigned LZ = A.countLeadingZeros(); 3305 unsigned BitWidth = A.getBitWidth(); 3306 APInt AllOnes = APInt::getAllOnesValue(BitWidth); 3307 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 3308 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD); 3309 3310 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ); 3311 3312 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask)) 3313 return 3314 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)), 3315 IntegerType::get(getContext(), BitWidth - LZ)), 3316 U->getType()); 3317 } 3318 break; 3319 3320 case Instruction::Or: 3321 // If the RHS of the Or is a constant, we may have something like: 3322 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 3323 // optimizations will transparently handle this case. 3324 // 3325 // In order for this transformation to be safe, the LHS must be of the 3326 // form X*(2^n) and the Or constant must be less than 2^n. 3327 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3328 const SCEV *LHS = getSCEV(U->getOperand(0)); 3329 const APInt &CIVal = CI->getValue(); 3330 if (GetMinTrailingZeros(LHS) >= 3331 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 3332 // Build a plain add SCEV. 3333 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 3334 // If the LHS of the add was an addrec and it has no-wrap flags, 3335 // transfer the no-wrap flags, since an or won't introduce a wrap. 3336 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 3337 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 3338 if (OldAR->hasNoUnsignedWrap()) 3339 const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true); 3340 if (OldAR->hasNoSignedWrap()) 3341 const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true); 3342 } 3343 return S; 3344 } 3345 } 3346 break; 3347 case Instruction::Xor: 3348 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3349 // If the RHS of the xor is a signbit, then this is just an add. 3350 // Instcombine turns add of signbit into xor as a strength reduction step. 3351 if (CI->getValue().isSignBit()) 3352 return getAddExpr(getSCEV(U->getOperand(0)), 3353 getSCEV(U->getOperand(1))); 3354 3355 // If the RHS of xor is -1, then this is a not operation. 3356 if (CI->isAllOnesValue()) 3357 return getNotSCEV(getSCEV(U->getOperand(0))); 3358 3359 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 3360 // This is a variant of the check for xor with -1, and it handles 3361 // the case where instcombine has trimmed non-demanded bits out 3362 // of an xor with -1. 3363 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) 3364 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1))) 3365 if (BO->getOpcode() == Instruction::And && 3366 LCI->getValue() == CI->getValue()) 3367 if (const SCEVZeroExtendExpr *Z = 3368 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) { 3369 const Type *UTy = U->getType(); 3370 const SCEV *Z0 = Z->getOperand(); 3371 const Type *Z0Ty = Z0->getType(); 3372 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 3373 3374 // If C is a low-bits mask, the zero extend is serving to 3375 // mask off the high bits. Complement the operand and 3376 // re-apply the zext. 3377 if (APIntOps::isMask(Z0TySize, CI->getValue())) 3378 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 3379 3380 // If C is a single bit, it may be in the sign-bit position 3381 // before the zero-extend. 
In this case, represent the xor
3382 // using an add, which is equivalent, and re-apply the zext.
3383 APInt Trunc = CI->getValue().trunc(Z0TySize);
3384 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3385 Trunc.isSignBit())
3386 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3387 UTy);
3388 }
3389 }
3390 break;
3391 
3392 case Instruction::Shl:
3393 // Turn shift left of a constant amount into a multiply.
3394 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3395 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3396 
3397 // If the shift count is not less than the bitwidth, the result of
3398 // the shift is undefined. Don't try to analyze it, because the
3399 // resolution chosen here may differ from the resolution chosen in
3400 // other parts of the compiler.
3401 if (SA->getValue().uge(BitWidth))
3402 break;
3403 
3404 Constant *X = ConstantInt::get(getContext(),
3405 APInt(BitWidth, 1).shl(SA->getZExtValue()));
3406 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3407 }
3408 break;
3409 
3410 case Instruction::LShr:
3411 // Turn logical shift right of a constant into an unsigned divide.
3412 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3413 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3414 
3415 // If the shift count is not less than the bitwidth, the result of
3416 // the shift is undefined. Don't try to analyze it, because the
3417 // resolution chosen here may differ from the resolution chosen in
3418 // other parts of the compiler.
3419 if (SA->getValue().uge(BitWidth))
3420 break;
3421 
3422 Constant *X = ConstantInt::get(getContext(),
3423 APInt(BitWidth, 1).shl(SA->getZExtValue()));
3424 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3425 }
3426 break;
3427 
3428 case Instruction::AShr:
3429 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
3430 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3431 if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
3432 if (L->getOpcode() == Instruction::Shl &&
3433 L->getOperand(1) == U->getOperand(1)) {
3434 uint64_t BitWidth = getTypeSizeInBits(U->getType());
3435 
3436 // If the shift count is not less than the bitwidth, the result of
3437 // the shift is undefined. Don't try to analyze it, because the
3438 // resolution chosen here may differ from the resolution chosen in
3439 // other parts of the compiler.
3440 if (CI->getValue().uge(BitWidth))
3441 break;
3442 
3443 uint64_t Amt = BitWidth - CI->getZExtValue();
3444 if (Amt == BitWidth)
3445 return getSCEV(L->getOperand(0)); // shift by zero --> noop
3446 return
3447 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3448 IntegerType::get(getContext(),
3449 Amt)),
3450 U->getType());
3451 }
3452 break;
3453 
3454 case Instruction::Trunc:
3455 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3456 
3457 case Instruction::ZExt:
3458 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3459 
3460 case Instruction::SExt:
3461 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3462 
3463 case Instruction::BitCast:
3464 // BitCasts are no-op casts so we just eliminate the cast.
3465 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 3466 return getSCEV(U->getOperand(0)); 3467 break; 3468 3469 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can 3470 // lead to pointer expressions which cannot safely be expanded to GEPs, 3471 // because ScalarEvolution doesn't respect the GEP aliasing rules when 3472 // simplifying integer expressions. 3473 3474 case Instruction::GetElementPtr: 3475 return createNodeForGEP(cast<GEPOperator>(U)); 3476 3477 case Instruction::PHI: 3478 return createNodeForPHI(cast<PHINode>(U)); 3479 3480 case Instruction::Select: 3481 // This could be a smax or umax that was lowered earlier. 3482 // Try to recover it. 3483 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) { 3484 Value *LHS = ICI->getOperand(0); 3485 Value *RHS = ICI->getOperand(1); 3486 switch (ICI->getPredicate()) { 3487 case ICmpInst::ICMP_SLT: 3488 case ICmpInst::ICMP_SLE: 3489 std::swap(LHS, RHS); 3490 // fall through 3491 case ICmpInst::ICMP_SGT: 3492 case ICmpInst::ICMP_SGE: 3493 // a >s b ? a+x : b+x -> smax(a, b)+x 3494 // a >s b ? b+x : a+x -> smin(a, b)+x 3495 if (LHS->getType() == U->getType()) { 3496 const SCEV *LS = getSCEV(LHS); 3497 const SCEV *RS = getSCEV(RHS); 3498 const SCEV *LA = getSCEV(U->getOperand(1)); 3499 const SCEV *RA = getSCEV(U->getOperand(2)); 3500 const SCEV *LDiff = getMinusSCEV(LA, LS); 3501 const SCEV *RDiff = getMinusSCEV(RA, RS); 3502 if (LDiff == RDiff) 3503 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 3504 LDiff = getMinusSCEV(LA, RS); 3505 RDiff = getMinusSCEV(RA, LS); 3506 if (LDiff == RDiff) 3507 return getAddExpr(getSMinExpr(LS, RS), LDiff); 3508 } 3509 break; 3510 case ICmpInst::ICMP_ULT: 3511 case ICmpInst::ICMP_ULE: 3512 std::swap(LHS, RHS); 3513 // fall through 3514 case ICmpInst::ICMP_UGT: 3515 case ICmpInst::ICMP_UGE: 3516 // a >u b ? a+x : b+x -> umax(a, b)+x 3517 // a >u b ? b+x : a+x -> umin(a, b)+x 3518 if (LHS->getType() == U->getType()) { 3519 const SCEV *LS = getSCEV(LHS); 3520 const SCEV *RS = getSCEV(RHS); 3521 const SCEV *LA = getSCEV(U->getOperand(1)); 3522 const SCEV *RA = getSCEV(U->getOperand(2)); 3523 const SCEV *LDiff = getMinusSCEV(LA, LS); 3524 const SCEV *RDiff = getMinusSCEV(RA, RS); 3525 if (LDiff == RDiff) 3526 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 3527 LDiff = getMinusSCEV(LA, RS); 3528 RDiff = getMinusSCEV(RA, LS); 3529 if (LDiff == RDiff) 3530 return getAddExpr(getUMinExpr(LS, RS), LDiff); 3531 } 3532 break; 3533 case ICmpInst::ICMP_NE: 3534 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 3535 if (LHS->getType() == U->getType() && 3536 isa<ConstantInt>(RHS) && 3537 cast<ConstantInt>(RHS)->isZero()) { 3538 const SCEV *One = getConstant(LHS->getType(), 1); 3539 const SCEV *LS = getSCEV(LHS); 3540 const SCEV *LA = getSCEV(U->getOperand(1)); 3541 const SCEV *RA = getSCEV(U->getOperand(2)); 3542 const SCEV *LDiff = getMinusSCEV(LA, LS); 3543 const SCEV *RDiff = getMinusSCEV(RA, One); 3544 if (LDiff == RDiff) 3545 return getAddExpr(getUMaxExpr(One, LS), LDiff); 3546 } 3547 break; 3548 case ICmpInst::ICMP_EQ: 3549 // n == 0 ? 
1+x : n+x -> umax(n, 1)+x 3550 if (LHS->getType() == U->getType() && 3551 isa<ConstantInt>(RHS) && 3552 cast<ConstantInt>(RHS)->isZero()) { 3553 const SCEV *One = getConstant(LHS->getType(), 1); 3554 const SCEV *LS = getSCEV(LHS); 3555 const SCEV *LA = getSCEV(U->getOperand(1)); 3556 const SCEV *RA = getSCEV(U->getOperand(2)); 3557 const SCEV *LDiff = getMinusSCEV(LA, One); 3558 const SCEV *RDiff = getMinusSCEV(RA, LS); 3559 if (LDiff == RDiff) 3560 return getAddExpr(getUMaxExpr(One, LS), LDiff); 3561 } 3562 break; 3563 default: 3564 break; 3565 } 3566 } 3567 3568 default: // We cannot analyze this expression. 3569 break; 3570 } 3571 3572 return getUnknown(V); 3573 } 3574 3575 3576 3577 //===----------------------------------------------------------------------===// 3578 // Iteration Count Computation Code 3579 // 3580 3581 /// getBackedgeTakenCount - If the specified loop has a predictable 3582 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute 3583 /// object. The backedge-taken count is the number of times the loop header 3584 /// will be branched to from within the loop. This is one less than the 3585 /// trip count of the loop, since it doesn't count the first iteration, 3586 /// when the header is branched to from outside the loop. 3587 /// 3588 /// Note that it is not valid to call this method on a loop without a 3589 /// loop-invariant backedge-taken count (see 3590 /// hasLoopInvariantBackedgeTakenCount). 3591 /// 3592 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 3593 return getBackedgeTakenInfo(L).Exact; 3594 } 3595 3596 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except 3597 /// return the least SCEV value that is known never to be less than the 3598 /// actual backedge taken count. 3599 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 3600 return getBackedgeTakenInfo(L).Max; 3601 } 3602 3603 /// PushLoopPHIs - Push PHI nodes in the header of the given loop 3604 /// onto the given Worklist. 3605 static void 3606 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 3607 BasicBlock *Header = L->getHeader(); 3608 3609 // Push all Loop-header PHIs onto the Worklist stack. 3610 for (BasicBlock::iterator I = Header->begin(); 3611 PHINode *PN = dyn_cast<PHINode>(I); ++I) 3612 Worklist.push_back(PN); 3613 } 3614 3615 const ScalarEvolution::BackedgeTakenInfo & 3616 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 3617 // Initially insert a CouldNotCompute for this loop. If the insertion 3618 // succeeds, proceed to actually compute a backedge-taken count and 3619 // update the value. The temporary CouldNotCompute value tells SCEV 3620 // code elsewhere that it shouldn't attempt to request a new 3621 // backedge-taken count, which could result in infinite recursion. 3622 std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 3623 BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute())); 3624 if (!Pair.second) 3625 return Pair.first->second; 3626 3627 BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L); 3628 if (BECount.Exact != getCouldNotCompute()) { 3629 assert(isLoopInvariant(BECount.Exact, L) && 3630 isLoopInvariant(BECount.Max, L) && 3631 "Computed backedge-taken count isn't loop invariant for loop!"); 3632 ++NumTripCountsComputed; 3633 3634 // Update the value in the map. 3635 Pair.first->second = BECount; 3636 } else { 3637 if (BECount.Max != getCouldNotCompute()) 3638 // Update the value in the map. 
3639 Pair.first->second = BECount;
3640 if (isa<PHINode>(L->getHeader()->begin()))
3641 // Only count loops that have phi nodes as not being computable.
3642 ++NumTripCountsNotComputed;
3643 }
3644 
3645 // Now that we know more about the trip count for this loop, forget any
3646 // existing SCEV values for PHI nodes in this loop since they are only
3647 // conservative estimates made without the benefit of trip count
3648 // information. This is similar to the code in forgetLoop, except that
3649 // it handles SCEVUnknown PHI nodes specially.
3650 if (BECount.hasAnyInfo()) {
3651 SmallVector<Instruction *, 16> Worklist;
3652 PushLoopPHIs(L, Worklist);
3653 
3654 SmallPtrSet<Instruction *, 8> Visited;
3655 while (!Worklist.empty()) {
3656 Instruction *I = Worklist.pop_back_val();
3657 if (!Visited.insert(I)) continue;
3658 
3659 ValueExprMapType::iterator It =
3660 ValueExprMap.find(static_cast<Value *>(I));
3661 if (It != ValueExprMap.end()) {
3662 const SCEV *Old = It->second;
3663 
3664 // SCEVUnknown for a PHI either means that it has an unrecognized
3665 // structure, or it's a PHI that's in the process of being computed
3666 // by createNodeForPHI. In the former case, additional loop trip
3667 // count information isn't going to change anything. In the latter
3668 // case, createNodeForPHI will perform the necessary updates on its
3669 // own when it gets to that point.
3670 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
3671 forgetMemoizedResults(Old);
3672 ValueExprMap.erase(It);
3673 }
3674 if (PHINode *PN = dyn_cast<PHINode>(I))
3675 ConstantEvolutionLoopExitValue.erase(PN);
3676 }
3677 
3678 PushDefUseChildren(I, Worklist);
3679 }
3680 }
3681 return Pair.first->second;
3682 }
3683 
3684 /// forgetLoop - This method should be called by the client when it has
3685 /// changed a loop in a way that may affect ScalarEvolution's ability to
3686 /// compute a trip count, or if the loop is deleted.
3687 void ScalarEvolution::forgetLoop(const Loop *L) {
3688 // Drop any stored trip count value.
3689 BackedgeTakenCounts.erase(L);
3690 
3691 // Drop information about expressions based on loop-header PHIs.
3692 SmallVector<Instruction *, 16> Worklist;
3693 PushLoopPHIs(L, Worklist);
3694 
3695 SmallPtrSet<Instruction *, 8> Visited;
3696 while (!Worklist.empty()) {
3697 Instruction *I = Worklist.pop_back_val();
3698 if (!Visited.insert(I)) continue;
3699 
3700 ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
3701 if (It != ValueExprMap.end()) {
3702 forgetMemoizedResults(It->second);
3703 ValueExprMap.erase(It);
3704 if (PHINode *PN = dyn_cast<PHINode>(I))
3705 ConstantEvolutionLoopExitValue.erase(PN);
3706 }
3707 
3708 PushDefUseChildren(I, Worklist);
3709 }
3710 
3711 // Forget all contained loops too, to avoid dangling entries in the
3712 // ValuesAtScopes map.
3713 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
3714 forgetLoop(*I);
3715 }
3716 
3717 /// forgetValue - This method should be called by the client when it has
3718 /// changed a value in a way that may affect its computed value, or which may
3719 /// disconnect it from a def-use chain linking it to a loop.
3720 void ScalarEvolution::forgetValue(Value *V) {
3721 Instruction *I = dyn_cast<Instruction>(V);
3722 if (!I) return;
3723 
3724 // Drop information about expressions based on loop-header PHIs.
3725 SmallVector<Instruction *, 16> Worklist; 3726 Worklist.push_back(I); 3727 3728 SmallPtrSet<Instruction *, 8> Visited; 3729 while (!Worklist.empty()) { 3730 I = Worklist.pop_back_val(); 3731 if (!Visited.insert(I)) continue; 3732 3733 ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I)); 3734 if (It != ValueExprMap.end()) { 3735 forgetMemoizedResults(It->second); 3736 ValueExprMap.erase(It); 3737 if (PHINode *PN = dyn_cast<PHINode>(I)) 3738 ConstantEvolutionLoopExitValue.erase(PN); 3739 } 3740 3741 PushDefUseChildren(I, Worklist); 3742 } 3743 } 3744 3745 /// ComputeBackedgeTakenCount - Compute the number of times the backedge 3746 /// of the specified loop will execute. 3747 ScalarEvolution::BackedgeTakenInfo 3748 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) { 3749 SmallVector<BasicBlock *, 8> ExitingBlocks; 3750 L->getExitingBlocks(ExitingBlocks); 3751 3752 // Examine all exits and pick the most conservative values. 3753 const SCEV *BECount = getCouldNotCompute(); 3754 const SCEV *MaxBECount = getCouldNotCompute(); 3755 bool CouldNotComputeBECount = false; 3756 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 3757 BackedgeTakenInfo NewBTI = 3758 ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]); 3759 3760 if (NewBTI.Exact == getCouldNotCompute()) { 3761 // We couldn't compute an exact value for this exit, so 3762 // we won't be able to compute an exact value for the loop. 3763 CouldNotComputeBECount = true; 3764 BECount = getCouldNotCompute(); 3765 } else if (!CouldNotComputeBECount) { 3766 if (BECount == getCouldNotCompute()) 3767 BECount = NewBTI.Exact; 3768 else 3769 BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact); 3770 } 3771 if (MaxBECount == getCouldNotCompute()) 3772 MaxBECount = NewBTI.Max; 3773 else if (NewBTI.Max != getCouldNotCompute()) 3774 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max); 3775 } 3776 3777 return BackedgeTakenInfo(BECount, MaxBECount); 3778 } 3779 3780 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge 3781 /// of the specified loop will execute if it exits via the specified block. 3782 ScalarEvolution::BackedgeTakenInfo 3783 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L, 3784 BasicBlock *ExitingBlock) { 3785 3786 // Okay, we've chosen an exiting block. See what condition causes us to 3787 // exit at this block. 3788 // 3789 // FIXME: we should be able to handle switch instructions (with a single exit) 3790 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 3791 if (ExitBr == 0) return getCouldNotCompute(); 3792 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!"); 3793 3794 // At this point, we know we have a conditional branch that determines whether 3795 // the loop is exited. However, we don't know if the branch is executed each 3796 // time through the loop. If not, then the execution count of the branch will 3797 // not be equal to the trip count of the loop. 3798 // 3799 // Currently we check for this by checking to see if the Exit branch goes to 3800 // the loop header. If so, we know it will always execute the same number of 3801 // times as the loop. We also handle the case where the exit block *is* the 3802 // loop header. This is common for un-rotated loops. 3803 // 3804 // If both of those tests fail, walk up the unique predecessor chain to the 3805 // header, stopping if there is an edge that doesn't exit the loop. 
If the 3806 // header is reached, the execution count of the branch will be equal to the 3807 // trip count of the loop. 3808 // 3809 // More extensive analysis could be done to handle more cases here. 3810 // 3811 if (ExitBr->getSuccessor(0) != L->getHeader() && 3812 ExitBr->getSuccessor(1) != L->getHeader() && 3813 ExitBr->getParent() != L->getHeader()) { 3814 // The simple checks failed, try climbing the unique predecessor chain 3815 // up to the header. 3816 bool Ok = false; 3817 for (BasicBlock *BB = ExitBr->getParent(); BB; ) { 3818 BasicBlock *Pred = BB->getUniquePredecessor(); 3819 if (!Pred) 3820 return getCouldNotCompute(); 3821 TerminatorInst *PredTerm = Pred->getTerminator(); 3822 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) { 3823 BasicBlock *PredSucc = PredTerm->getSuccessor(i); 3824 if (PredSucc == BB) 3825 continue; 3826 // If the predecessor has a successor that isn't BB and isn't 3827 // outside the loop, assume the worst. 3828 if (L->contains(PredSucc)) 3829 return getCouldNotCompute(); 3830 } 3831 if (Pred == L->getHeader()) { 3832 Ok = true; 3833 break; 3834 } 3835 BB = Pred; 3836 } 3837 if (!Ok) 3838 return getCouldNotCompute(); 3839 } 3840 3841 // Proceed to the next level to examine the exit condition expression. 3842 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(), 3843 ExitBr->getSuccessor(0), 3844 ExitBr->getSuccessor(1)); 3845 } 3846 3847 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the 3848 /// backedge of the specified loop will execute if its exit condition 3849 /// were a conditional branch of ExitCond, TBB, and FBB. 3850 ScalarEvolution::BackedgeTakenInfo 3851 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L, 3852 Value *ExitCond, 3853 BasicBlock *TBB, 3854 BasicBlock *FBB) { 3855 // Check if the controlling expression for this loop is an And or Or. 3856 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 3857 if (BO->getOpcode() == Instruction::And) { 3858 // Recurse on the operands of the and. 3859 BackedgeTakenInfo BTI0 = 3860 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); 3861 BackedgeTakenInfo BTI1 = 3862 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); 3863 const SCEV *BECount = getCouldNotCompute(); 3864 const SCEV *MaxBECount = getCouldNotCompute(); 3865 if (L->contains(TBB)) { 3866 // Both conditions must be true for the loop to continue executing. 3867 // Choose the less conservative count. 3868 if (BTI0.Exact == getCouldNotCompute() || 3869 BTI1.Exact == getCouldNotCompute()) 3870 BECount = getCouldNotCompute(); 3871 else 3872 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 3873 if (BTI0.Max == getCouldNotCompute()) 3874 MaxBECount = BTI1.Max; 3875 else if (BTI1.Max == getCouldNotCompute()) 3876 MaxBECount = BTI0.Max; 3877 else 3878 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max); 3879 } else { 3880 // Both conditions must be true at the same time for the loop to exit. 3881 // For now, be conservative. 3882 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 3883 if (BTI0.Max == BTI1.Max) 3884 MaxBECount = BTI0.Max; 3885 if (BTI0.Exact == BTI1.Exact) 3886 BECount = BTI0.Exact; 3887 } 3888 3889 return BackedgeTakenInfo(BECount, MaxBECount); 3890 } 3891 if (BO->getOpcode() == Instruction::Or) { 3892 // Recurse on the operands of the or. 
3893 BackedgeTakenInfo BTI0 = 3894 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); 3895 BackedgeTakenInfo BTI1 = 3896 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); 3897 const SCEV *BECount = getCouldNotCompute(); 3898 const SCEV *MaxBECount = getCouldNotCompute(); 3899 if (L->contains(FBB)) { 3900 // Both conditions must be false for the loop to continue executing. 3901 // Choose the less conservative count. 3902 if (BTI0.Exact == getCouldNotCompute() || 3903 BTI1.Exact == getCouldNotCompute()) 3904 BECount = getCouldNotCompute(); 3905 else 3906 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 3907 if (BTI0.Max == getCouldNotCompute()) 3908 MaxBECount = BTI1.Max; 3909 else if (BTI1.Max == getCouldNotCompute()) 3910 MaxBECount = BTI0.Max; 3911 else 3912 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max); 3913 } else { 3914 // Both conditions must be false at the same time for the loop to exit. 3915 // For now, be conservative. 3916 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 3917 if (BTI0.Max == BTI1.Max) 3918 MaxBECount = BTI0.Max; 3919 if (BTI0.Exact == BTI1.Exact) 3920 BECount = BTI0.Exact; 3921 } 3922 3923 return BackedgeTakenInfo(BECount, MaxBECount); 3924 } 3925 } 3926 3927 // With an icmp, it may be feasible to compute an exact backedge-taken count. 3928 // Proceed to the next level to examine the icmp. 3929 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) 3930 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB); 3931 3932 // Check for a constant condition. These are normally stripped out by 3933 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 3934 // preserve the CFG and is temporarily leaving constant conditions 3935 // in place. 3936 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 3937 if (L->contains(FBB) == !CI->getZExtValue()) 3938 // The backedge is always taken. 3939 return getCouldNotCompute(); 3940 else 3941 // The backedge is never taken. 3942 return getConstant(CI->getType(), 0); 3943 } 3944 3945 // If it's not an integer or pointer comparison then compute it the hard way. 3946 return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); 3947 } 3948 3949 static const SCEVAddRecExpr * 3950 isSimpleUnwrappingAddRec(const SCEV *S, const Loop *L) { 3951 const SCEVAddRecExpr *SA = dyn_cast<SCEVAddRecExpr>(S); 3952 3953 // The SCEV must be an addrec of this loop. 3954 if (!SA || SA->getLoop() != L || !SA->isAffine()) 3955 return 0; 3956 3957 // The SCEV must be known to not wrap in some way to be interesting. 3958 if (!SA->hasNoUnsignedWrap() && !SA->hasNoSignedWrap()) 3959 return 0; 3960 3961 // The stride must be a constant so that we know if it is striding up or down. 3962 if (!isa<SCEVConstant>(SA->getOperand(1))) 3963 return 0; 3964 return SA; 3965 } 3966 3967 /// getMinusSCEVForExitTest - When considering an exit test for a loop with a 3968 /// "x != y" exit test, we turn this into a computation that evaluates x-y != 0, 3969 /// and this function returns the expression to use for x-y. We know and take 3970 /// advantage of the fact that this subtraction is only being used in a 3971 /// comparison by zero context. 
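/// (Illustrative example, added for exposition: in a loop like
/// 'for (i = 0; i != n; i += 2)' whose induction variable is known not to
/// wrap, either i eventually equals n, the loop leaves through some other
/// exit first, or the program has undefined behavior; in all three cases the
/// subtraction can safely be modeled as a no-unsigned-wrap recurrence that
/// counts down to zero.)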
3972 ///
3973 static const SCEV *getMinusSCEVForExitTest(const SCEV *LHS, const SCEV *RHS,
3974 const Loop *L, ScalarEvolution &SE) {
3975 // If either LHS or RHS is an AddRec SCEV (of this loop) that is known to not
3976 // wrap (either NSW or NUW), then we know that the value will either become
3977 // the other one (and thus the loop terminates), that the loop will terminate
3978 // through some other exit condition first, or that the loop has undefined
3979 // behavior. This information is useful when the addrec has a stride that is
3980 // != 1 or -1, because it means we can't "miss" the exit value.
3981 //
3982 // In any of these three cases, it is safe to turn the exit condition into a
3983 // "counting down" AddRec (to zero) by subtracting the two inputs as normal,
3984 // but since we know that the "end cannot be missed" we can force the
3985 // resulting AddRec to be a NUW addrec. Since it is counting down, this means
3986 // that the AddRec *cannot* pass zero.
3987 
3988 // See if LHS and RHS are addrec's we can handle.
3989 const SCEVAddRecExpr *LHSA = isSimpleUnwrappingAddRec(LHS, L);
3990 const SCEVAddRecExpr *RHSA = isSimpleUnwrappingAddRec(RHS, L);
3991 
3992 // If neither addrec is interesting, just return a minus.
3993 if (RHSA == 0 && LHSA == 0)
3994 return SE.getMinusSCEV(LHS, RHS);
3995 
3996 // If only one of LHS and RHS is an AddRec of this loop, make sure it is LHS.
3997 if (RHSA && LHSA == 0) {
3998 // Safe because a-b and b-a are equivalent in a comparison against zero.
3999 std::swap(LHS, RHS);
4000 std::swap(LHSA, RHSA);
4001 }
4002 
4003 // Handle the case when only one is advancing in a non-overflowing way.
4004 if (RHSA == 0) {
4005 // If RHS is loop varying, then we can't predict when LHS will cross it.
4006 if (!SE.isLoopInvariant(RHS, L))
4007 return SE.getMinusSCEV(LHS, RHS);
4008 
4009 // If LHS has a positive stride, then we compute RHS-LHS, because the loop
4010 // is counting up until it crosses RHS (which must be larger than LHS). If
4011 // it is negative, we compute LHS-RHS because we're counting down to RHS.
4012 const ConstantInt *Stride =
4013 cast<SCEVConstant>(LHSA->getOperand(1))->getValue();
4014 if (Stride->getValue().isNegative())
4015 std::swap(LHS, RHS);
4016 
4017 return SE.getMinusSCEV(RHS, LHS, true /*HasNUW*/);
4018 }
4019 
4020 // If both LHS and RHS are interesting, we have something like:
4021 // a+i*4 != b+i*8.
4022 const ConstantInt *LHSStride =
4023 cast<SCEVConstant>(LHSA->getOperand(1))->getValue();
4024 const ConstantInt *RHSStride =
4025 cast<SCEVConstant>(RHSA->getOperand(1))->getValue();
4026 
4027 // If the strides are equal, then this is just a (complex) loop invariant
4028 // comparison of a/b.
4029 if (LHSStride == RHSStride)
4030 return SE.getMinusSCEV(LHSA->getStart(), RHSA->getStart());
4031 
4032 // If the signs of the strides differ, then the negative stride is counting
4033 // down to the positive stride.
4034 if (LHSStride->getValue().isNegative() != RHSStride->getValue().isNegative()){
4035 if (RHSStride->getValue().isNegative())
4036 std::swap(LHS, RHS);
4037 } else {
4038 // If LHS's stride is smaller than RHS's stride, then "b" must be less than
4039 // "a", and "b" (the RHS) is counting up (catching up) to LHS. This is true
4040 // whether the strides are positive or negative.
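// Illustrative example (added commentary): with LHS = {a,+,4} and
// RHS = {b,+,8}, the RHS closes the gap by 4 each iteration, no swap is
// needed below, and LHS-RHS = {a-b,+,-4}, which counts down to zero and can
// be marked NUW.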
4041 if (RHSStride->getValue().slt(LHSStride->getValue())) 4042 std::swap(LHS, RHS); 4043 } 4044 4045 return SE.getMinusSCEV(LHS, RHS, true /*HasNUW*/); 4046 } 4047 4048 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the 4049 /// backedge of the specified loop will execute if its exit condition 4050 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB. 4051 ScalarEvolution::BackedgeTakenInfo 4052 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, 4053 ICmpInst *ExitCond, 4054 BasicBlock *TBB, 4055 BasicBlock *FBB) { 4056 4057 // If the condition was exit on true, convert the condition to exit on false 4058 ICmpInst::Predicate Cond; 4059 if (!L->contains(FBB)) 4060 Cond = ExitCond->getPredicate(); 4061 else 4062 Cond = ExitCond->getInversePredicate(); 4063 4064 // Handle common loops like: for (X = "string"; *X; ++X) 4065 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 4066 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 4067 BackedgeTakenInfo ItCnt = 4068 ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond); 4069 if (ItCnt.hasAnyInfo()) 4070 return ItCnt; 4071 } 4072 4073 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 4074 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 4075 4076 // Try to evaluate any dependencies out of the loop. 4077 LHS = getSCEVAtScope(LHS, L); 4078 RHS = getSCEVAtScope(RHS, L); 4079 4080 // At this point, we would like to compute how many iterations of the 4081 // loop the predicate will return true for these inputs. 4082 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 4083 // If there is a loop-invariant, force it into the RHS. 4084 std::swap(LHS, RHS); 4085 Cond = ICmpInst::getSwappedPredicate(Cond); 4086 } 4087 4088 // Simplify the operands before analyzing them. 4089 (void)SimplifyICmpOperands(Cond, LHS, RHS); 4090 4091 // If we have a comparison of a chrec against a constant, try to use value 4092 // ranges to answer this query. 4093 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 4094 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 4095 if (AddRec->getLoop() == L) { 4096 // Form the constant range. 
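// Illustrative example (added commentary): for Cond == ICMP_ULT and a
// right-hand side of 10, this is the range [0, 10) -- the values for which
// the loop keeps executing.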
4097 ConstantRange CompRange( 4098 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue())); 4099 4100 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 4101 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 4102 } 4103 4104 switch (Cond) { 4105 case ICmpInst::ICMP_NE: { // while (X != Y) 4106 // Convert to: while (X-Y != 0) 4107 BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEVForExitTest(LHS, RHS, L, 4108 *this), L); 4109 if (BTI.hasAnyInfo()) return BTI; 4110 break; 4111 } 4112 case ICmpInst::ICMP_EQ: { // while (X == Y) 4113 // Convert to: while (X-Y == 0) 4114 BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); 4115 if (BTI.hasAnyInfo()) return BTI; 4116 break; 4117 } 4118 case ICmpInst::ICMP_SLT: { 4119 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true); 4120 if (BTI.hasAnyInfo()) return BTI; 4121 break; 4122 } 4123 case ICmpInst::ICMP_SGT: { 4124 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), 4125 getNotSCEV(RHS), L, true); 4126 if (BTI.hasAnyInfo()) return BTI; 4127 break; 4128 } 4129 case ICmpInst::ICMP_ULT: { 4130 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false); 4131 if (BTI.hasAnyInfo()) return BTI; 4132 break; 4133 } 4134 case ICmpInst::ICMP_UGT: { 4135 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), 4136 getNotSCEV(RHS), L, false); 4137 if (BTI.hasAnyInfo()) return BTI; 4138 break; 4139 } 4140 default: 4141 #if 0 4142 dbgs() << "ComputeBackedgeTakenCount "; 4143 if (ExitCond->getOperand(0)->getType()->isUnsigned()) 4144 dbgs() << "[unsigned] "; 4145 dbgs() << *LHS << " " 4146 << Instruction::getOpcodeName(Instruction::ICmp) 4147 << " " << *RHS << "\n"; 4148 #endif 4149 break; 4150 } 4151 return 4152 ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); 4153 } 4154 4155 static ConstantInt * 4156 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 4157 ScalarEvolution &SE) { 4158 const SCEV *InVal = SE.getConstant(C); 4159 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 4160 assert(isa<SCEVConstant>(Val) && 4161 "Evaluation of SCEV at constant didn't fold correctly?"); 4162 return cast<SCEVConstant>(Val)->getValue(); 4163 } 4164 4165 /// GetAddressedElementFromGlobal - Given a global variable with an initializer 4166 /// and a GEP expression (missing the pointer index) indexing into it, return 4167 /// the addressed element of the initializer or null if the index expression is 4168 /// invalid. 
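/// (Illustrative example, added for exposition: for a global such as
/// '@a = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]' and the index list
/// {2}, this returns the i32 constant 3.)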
4169 static Constant * 4170 GetAddressedElementFromGlobal(GlobalVariable *GV, 4171 const std::vector<ConstantInt*> &Indices) { 4172 Constant *Init = GV->getInitializer(); 4173 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 4174 uint64_t Idx = Indices[i]->getZExtValue(); 4175 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) { 4176 assert(Idx < CS->getNumOperands() && "Bad struct index!"); 4177 Init = cast<Constant>(CS->getOperand(Idx)); 4178 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) { 4179 if (Idx >= CA->getNumOperands()) return 0; // Bogus program 4180 Init = cast<Constant>(CA->getOperand(Idx)); 4181 } else if (isa<ConstantAggregateZero>(Init)) { 4182 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) { 4183 assert(Idx < STy->getNumElements() && "Bad struct index!"); 4184 Init = Constant::getNullValue(STy->getElementType(Idx)); 4185 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) { 4186 if (Idx >= ATy->getNumElements()) return 0; // Bogus program 4187 Init = Constant::getNullValue(ATy->getElementType()); 4188 } else { 4189 llvm_unreachable("Unknown constant aggregate type!"); 4190 } 4191 return 0; 4192 } else { 4193 return 0; // Unknown initializer type 4194 } 4195 } 4196 return Init; 4197 } 4198 4199 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of 4200 /// 'icmp op load X, cst', try to see if we can compute the backedge 4201 /// execution count. 4202 ScalarEvolution::BackedgeTakenInfo 4203 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount( 4204 LoadInst *LI, 4205 Constant *RHS, 4206 const Loop *L, 4207 ICmpInst::Predicate predicate) { 4208 if (LI->isVolatile()) return getCouldNotCompute(); 4209 4210 // Check to see if the loaded pointer is a getelementptr of a global. 4211 // TODO: Use SCEV instead of manually grubbing with GEPs. 4212 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 4213 if (!GEP) return getCouldNotCompute(); 4214 4215 // Make sure that it is really a constant global we are gepping, with an 4216 // initializer, and make sure the first IDX is really 0. 4217 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 4218 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 4219 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 4220 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 4221 return getCouldNotCompute(); 4222 4223 // Okay, we allow one non-constant index into the GEP instruction. 4224 Value *VarIdx = 0; 4225 std::vector<ConstantInt*> Indexes; 4226 unsigned VarIdxNum = 0; 4227 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 4228 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 4229 Indexes.push_back(CI); 4230 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 4231 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. 4232 VarIdx = GEP->getOperand(i); 4233 VarIdxNum = i-2; 4234 Indexes.push_back(0); 4235 } 4236 4237 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 4238 // Check to see if X is a loop variant variable value now. 4239 const SCEV *Idx = getSCEV(VarIdx); 4240 Idx = getSCEVAtScope(Idx, L); 4241 4242 // We can only recognize very limited forms of loop index expressions, in 4243 // particular, only affine AddRec's like {C1,+,C2}. 
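// Illustrative note (added commentary): for an index such as {0,+,1}, the
// loop below substitutes the iteration numbers 0, 1, 2, ... (bounded by
// MaxBruteForceIterations), folds the load and compare for each one, and
// stops at the first iteration where the continue condition folds to false.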
4244 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 4245 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) || 4246 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 4247 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 4248 return getCouldNotCompute(); 4249 4250 unsigned MaxSteps = MaxBruteForceIterations; 4251 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 4252 ConstantInt *ItCst = ConstantInt::get( 4253 cast<IntegerType>(IdxExpr->getType()), IterationNum); 4254 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 4255 4256 // Form the GEP offset. 4257 Indexes[VarIdxNum] = Val; 4258 4259 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes); 4260 if (Result == 0) break; // Cannot compute! 4261 4262 // Evaluate the condition for this iteration. 4263 Result = ConstantExpr::getICmp(predicate, Result, RHS); 4264 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 4265 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 4266 #if 0 4267 dbgs() << "\n***\n*** Computed loop count " << *ItCst 4268 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader() 4269 << "***\n"; 4270 #endif 4271 ++NumArrayLenItCounts; 4272 return getConstant(ItCst); // Found terminating iteration! 4273 } 4274 } 4275 return getCouldNotCompute(); 4276 } 4277 4278 4279 /// CanConstantFold - Return true if we can constant fold an instruction of the 4280 /// specified type, assuming that all operands were constants. 4281 static bool CanConstantFold(const Instruction *I) { 4282 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 4283 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I)) 4284 return true; 4285 4286 if (const CallInst *CI = dyn_cast<CallInst>(I)) 4287 if (const Function *F = CI->getCalledFunction()) 4288 return canConstantFoldCallTo(F); 4289 return false; 4290 } 4291 4292 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 4293 /// in the loop that V is derived from. We allow arbitrary operations along the 4294 /// way, but the operands of an operation must either be constants or a value 4295 /// derived from a constant PHI. If this expression does not fit with these 4296 /// constraints, return null. 4297 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 4298 // If this is not an instruction, or if this is an instruction outside of the 4299 // loop, it can't be derived from a loop PHI. 4300 Instruction *I = dyn_cast<Instruction>(V); 4301 if (I == 0 || !L->contains(I)) return 0; 4302 4303 if (PHINode *PN = dyn_cast<PHINode>(I)) { 4304 if (L->getHeader() == I->getParent()) 4305 return PN; 4306 else 4307 // We don't currently keep track of the control flow needed to evaluate 4308 // PHIs, so we cannot handle PHIs inside of loops. 4309 return 0; 4310 } 4311 4312 // If we won't be able to constant fold this expression even if the operands 4313 // are constants, return early. 4314 if (!CanConstantFold(I)) return 0; 4315 4316 // Otherwise, we can evaluate this instruction if all of its operands are 4317 // constant or derived from a PHI node themselves. 4318 PHINode *PHI = 0; 4319 for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op) 4320 if (!isa<Constant>(I->getOperand(Op))) { 4321 PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L); 4322 if (P == 0) return 0; // Not evolving from PHI 4323 if (PHI == 0) 4324 PHI = P; 4325 else if (PHI != P) 4326 return 0; // Evolving from multiple different PHIs. 
4327 } 4328 4329 // This is a expression evolving from a constant PHI! 4330 return PHI; 4331 } 4332 4333 /// EvaluateExpression - Given an expression that passes the 4334 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node 4335 /// in the loop has the value PHIVal. If we can't fold this expression for some 4336 /// reason, return null. 4337 static Constant *EvaluateExpression(Value *V, Constant *PHIVal, 4338 const TargetData *TD) { 4339 if (isa<PHINode>(V)) return PHIVal; 4340 if (Constant *C = dyn_cast<Constant>(V)) return C; 4341 Instruction *I = cast<Instruction>(V); 4342 4343 std::vector<Constant*> Operands(I->getNumOperands()); 4344 4345 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 4346 Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD); 4347 if (Operands[i] == 0) return 0; 4348 } 4349 4350 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 4351 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 4352 Operands[1], TD); 4353 return ConstantFoldInstOperands(I->getOpcode(), I->getType(), 4354 &Operands[0], Operands.size(), TD); 4355 } 4356 4357 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 4358 /// in the header of its containing loop, we know the loop executes a 4359 /// constant number of times, and the PHI node is just a recurrence 4360 /// involving constants, fold it. 4361 Constant * 4362 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 4363 const APInt &BEs, 4364 const Loop *L) { 4365 std::map<PHINode*, Constant*>::const_iterator I = 4366 ConstantEvolutionLoopExitValue.find(PN); 4367 if (I != ConstantEvolutionLoopExitValue.end()) 4368 return I->second; 4369 4370 if (BEs.ugt(MaxBruteForceIterations)) 4371 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it. 4372 4373 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 4374 4375 // Since the loop is canonicalized, the PHI node must have two entries. One 4376 // entry must be a constant (coming in from outside of the loop), and the 4377 // second must be derived from the same PHI. 4378 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); 4379 Constant *StartCST = 4380 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge)); 4381 if (StartCST == 0) 4382 return RetVal = 0; // Must be a constant. 4383 4384 Value *BEValue = PN->getIncomingValue(SecondIsBackedge); 4385 if (getConstantEvolvingPHI(BEValue, L) != PN && 4386 !isa<Constant>(BEValue)) 4387 return RetVal = 0; // Not derived from same PHI. 4388 4389 // Execute the loop symbolically to determine the exit value. 4390 if (BEs.getActiveBits() >= 32) 4391 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it! 4392 4393 unsigned NumIterations = BEs.getZExtValue(); // must be in range 4394 unsigned IterationNum = 0; 4395 for (Constant *PHIVal = StartCST; ; ++IterationNum) { 4396 if (IterationNum == NumIterations) 4397 return RetVal = PHIVal; // Got exit value! 4398 4399 // Compute the value of the PHI node for the next iteration. 4400 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD); 4401 if (NextPHI == PHIVal) 4402 return RetVal = NextPHI; // Stopped evolving! 4403 if (NextPHI == 0) 4404 return 0; // Couldn't evaluate! 
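// Otherwise advance the recurrence: the PHI will have this value on the
// next trip around the loop.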
4405 PHIVal = NextPHI; 4406 } 4407 } 4408 4409 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a 4410 /// constant number of times (the condition evolves only from constants), 4411 /// try to evaluate a few iterations of the loop until the exit 4412 /// condition gets a value of ExitWhen (true or false). If we cannot 4413 /// evaluate the trip count of the loop, return getCouldNotCompute(). 4414 const SCEV * 4415 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L, 4416 Value *Cond, 4417 bool ExitWhen) { 4418 PHINode *PN = getConstantEvolvingPHI(Cond, L); 4419 if (PN == 0) return getCouldNotCompute(); 4420 4421 // If the loop is canonicalized, the PHI will have exactly two entries. 4422 // That's the only form we support here. 4423 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); 4424 4425 // One entry must be a constant (coming in from outside of the loop), and the 4426 // second must be derived from the same PHI. 4427 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); 4428 Constant *StartCST = 4429 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge)); 4430 if (StartCST == 0) return getCouldNotCompute(); // Must be a constant. 4431 4432 Value *BEValue = PN->getIncomingValue(SecondIsBackedge); 4433 if (getConstantEvolvingPHI(BEValue, L) != PN && 4434 !isa<Constant>(BEValue)) 4435 return getCouldNotCompute(); // Not derived from same PHI. 4436 4437 // Okay, we found a PHI node that defines the trip count of this loop. Execute 4438 // the loop symbolically to determine when the condition gets a value of 4439 // "ExitWhen". 4440 unsigned IterationNum = 0; 4441 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 4442 for (Constant *PHIVal = StartCST; 4443 IterationNum != MaxIterations; ++IterationNum) { 4444 ConstantInt *CondVal = 4445 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD)); 4446 4447 // Couldn't symbolically evaluate. 4448 if (!CondVal) return getCouldNotCompute(); 4449 4450 if (CondVal->getValue() == uint64_t(ExitWhen)) { 4451 ++NumBruteForceTripCountsComputed; 4452 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 4453 } 4454 4455 // Compute the value of the PHI node for the next iteration. 4456 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD); 4457 if (NextPHI == 0 || NextPHI == PHIVal) 4458 return getCouldNotCompute(); // Couldn't evaluate or not making progress... 4459 PHIVal = NextPHI; 4460 } 4461 4462 // Too many iterations were needed to evaluate. 4463 return getCouldNotCompute(); 4464 } 4465 4466 /// getSCEVAtScope - Return a SCEV expression for the specified value 4467 /// at the specified scope in the program. The L value specifies the loop 4468 /// nest in which to evaluate the expression: null means the top level, and 4469 /// otherwise the expression is evaluated in the context of the given loop. 4470 /// 4471 /// This method can be used to compute the exit value for a variable defined 4472 /// in a loop by querying what the value will hold in the parent loop. 4473 /// 4474 /// In the case that a relevant loop exit value cannot be computed, the 4475 /// original value V is returned. 4476 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 4477 // Check to see if we've folded this expression at this loop before.
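// ValuesAtScopes caches, per loop scope, the value each SCEV folds to. An
// entry is reserved with a null value before computing, and a query that
// finds such a null entry (e.g. a re-entrant query on the same (V, L) pair)
// just returns V itself.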
4478 std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V]; 4479 std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair = 4480 Values.insert(std::make_pair(L, static_cast<const SCEV *>(0))); 4481 if (!Pair.second) 4482 return Pair.first->second ? Pair.first->second : V; 4483 4484 // Otherwise compute it. 4485 const SCEV *C = computeSCEVAtScope(V, L); 4486 ValuesAtScopes[V][L] = C; 4487 return C; 4488 } 4489 4490 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 4491 if (isa<SCEVConstant>(V)) return V; 4492 4493 // If this instruction is evolved from a constant-evolving PHI, compute the 4494 // exit value from the loop without using SCEVs. 4495 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 4496 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 4497 const Loop *LI = (*this->LI)[I->getParent()]; 4498 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 4499 if (PHINode *PN = dyn_cast<PHINode>(I)) 4500 if (PN->getParent() == LI->getHeader()) { 4501 // Okay, there is no closed form solution for the PHI node. Check 4502 // to see if the loop that contains it has a known backedge-taken 4503 // count. If so, we may be able to force computation of the exit 4504 // value. 4505 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 4506 if (const SCEVConstant *BTCC = 4507 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 4508 // Okay, we know how many times the containing loop executes. If 4509 // this is a constant evolving PHI node, get the final value at 4510 // the specified iteration number. 4511 Constant *RV = getConstantEvolutionLoopExitValue(PN, 4512 BTCC->getValue()->getValue(), 4513 LI); 4514 if (RV) return getSCEV(RV); 4515 } 4516 } 4517 4518 // Okay, this is an expression that we cannot symbolically evaluate 4519 // into a SCEV. Check to see if it's possible to symbolically evaluate 4520 // the arguments into constants, and if so, try to constant propagate the 4521 // result. This is particularly useful for computing loop exit values. 4522 if (CanConstantFold(I)) { 4523 SmallVector<Constant *, 4> Operands; 4524 bool MadeImprovement = false; 4525 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 4526 Value *Op = I->getOperand(i); 4527 if (Constant *C = dyn_cast<Constant>(Op)) { 4528 Operands.push_back(C); 4529 continue; 4530 } 4531 4532 // If any of the operands is non-constant and if they are 4533 // non-integer and non-pointer, don't even try to analyze them 4534 // with scev techniques. 4535 if (!isSCEVable(Op->getType())) 4536 return V; 4537 4538 const SCEV *OrigV = getSCEV(Op); 4539 const SCEV *OpV = getSCEVAtScope(OrigV, L); 4540 MadeImprovement |= OrigV != OpV; 4541 4542 Constant *C = 0; 4543 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) 4544 C = SC->getValue(); 4545 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) 4546 C = dyn_cast<Constant>(SU->getValue()); 4547 if (!C) return V; 4548 if (C->getType() != Op->getType()) 4549 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 4550 Op->getType(), 4551 false), 4552 C, Op->getType()); 4553 Operands.push_back(C); 4554 } 4555 4556 // Check to see if getSCEVAtScope actually made an improvement. 
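// For example (illustrative): if I is 'add i32 %len.exit, 3' and the operand
// %len.exit folds to the constant 7 at this scope, the whole instruction can
// be constant folded to 10 below.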
4557 if (MadeImprovement) { 4558 Constant *C = 0; 4559 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 4560 C = ConstantFoldCompareInstOperands(CI->getPredicate(), 4561 Operands[0], Operands[1], TD); 4562 else 4563 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), 4564 &Operands[0], Operands.size(), TD); 4565 if (!C) return V; 4566 return getSCEV(C); 4567 } 4568 } 4569 } 4570 4571 // This is some other type of SCEVUnknown, just return it. 4572 return V; 4573 } 4574 4575 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 4576 // Avoid performing the look-up in the common case where the specified 4577 // expression has no loop-variant portions. 4578 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 4579 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 4580 if (OpAtScope != Comm->getOperand(i)) { 4581 // Okay, at least one of these operands is loop variant but might be 4582 // foldable. Build a new instance of the folded commutative expression. 4583 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 4584 Comm->op_begin()+i); 4585 NewOps.push_back(OpAtScope); 4586 4587 for (++i; i != e; ++i) { 4588 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 4589 NewOps.push_back(OpAtScope); 4590 } 4591 if (isa<SCEVAddExpr>(Comm)) 4592 return getAddExpr(NewOps); 4593 if (isa<SCEVMulExpr>(Comm)) 4594 return getMulExpr(NewOps); 4595 if (isa<SCEVSMaxExpr>(Comm)) 4596 return getSMaxExpr(NewOps); 4597 if (isa<SCEVUMaxExpr>(Comm)) 4598 return getUMaxExpr(NewOps); 4599 llvm_unreachable("Unknown commutative SCEV type!"); 4600 } 4601 } 4602 // If we got here, all operands are loop invariant. 4603 return Comm; 4604 } 4605 4606 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 4607 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 4608 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 4609 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 4610 return Div; // must be loop invariant 4611 return getUDivExpr(LHS, RHS); 4612 } 4613 4614 // If this is a loop recurrence for a loop that does not contain L, then we 4615 // are dealing with the final value computed by the loop. 4616 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 4617 // First, attempt to evaluate each operand. 4618 // Avoid performing the look-up in the common case where the specified 4619 // expression has no loop-variant portions. 4620 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 4621 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 4622 if (OpAtScope == AddRec->getOperand(i)) 4623 continue; 4624 4625 // Okay, at least one of these operands is loop variant but might be 4626 // foldable. Build a new instance of the folded commutative expression. 4627 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 4628 AddRec->op_begin()+i); 4629 NewOps.push_back(OpAtScope); 4630 for (++i; i != e; ++i) 4631 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 4632 4633 AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop())); 4634 break; 4635 } 4636 4637 // If the scope is outside the addrec's loop, evaluate it by using the 4638 // loop exit value of the addrec. 4639 if (!AddRec->getLoop()->contains(L)) { 4640 // To evaluate this recurrence, we need to know how many times the AddRec 4641 // loop iterates. Compute this now. 4642 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 4643 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 4644 4645 // Then, evaluate the AddRec. 
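// Illustration: for {0,+,2} in a loop whose backedge is taken 9 times, the
// exit value computed here is the addrec at iteration 9, i.e. 0 + 2*9 = 18.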
4646 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 4647 } 4648 4649 return AddRec; 4650 } 4651 4652 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 4653 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 4654 if (Op == Cast->getOperand()) 4655 return Cast; // must be loop invariant 4656 return getZeroExtendExpr(Op, Cast->getType()); 4657 } 4658 4659 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 4660 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 4661 if (Op == Cast->getOperand()) 4662 return Cast; // must be loop invariant 4663 return getSignExtendExpr(Op, Cast->getType()); 4664 } 4665 4666 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 4667 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 4668 if (Op == Cast->getOperand()) 4669 return Cast; // must be loop invariant 4670 return getTruncateExpr(Op, Cast->getType()); 4671 } 4672 4673 llvm_unreachable("Unknown SCEV type!"); 4674 return 0; 4675 } 4676 4677 /// getSCEVAtScope - This is a convenience function which does 4678 /// getSCEVAtScope(getSCEV(V), L). 4679 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 4680 return getSCEVAtScope(getSCEV(V), L); 4681 } 4682 4683 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the 4684 /// following equation: 4685 /// 4686 /// A * X = B (mod N) 4687 /// 4688 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 4689 /// A and B isn't important. 4690 /// 4691 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 4692 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B, 4693 ScalarEvolution &SE) { 4694 uint32_t BW = A.getBitWidth(); 4695 assert(BW == B.getBitWidth() && "Bit widths must be the same."); 4696 assert(A != 0 && "A must be non-zero."); 4697 4698 // 1. D = gcd(A, N) 4699 // 4700 // The gcd of A and N may have only one prime factor: 2. The number of 4701 // trailing zeros in A is its multiplicity 4702 uint32_t Mult2 = A.countTrailingZeros(); 4703 // D = 2^Mult2 4704 4705 // 2. Check if B is divisible by D. 4706 // 4707 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 4708 // is not less than multiplicity of this prime factor for D. 4709 if (B.countTrailingZeros() < Mult2) 4710 return SE.getCouldNotCompute(); 4711 4712 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 4713 // modulo (N / D). 4714 // 4715 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this 4716 // bit width during computations. 4717 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 4718 APInt Mod(BW + 1, 0); 4719 Mod.setBit(BW - Mult2); // Mod = N / D 4720 APInt I = AD.multiplicativeInverse(Mod); 4721 4722 // 4. Compute the minimum unsigned root of the equation: 4723 // I * (B / D) mod (N / D) 4724 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod); 4725 4726 // The result is guaranteed to be less than 2^BW so we may truncate it to BW 4727 // bits. 4728 return SE.getConstant(Result.trunc(BW)); 4729 } 4730 4731 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the 4732 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which 4733 /// might be the same) or two SCEVCouldNotCompute objects. 
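///
/// Illustration of the setup used below: a chrec {L,+,M,+,N} evaluated at
/// iteration X equals L + M*X + N*X*(X-1)/2, which rearranges to the
/// polynomial (N/2)*X^2 + (M - N/2)*X + L, so the code solves A*X^2 + B*X + C
/// with A = N/2, B = M - N/2 and C = L.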
4734 /// 4735 static std::pair<const SCEV *,const SCEV *> 4736 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 4737 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 4738 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 4739 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 4740 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 4741 4742 // We currently can only solve this if the coefficients are constants. 4743 if (!LC || !MC || !NC) { 4744 const SCEV *CNC = SE.getCouldNotCompute(); 4745 return std::make_pair(CNC, CNC); 4746 } 4747 4748 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth(); 4749 const APInt &L = LC->getValue()->getValue(); 4750 const APInt &M = MC->getValue()->getValue(); 4751 const APInt &N = NC->getValue()->getValue(); 4752 APInt Two(BitWidth, 2); 4753 APInt Four(BitWidth, 4); 4754 4755 { 4756 using namespace APIntOps; 4757 const APInt& C = L; 4758 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C 4759 // The B coefficient is M-N/2 4760 APInt B(M); 4761 B -= sdiv(N,Two); 4762 4763 // The A coefficient is N/2 4764 APInt A(N.sdiv(Two)); 4765 4766 // Compute the B^2-4ac term. 4767 APInt SqrtTerm(B); 4768 SqrtTerm *= B; 4769 SqrtTerm -= Four * (A * C); 4770 4771 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest 4772 // integer value or else APInt::sqrt() will assert. 4773 APInt SqrtVal(SqrtTerm.sqrt()); 4774 4775 // Compute the two solutions for the quadratic formula. 4776 // The divisions must be performed as signed divisions. 4777 APInt NegB(-B); 4778 APInt TwoA( A << 1 ); 4779 if (TwoA.isMinValue()) { 4780 const SCEV *CNC = SE.getCouldNotCompute(); 4781 return std::make_pair(CNC, CNC); 4782 } 4783 4784 LLVMContext &Context = SE.getContext(); 4785 4786 ConstantInt *Solution1 = 4787 ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA)); 4788 ConstantInt *Solution2 = 4789 ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA)); 4790 4791 return std::make_pair(SE.getConstant(Solution1), 4792 SE.getConstant(Solution2)); 4793 } // end APIntOps namespace 4794 } 4795 4796 /// HowFarToZero - Return the number of times a backedge comparing the specified 4797 /// value to zero will execute. If not computable, return CouldNotCompute. 4798 ScalarEvolution::BackedgeTakenInfo 4799 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { 4800 // If the value is a constant 4801 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 4802 // If the value is already zero, the branch will execute zero times. 4803 if (C->getValue()->isZero()) return C; 4804 return getCouldNotCompute(); // Otherwise it will loop infinitely. 4805 } 4806 4807 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); 4808 if (!AddRec || AddRec->getLoop() != L) 4809 return getCouldNotCompute(); 4810 4811 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 4812 // the quadratic equation to solve it. 4813 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 4814 std::pair<const SCEV *,const SCEV *> Roots = 4815 SolveQuadraticEquation(AddRec, *this); 4816 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 4817 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 4818 if (R1 && R2) { 4819 #if 0 4820 dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1 4821 << " sol#2: " << *R2 << "\n"; 4822 #endif 4823 // Pick the smallest positive root value. 
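// The backedge stops being taken the first time the chrec hits zero, so of
// the two algebraic roots we want the unsigned-smaller one, and (as checked
// further down) it only counts if the chrec is exactly zero at that index.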
4824 if (ConstantInt *CB = 4825 dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT, 4826 R1->getValue(), 4827 R2->getValue()))) { 4828 if (CB->getZExtValue() == false) 4829 std::swap(R1, R2); // R1 is the minimum root now. 4830 4831 // We can only use this value if the chrec ends up with an exact zero 4832 // value at this index. When solving for "X*X != 5", for example, we 4833 // should not accept a root of 2. 4834 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); 4835 if (Val->isZero()) 4836 return R1; // We found a quadratic root! 4837 } 4838 } 4839 return getCouldNotCompute(); 4840 } 4841 4842 // Otherwise we can only handle this if it is affine. 4843 if (!AddRec->isAffine()) 4844 return getCouldNotCompute(); 4845 4846 // If this is an affine expression, the execution count of this branch is 4847 // the minimum unsigned root of the following equation: 4848 // 4849 // Start + Step*N = 0 (mod 2^BW) 4850 // 4851 // equivalent to: 4852 // 4853 // Step*N = -Start (mod 2^BW) 4854 // 4855 // where BW is the common bit width of Start and Step. 4856 4857 // Get the initial value for the loop. 4858 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 4859 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 4860 4861 // If the AddRec is NUW, then (in an unsigned sense) it cannot be counting up 4862 // to wrap to 0, it must be counting down to equal 0. Also, while counting 4863 // down, it cannot "miss" 0 (which would cause it to wrap), regardless of what 4864 // the stride is. As such, NUW addrec's will always become zero in 4865 // "start / -stride" steps, and we know that the division is exact. 4866 if (AddRec->hasNoUnsignedWrap()) 4867 // FIXME: We really want an "isexact" bit for udiv. 4868 return getUDivExpr(Start, getNegativeSCEV(Step)); 4869 4870 // For now we handle only constant steps. 4871 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 4872 if (StepC == 0) 4873 return getCouldNotCompute(); 4874 4875 // First, handle unitary steps. 4876 if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so: 4877 return getNegativeSCEV(Start); // N = -Start (as unsigned) 4878 4879 if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so: 4880 return Start; // N = Start (as unsigned) 4881 4882 // Then, try to solve the above equation provided that Start is constant. 4883 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) 4884 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(), 4885 -StartC->getValue()->getValue(), 4886 *this); 4887 return getCouldNotCompute(); 4888 } 4889 4890 /// HowFarToNonZero - Return the number of times a backedge checking the 4891 /// specified value for nonzero will execute. If not computable, return 4892 /// CouldNotCompute 4893 ScalarEvolution::BackedgeTakenInfo 4894 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { 4895 // Loops that look like: while (X == 0) are very strange indeed. We don't 4896 // handle them yet except for the trivial case. This could be expanded in the 4897 // future as needed. 4898 4899 // If the value is a constant, check to see if it is known to be non-zero 4900 // already. If so, the backedge will execute zero times. 4901 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 4902 if (!C->getValue()->isNullValue()) 4903 return getConstant(C->getType(), 0); 4904 return getCouldNotCompute(); // Otherwise it will loop infinitely. 
4905 } 4906 4907 // We could implement others, but I really doubt anyone writes loops like 4908 // this, and if they did, they would already be constant folded. 4909 return getCouldNotCompute(); 4910 } 4911 4912 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB 4913 /// (which may not be an immediate predecessor) which has exactly one 4914 /// successor from which BB is reachable, or null if no such block is 4915 /// found. 4916 /// 4917 std::pair<BasicBlock *, BasicBlock *> 4918 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 4919 // If the block has a unique predecessor, then there is no path from the 4920 // predecessor to the block that does not go through the direct edge 4921 // from the predecessor to the block. 4922 if (BasicBlock *Pred = BB->getSinglePredecessor()) 4923 return std::make_pair(Pred, BB); 4924 4925 // A loop's header is defined to be a block that dominates the loop. 4926 // If the header has a unique predecessor outside the loop, it must be 4927 // a block that has exactly one successor that can reach the loop. 4928 if (Loop *L = LI->getLoopFor(BB)) 4929 return std::make_pair(L->getLoopPredecessor(), L->getHeader()); 4930 4931 return std::pair<BasicBlock *, BasicBlock *>(); 4932 } 4933 4934 /// HasSameValue - SCEV structural equivalence is usually sufficient for 4935 /// testing whether two expressions are equal, however for the purposes of 4936 /// looking for a condition guarding a loop, it can be useful to be a little 4937 /// more general, since a front-end may have replicated the controlling 4938 /// expression. 4939 /// 4940 static bool HasSameValue(const SCEV *A, const SCEV *B) { 4941 // Quick check to see if they are the same SCEV. 4942 if (A == B) return true; 4943 4944 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 4945 // two different instructions with the same value. Check for this case. 4946 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 4947 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 4948 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 4949 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 4950 if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory()) 4951 return true; 4952 4953 // Otherwise assume they may have a different value. 4954 return false; 4955 } 4956 4957 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with 4958 /// predicate Pred. Return true iff any changes were made. 4959 /// 4960 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 4961 const SCEV *&LHS, const SCEV *&RHS) { 4962 bool Changed = false; 4963 4964 // Canonicalize a constant to the right side. 4965 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 4966 // Check for both operands constant. 4967 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 4968 if (ConstantExpr::getICmp(Pred, 4969 LHSC->getValue(), 4970 RHSC->getValue())->isNullValue()) 4971 goto trivially_false; 4972 else 4973 goto trivially_true; 4974 } 4975 // Otherwise swap the operands to put the constant on the right. 4976 std::swap(LHS, RHS); 4977 Pred = ICmpInst::getSwappedPredicate(Pred); 4978 Changed = true; 4979 } 4980 4981 // If we're comparing an addrec with a value which is loop-invariant in the 4982 // addrec's loop, put the addrec on the left. Also make a dominance check, 4983 // as both operands could be addrecs loop-invariant in each other's loop. 
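// For example (illustrative): '%n <s {0,+,1}<L>' with %n invariant in L is
// rewritten below as '{0,+,1}<L> >s %n'.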
4984 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 4985 const Loop *L = AR->getLoop(); 4986 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 4987 std::swap(LHS, RHS); 4988 Pred = ICmpInst::getSwappedPredicate(Pred); 4989 Changed = true; 4990 } 4991 } 4992 4993 // If there's a constant operand, canonicalize comparisons with boundary 4994 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 4995 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 4996 const APInt &RA = RC->getValue()->getValue(); 4997 switch (Pred) { 4998 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 4999 case ICmpInst::ICMP_EQ: 5000 case ICmpInst::ICMP_NE: 5001 break; 5002 case ICmpInst::ICMP_UGE: 5003 if ((RA - 1).isMinValue()) { 5004 Pred = ICmpInst::ICMP_NE; 5005 RHS = getConstant(RA - 1); 5006 Changed = true; 5007 break; 5008 } 5009 if (RA.isMaxValue()) { 5010 Pred = ICmpInst::ICMP_EQ; 5011 Changed = true; 5012 break; 5013 } 5014 if (RA.isMinValue()) goto trivially_true; 5015 5016 Pred = ICmpInst::ICMP_UGT; 5017 RHS = getConstant(RA - 1); 5018 Changed = true; 5019 break; 5020 case ICmpInst::ICMP_ULE: 5021 if ((RA + 1).isMaxValue()) { 5022 Pred = ICmpInst::ICMP_NE; 5023 RHS = getConstant(RA + 1); 5024 Changed = true; 5025 break; 5026 } 5027 if (RA.isMinValue()) { 5028 Pred = ICmpInst::ICMP_EQ; 5029 Changed = true; 5030 break; 5031 } 5032 if (RA.isMaxValue()) goto trivially_true; 5033 5034 Pred = ICmpInst::ICMP_ULT; 5035 RHS = getConstant(RA + 1); 5036 Changed = true; 5037 break; 5038 case ICmpInst::ICMP_SGE: 5039 if ((RA - 1).isMinSignedValue()) { 5040 Pred = ICmpInst::ICMP_NE; 5041 RHS = getConstant(RA - 1); 5042 Changed = true; 5043 break; 5044 } 5045 if (RA.isMaxSignedValue()) { 5046 Pred = ICmpInst::ICMP_EQ; 5047 Changed = true; 5048 break; 5049 } 5050 if (RA.isMinSignedValue()) goto trivially_true; 5051 5052 Pred = ICmpInst::ICMP_SGT; 5053 RHS = getConstant(RA - 1); 5054 Changed = true; 5055 break; 5056 case ICmpInst::ICMP_SLE: 5057 if ((RA + 1).isMaxSignedValue()) { 5058 Pred = ICmpInst::ICMP_NE; 5059 RHS = getConstant(RA + 1); 5060 Changed = true; 5061 break; 5062 } 5063 if (RA.isMinSignedValue()) { 5064 Pred = ICmpInst::ICMP_EQ; 5065 Changed = true; 5066 break; 5067 } 5068 if (RA.isMaxSignedValue()) goto trivially_true; 5069 5070 Pred = ICmpInst::ICMP_SLT; 5071 RHS = getConstant(RA + 1); 5072 Changed = true; 5073 break; 5074 case ICmpInst::ICMP_UGT: 5075 if (RA.isMinValue()) { 5076 Pred = ICmpInst::ICMP_NE; 5077 Changed = true; 5078 break; 5079 } 5080 if ((RA + 1).isMaxValue()) { 5081 Pred = ICmpInst::ICMP_EQ; 5082 RHS = getConstant(RA + 1); 5083 Changed = true; 5084 break; 5085 } 5086 if (RA.isMaxValue()) goto trivially_false; 5087 break; 5088 case ICmpInst::ICMP_ULT: 5089 if (RA.isMaxValue()) { 5090 Pred = ICmpInst::ICMP_NE; 5091 Changed = true; 5092 break; 5093 } 5094 if ((RA - 1).isMinValue()) { 5095 Pred = ICmpInst::ICMP_EQ; 5096 RHS = getConstant(RA - 1); 5097 Changed = true; 5098 break; 5099 } 5100 if (RA.isMinValue()) goto trivially_false; 5101 break; 5102 case ICmpInst::ICMP_SGT: 5103 if (RA.isMinSignedValue()) { 5104 Pred = ICmpInst::ICMP_NE; 5105 Changed = true; 5106 break; 5107 } 5108 if ((RA + 1).isMaxSignedValue()) { 5109 Pred = ICmpInst::ICMP_EQ; 5110 RHS = getConstant(RA + 1); 5111 Changed = true; 5112 break; 5113 } 5114 if (RA.isMaxSignedValue()) goto trivially_false; 5115 break; 5116 case ICmpInst::ICMP_SLT: 5117 if (RA.isMaxSignedValue()) { 5118 Pred = ICmpInst::ICMP_NE; 5119 Changed = true; 5120 break; 
5121 } 5122 if ((RA - 1).isMinSignedValue()) { 5123 Pred = ICmpInst::ICMP_EQ; 5124 RHS = getConstant(RA - 1); 5125 Changed = true; 5126 break; 5127 } 5128 if (RA.isMinSignedValue()) goto trivially_false; 5129 break; 5130 } 5131 } 5132 5133 // Check for obvious equality. 5134 if (HasSameValue(LHS, RHS)) { 5135 if (ICmpInst::isTrueWhenEqual(Pred)) 5136 goto trivially_true; 5137 if (ICmpInst::isFalseWhenEqual(Pred)) 5138 goto trivially_false; 5139 } 5140 5141 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 5142 // adding or subtracting 1 from one of the operands. 5143 switch (Pred) { 5144 case ICmpInst::ICMP_SLE: 5145 if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) { 5146 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 5147 /*HasNUW=*/false, /*HasNSW=*/true); 5148 Pred = ICmpInst::ICMP_SLT; 5149 Changed = true; 5150 } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) { 5151 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 5152 /*HasNUW=*/false, /*HasNSW=*/true); 5153 Pred = ICmpInst::ICMP_SLT; 5154 Changed = true; 5155 } 5156 break; 5157 case ICmpInst::ICMP_SGE: 5158 if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) { 5159 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 5160 /*HasNUW=*/false, /*HasNSW=*/true); 5161 Pred = ICmpInst::ICMP_SGT; 5162 Changed = true; 5163 } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) { 5164 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 5165 /*HasNUW=*/false, /*HasNSW=*/true); 5166 Pred = ICmpInst::ICMP_SGT; 5167 Changed = true; 5168 } 5169 break; 5170 case ICmpInst::ICMP_ULE: 5171 if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) { 5172 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 5173 /*HasNUW=*/true, /*HasNSW=*/false); 5174 Pred = ICmpInst::ICMP_ULT; 5175 Changed = true; 5176 } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) { 5177 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 5178 /*HasNUW=*/true, /*HasNSW=*/false); 5179 Pred = ICmpInst::ICMP_ULT; 5180 Changed = true; 5181 } 5182 break; 5183 case ICmpInst::ICMP_UGE: 5184 if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) { 5185 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 5186 /*HasNUW=*/true, /*HasNSW=*/false); 5187 Pred = ICmpInst::ICMP_UGT; 5188 Changed = true; 5189 } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) { 5190 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 5191 /*HasNUW=*/true, /*HasNSW=*/false); 5192 Pred = ICmpInst::ICMP_UGT; 5193 Changed = true; 5194 } 5195 break; 5196 default: 5197 break; 5198 } 5199 5200 // TODO: More simplifications are possible here. 5201 5202 return Changed; 5203 5204 trivially_true: 5205 // Return 0 == 0. 5206 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 5207 Pred = ICmpInst::ICMP_EQ; 5208 return true; 5209 5210 trivially_false: 5211 // Return 0 != 0. 
5212 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 5213 Pred = ICmpInst::ICMP_NE; 5214 return true; 5215 } 5216 5217 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 5218 return getSignedRange(S).getSignedMax().isNegative(); 5219 } 5220 5221 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 5222 return getSignedRange(S).getSignedMin().isStrictlyPositive(); 5223 } 5224 5225 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 5226 return !getSignedRange(S).getSignedMin().isNegative(); 5227 } 5228 5229 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 5230 return !getSignedRange(S).getSignedMax().isStrictlyPositive(); 5231 } 5232 5233 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 5234 return isKnownNegative(S) || isKnownPositive(S); 5235 } 5236 5237 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 5238 const SCEV *LHS, const SCEV *RHS) { 5239 // Canonicalize the inputs first. 5240 (void)SimplifyICmpOperands(Pred, LHS, RHS); 5241 5242 // If LHS or RHS is an addrec, check to see if the condition is true in 5243 // every iteration of the loop. 5244 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 5245 if (isLoopEntryGuardedByCond( 5246 AR->getLoop(), Pred, AR->getStart(), RHS) && 5247 isLoopBackedgeGuardedByCond( 5248 AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS)) 5249 return true; 5250 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) 5251 if (isLoopEntryGuardedByCond( 5252 AR->getLoop(), Pred, LHS, AR->getStart()) && 5253 isLoopBackedgeGuardedByCond( 5254 AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this))) 5255 return true; 5256 5257 // Otherwise see what can be done with known constant ranges. 5258 return isKnownPredicateWithRanges(Pred, LHS, RHS); 5259 } 5260 5261 bool 5262 ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred, 5263 const SCEV *LHS, const SCEV *RHS) { 5264 if (HasSameValue(LHS, RHS)) 5265 return ICmpInst::isTrueWhenEqual(Pred); 5266 5267 // This code is split out from isKnownPredicate because it is called from 5268 // within isLoopEntryGuardedByCond. 
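// Illustration: if getSignedRange(LHS) is [0,10) and getSignedRange(RHS) is
// [10,20), then ICMP_SLT is known true, because the largest possible LHS (9)
// is still smaller than the smallest possible RHS (10).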
5269 switch (Pred) { 5270 default: 5271 llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 5272 break; 5273 case ICmpInst::ICMP_SGT: 5274 Pred = ICmpInst::ICMP_SLT; 5275 std::swap(LHS, RHS); 5276 case ICmpInst::ICMP_SLT: { 5277 ConstantRange LHSRange = getSignedRange(LHS); 5278 ConstantRange RHSRange = getSignedRange(RHS); 5279 if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin())) 5280 return true; 5281 if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax())) 5282 return false; 5283 break; 5284 } 5285 case ICmpInst::ICMP_SGE: 5286 Pred = ICmpInst::ICMP_SLE; 5287 std::swap(LHS, RHS); 5288 case ICmpInst::ICMP_SLE: { 5289 ConstantRange LHSRange = getSignedRange(LHS); 5290 ConstantRange RHSRange = getSignedRange(RHS); 5291 if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin())) 5292 return true; 5293 if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax())) 5294 return false; 5295 break; 5296 } 5297 case ICmpInst::ICMP_UGT: 5298 Pred = ICmpInst::ICMP_ULT; 5299 std::swap(LHS, RHS); 5300 case ICmpInst::ICMP_ULT: { 5301 ConstantRange LHSRange = getUnsignedRange(LHS); 5302 ConstantRange RHSRange = getUnsignedRange(RHS); 5303 if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin())) 5304 return true; 5305 if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax())) 5306 return false; 5307 break; 5308 } 5309 case ICmpInst::ICMP_UGE: 5310 Pred = ICmpInst::ICMP_ULE; 5311 std::swap(LHS, RHS); 5312 case ICmpInst::ICMP_ULE: { 5313 ConstantRange LHSRange = getUnsignedRange(LHS); 5314 ConstantRange RHSRange = getUnsignedRange(RHS); 5315 if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin())) 5316 return true; 5317 if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax())) 5318 return false; 5319 break; 5320 } 5321 case ICmpInst::ICMP_NE: { 5322 if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet()) 5323 return true; 5324 if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet()) 5325 return true; 5326 5327 const SCEV *Diff = getMinusSCEV(LHS, RHS); 5328 if (isKnownNonZero(Diff)) 5329 return true; 5330 break; 5331 } 5332 case ICmpInst::ICMP_EQ: 5333 // The check at the top of the function catches the case where 5334 // the values are known to be equal. 5335 break; 5336 } 5337 return false; 5338 } 5339 5340 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is 5341 /// protected by a conditional between LHS and RHS. This is used to 5342 /// to eliminate casts. 5343 bool 5344 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, 5345 ICmpInst::Predicate Pred, 5346 const SCEV *LHS, const SCEV *RHS) { 5347 // Interpret a null as meaning no loop, where there is obviously no guard 5348 // (interprocedural conditions notwithstanding). 5349 if (!L) return true; 5350 5351 BasicBlock *Latch = L->getLoopLatch(); 5352 if (!Latch) 5353 return false; 5354 5355 BranchInst *LoopContinuePredicate = 5356 dyn_cast<BranchInst>(Latch->getTerminator()); 5357 if (!LoopContinuePredicate || 5358 LoopContinuePredicate->isUnconditional()) 5359 return false; 5360 5361 return isImpliedCond(Pred, LHS, RHS, 5362 LoopContinuePredicate->getCondition(), 5363 LoopContinuePredicate->getSuccessor(0) != L->getHeader()); 5364 } 5365 5366 /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected 5367 /// by a conditional between LHS and RHS. This is used to help avoid max 5368 /// expressions in loop trip counts, and to eliminate casts. 
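///
/// Illustration: for
///   if (n > 0)
///     for (i = 0; i < n; ++i) ...
/// the guard establishes n > 0 whenever the loop is entered, which (in
/// HowManyLessThans below) lets the backedge-taken count be expressed as n
/// rather than the more conservative smax(n, 0).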
5369 bool 5370 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 5371 ICmpInst::Predicate Pred, 5372 const SCEV *LHS, const SCEV *RHS) { 5373 // Interpret a null as meaning no loop, where there is obviously no guard 5374 // (interprocedural conditions notwithstanding). 5375 if (!L) return false; 5376 5377 // Starting at the loop predecessor, climb up the predecessor chain, as long 5378 // as there are predecessors that can be found that have unique successors 5379 // leading to the original header. 5380 for (std::pair<BasicBlock *, BasicBlock *> 5381 Pair(L->getLoopPredecessor(), L->getHeader()); 5382 Pair.first; 5383 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 5384 5385 BranchInst *LoopEntryPredicate = 5386 dyn_cast<BranchInst>(Pair.first->getTerminator()); 5387 if (!LoopEntryPredicate || 5388 LoopEntryPredicate->isUnconditional()) 5389 continue; 5390 5391 if (isImpliedCond(Pred, LHS, RHS, 5392 LoopEntryPredicate->getCondition(), 5393 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 5394 return true; 5395 } 5396 5397 return false; 5398 } 5399 5400 /// isImpliedCond - Test whether the condition described by Pred, LHS, 5401 /// and RHS is true whenever the given Cond value evaluates to true. 5402 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 5403 const SCEV *LHS, const SCEV *RHS, 5404 Value *FoundCondValue, 5405 bool Inverse) { 5406 // Recursively handle And and Or conditions. 5407 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) { 5408 if (BO->getOpcode() == Instruction::And) { 5409 if (!Inverse) 5410 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || 5411 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); 5412 } else if (BO->getOpcode() == Instruction::Or) { 5413 if (Inverse) 5414 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || 5415 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); 5416 } 5417 } 5418 5419 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); 5420 if (!ICI) return false; 5421 5422 // Bail if the ICmp's operands' types are wider than the needed type 5423 // before attempting to call getSCEV on them. This avoids infinite 5424 // recursion, since the analysis of widening casts can require loop 5425 // exit condition information for overflow checking, which would 5426 // lead back here. 5427 if (getTypeSizeInBits(LHS->getType()) < 5428 getTypeSizeInBits(ICI->getOperand(0)->getType())) 5429 return false; 5430 5431 // Now that we found a conditional branch that dominates the loop, check to 5432 // see if it is the comparison we are looking for. 5433 ICmpInst::Predicate FoundPred; 5434 if (Inverse) 5435 FoundPred = ICI->getInversePredicate(); 5436 else 5437 FoundPred = ICI->getPredicate(); 5438 5439 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); 5440 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); 5441 5442 // Balance the types. The case where FoundLHS' type is wider than 5443 // LHS' type is checked for above. 5444 if (getTypeSizeInBits(LHS->getType()) > 5445 getTypeSizeInBits(FoundLHS->getType())) { 5446 if (CmpInst::isSigned(Pred)) { 5447 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 5448 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 5449 } else { 5450 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 5451 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 5452 } 5453 } 5454 5455 // Canonicalize the query to match the way instcombine will have 5456 // canonicalized the comparison. 
5457 if (SimplifyICmpOperands(Pred, LHS, RHS)) 5458 if (LHS == RHS) 5459 return CmpInst::isTrueWhenEqual(Pred); 5460 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 5461 if (FoundLHS == FoundRHS) 5462 return CmpInst::isFalseWhenEqual(Pred); 5463 5464 // Check to see if we can make the LHS or RHS match. 5465 if (LHS == FoundRHS || RHS == FoundLHS) { 5466 if (isa<SCEVConstant>(RHS)) { 5467 std::swap(FoundLHS, FoundRHS); 5468 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 5469 } else { 5470 std::swap(LHS, RHS); 5471 Pred = ICmpInst::getSwappedPredicate(Pred); 5472 } 5473 } 5474 5475 // Check whether the found predicate is the same as the desired predicate. 5476 if (FoundPred == Pred) 5477 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 5478 5479 // Check whether swapping the found predicate makes it the same as the 5480 // desired predicate. 5481 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 5482 if (isa<SCEVConstant>(RHS)) 5483 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS); 5484 else 5485 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), 5486 RHS, LHS, FoundLHS, FoundRHS); 5487 } 5488 5489 // Check whether the actual condition is beyond sufficient. 5490 if (FoundPred == ICmpInst::ICMP_EQ) 5491 if (ICmpInst::isTrueWhenEqual(Pred)) 5492 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 5493 return true; 5494 if (Pred == ICmpInst::ICMP_NE) 5495 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 5496 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 5497 return true; 5498 5499 // Otherwise assume the worst. 5500 return false; 5501 } 5502 5503 /// isImpliedCondOperands - Test whether the condition described by Pred, 5504 /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS, 5505 /// and FoundRHS is true. 5506 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 5507 const SCEV *LHS, const SCEV *RHS, 5508 const SCEV *FoundLHS, 5509 const SCEV *FoundRHS) { 5510 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 5511 FoundLHS, FoundRHS) || 5512 // ~x < ~y --> x > y 5513 isImpliedCondOperandsHelper(Pred, LHS, RHS, 5514 getNotSCEV(FoundRHS), 5515 getNotSCEV(FoundLHS)); 5516 } 5517 5518 /// isImpliedCondOperandsHelper - Test whether the condition described by 5519 /// Pred, LHS, and RHS is true whenever the condition described by Pred, 5520 /// FoundLHS, and FoundRHS is true. 
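///
/// For example, in the signed less-than case below, LHS <s RHS follows from a
/// known FoundLHS <s FoundRHS whenever LHS <=s FoundLHS and FoundRHS <=s RHS,
/// since then LHS <=s FoundLHS <s FoundRHS <=s RHS.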
5521 bool 5522 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, 5523 const SCEV *LHS, const SCEV *RHS, 5524 const SCEV *FoundLHS, 5525 const SCEV *FoundRHS) { 5526 switch (Pred) { 5527 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 5528 case ICmpInst::ICMP_EQ: 5529 case ICmpInst::ICMP_NE: 5530 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) 5531 return true; 5532 break; 5533 case ICmpInst::ICMP_SLT: 5534 case ICmpInst::ICMP_SLE: 5535 if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) && 5536 isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS)) 5537 return true; 5538 break; 5539 case ICmpInst::ICMP_SGT: 5540 case ICmpInst::ICMP_SGE: 5541 if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) && 5542 isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS)) 5543 return true; 5544 break; 5545 case ICmpInst::ICMP_ULT: 5546 case ICmpInst::ICMP_ULE: 5547 if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) && 5548 isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS)) 5549 return true; 5550 break; 5551 case ICmpInst::ICMP_UGT: 5552 case ICmpInst::ICMP_UGE: 5553 if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) && 5554 isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS)) 5555 return true; 5556 break; 5557 } 5558 5559 return false; 5560 } 5561 5562 /// getBECount - Subtract the end and start values and divide by the step, 5563 /// rounding up, to get the number of times the backedge is executed. Return 5564 /// CouldNotCompute if an intermediate computation overflows. 5565 const SCEV *ScalarEvolution::getBECount(const SCEV *Start, 5566 const SCEV *End, 5567 const SCEV *Step, 5568 bool NoWrap) { 5569 assert(!isKnownNegative(Step) && 5570 "This code doesn't handle negative strides yet!"); 5571 5572 const Type *Ty = Start->getType(); 5573 const SCEV *NegOne = getConstant(Ty, (uint64_t)-1); 5574 const SCEV *Diff = getMinusSCEV(End, Start); 5575 const SCEV *RoundUp = getAddExpr(Step, NegOne); 5576 5577 // Add an adjustment to the difference between End and Start so that 5578 // the division will effectively round up. 5579 const SCEV *Add = getAddExpr(Diff, RoundUp); 5580 5581 if (!NoWrap) { 5582 // Check Add for unsigned overflow. 5583 // TODO: More sophisticated things could be done here. 5584 const Type *WideTy = IntegerType::get(getContext(), 5585 getTypeSizeInBits(Ty) + 1); 5586 const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy); 5587 const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy); 5588 const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp); 5589 if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd) 5590 return getCouldNotCompute(); 5591 } 5592 5593 return getUDivExpr(Add, Step); 5594 } 5595 5596 /// HowManyLessThans - Return the number of times a backedge containing the 5597 /// specified less-than comparison will execute. If not computable, return 5598 /// CouldNotCompute. 5599 ScalarEvolution::BackedgeTakenInfo 5600 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, 5601 const Loop *L, bool isSigned) { 5602 // Only handle: "ADDREC < LoopInvariant". 5603 if (!isLoopInvariant(RHS, L)) return getCouldNotCompute(); 5604 5605 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS); 5606 if (!AddRec || AddRec->getLoop() != L) 5607 return getCouldNotCompute(); 5608 5609 // Check to see if we have a flag which makes analysis easy. 5610 bool NoWrap = isSigned ? 
AddRec->hasNoSignedWrap() : 5611 AddRec->hasNoUnsignedWrap(); 5612 5613 if (AddRec->isAffine()) { 5614 unsigned BitWidth = getTypeSizeInBits(AddRec->getType()); 5615 const SCEV *Step = AddRec->getStepRecurrence(*this); 5616 5617 if (Step->isZero()) 5618 return getCouldNotCompute(); 5619 if (Step->isOne()) { 5620 // With unit stride, the iteration never steps past the limit value. 5621 } else if (isKnownPositive(Step)) { 5622 // Test whether a positive iteration can step past the limit 5623 // value and past the maximum value for its type in a single step. 5624 // Note that it's not sufficient to check NoWrap here, because even 5625 // though the value after a wrap is undefined, it's not undefined 5626 // behavior, so if wrap does occur, the loop could either terminate or 5627 // loop infinitely, but in either case, the loop is guaranteed to 5628 // iterate at least until the iteration where the wrapping occurs. 5629 const SCEV *One = getConstant(Step->getType(), 1); 5630 if (isSigned) { 5631 APInt Max = APInt::getSignedMaxValue(BitWidth); 5632 if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax()) 5633 .slt(getSignedRange(RHS).getSignedMax())) 5634 return getCouldNotCompute(); 5635 } else { 5636 APInt Max = APInt::getMaxValue(BitWidth); 5637 if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax()) 5638 .ult(getUnsignedRange(RHS).getUnsignedMax())) 5639 return getCouldNotCompute(); 5640 } 5641 } else 5642 // TODO: Handle negative strides here and below. 5643 return getCouldNotCompute(); 5644 5645 // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant 5646 // m. So, we count the number of iterations in which {n,+,s} < m is true. 5647 // Note that we cannot simply return max(m-n,0)/s because it's not safe to 5648 // treat m-n as signed nor unsigned due to overflow possibility. 5649 5650 // First, we get the value of the LHS in the first iteration: n 5651 const SCEV *Start = AddRec->getOperand(0); 5652 5653 // Determine the minimum constant start value. 5654 const SCEV *MinStart = getConstant(isSigned ? 5655 getSignedRange(Start).getSignedMin() : 5656 getUnsignedRange(Start).getUnsignedMin()); 5657 5658 // If we know that the condition is true in order to enter the loop, 5659 // then we know that it will run exactly (m-n)/s times. Otherwise, we 5660 // only know that it will execute (max(m,n)-n)/s times. In both cases, 5661 // the division must round up. 5662 const SCEV *End = RHS; 5663 if (!isLoopEntryGuardedByCond(L, 5664 isSigned ? ICmpInst::ICMP_SLT : 5665 ICmpInst::ICMP_ULT, 5666 getMinusSCEV(Start, Step), RHS)) 5667 End = isSigned ? getSMaxExpr(RHS, Start) 5668 : getUMaxExpr(RHS, Start); 5669 5670 // Determine the maximum constant end value. 5671 const SCEV *MaxEnd = getConstant(isSigned ? 5672 getSignedRange(End).getSignedMax() : 5673 getUnsignedRange(End).getUnsignedMax()); 5674 5675 // If MaxEnd is within a step of the maximum integer value in its type, 5676 // adjust it down to the minimum value which would produce the same effect. 5677 // This allows the subsequent ceiling division of (N+(step-1))/step to 5678 // compute the correct value. 5679 const SCEV *StepMinusOne = getMinusSCEV(Step, 5680 getConstant(Step->getType(), 1)); 5681 MaxEnd = isSigned ? 
5682 getSMinExpr(MaxEnd, 5683 getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)), 5684 StepMinusOne)) : 5685 getUMinExpr(MaxEnd, 5686 getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)), 5687 StepMinusOne)); 5688 5689 // Finally, we subtract these two values and divide, rounding up, to get 5690 // the number of times the backedge is executed. 5691 const SCEV *BECount = getBECount(Start, End, Step, NoWrap); 5692 5693 // The maximum backedge count is similar, except using the minimum start 5694 // value and the maximum end value. 5695 const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap); 5696 5697 return BackedgeTakenInfo(BECount, MaxBECount); 5698 } 5699 5700 return getCouldNotCompute(); 5701 } 5702 5703 /// getNumIterationsInRange - Return the number of iterations of this loop that 5704 /// produce values in the specified constant range. Another way of looking at 5705 /// this is that it returns the first iteration number where the value is not in 5706 /// the condition, thus computing the exit count. If the iteration count can't 5707 /// be computed, an instance of SCEVCouldNotCompute is returned. 5708 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, 5709 ScalarEvolution &SE) const { 5710 if (Range.isFullSet()) // Infinite loop. 5711 return SE.getCouldNotCompute(); 5712 5713 // If the start is a non-zero constant, shift the range to simplify things. 5714 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 5715 if (!SC->getValue()->isZero()) { 5716 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 5717 Operands[0] = SE.getConstant(SC->getType(), 0); 5718 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop()); 5719 if (const SCEVAddRecExpr *ShiftedAddRec = 5720 dyn_cast<SCEVAddRecExpr>(Shifted)) 5721 return ShiftedAddRec->getNumIterationsInRange( 5722 Range.subtract(SC->getValue()->getValue()), SE); 5723 // This is strange and shouldn't happen. 5724 return SE.getCouldNotCompute(); 5725 } 5726 5727 // The only time we can solve this is when we have all constant indices. 5728 // Otherwise, we cannot determine the overflow conditions. 5729 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 5730 if (!isa<SCEVConstant>(getOperand(i))) 5731 return SE.getCouldNotCompute(); 5732 5733 5734 // Okay at this point we know that all elements of the chrec are constants and 5735 // that the start element is zero. 5736 5737 // First check to see if the range contains zero. If not, the first 5738 // iteration exits. 5739 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 5740 if (!Range.contains(APInt(BitWidth, 0))) 5741 return SE.getConstant(getType(), 0); 5742 5743 if (isAffine()) { 5744 // If this is an affine expression then we have this situation: 5745 // Solve {0,+,A} in Range === Ax in Range 5746 5747 // We know that zero is in the range. If A is positive then we know that 5748 // the upper value of the range must be the first possible exit value. 5749 // If A is negative then the lower of the range is the last possible loop 5750 // value. Also note that we already checked for a full range. 5751 APInt One(BitWidth,1); 5752 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue(); 5753 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower(); 5754 5755 // The exit value should be (End+A)/A. 5756 APInt ExitVal = (End + A).udiv(A); 5757 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 5758 5759 // Evaluate at the exit value. 
If we really did fall out of the valid 5760 // range, then we computed our trip count, otherwise wrap around or other 5761 // things must have happened. 5762 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 5763 if (Range.contains(Val->getValue())) 5764 return SE.getCouldNotCompute(); // Something strange happened 5765 5766 // Ensure that the previous value is in the range. This is a sanity check. 5767 assert(Range.contains( 5768 EvaluateConstantChrecAtConstant(this, 5769 ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) && 5770 "Linear scev computation is off in a bad way!"); 5771 return SE.getConstant(ExitValue); 5772 } else if (isQuadratic()) { 5773 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the 5774 // quadratic equation to solve it. To do this, we must frame our problem in 5775 // terms of figuring out when zero is crossed, instead of when 5776 // Range.getUpper() is crossed. 5777 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end()); 5778 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); 5779 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop()); 5780 5781 // Next, solve the constructed addrec 5782 std::pair<const SCEV *,const SCEV *> Roots = 5783 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE); 5784 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 5785 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 5786 if (R1) { 5787 // Pick the smallest positive root value. 5788 if (ConstantInt *CB = 5789 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, 5790 R1->getValue(), R2->getValue()))) { 5791 if (CB->getZExtValue() == false) 5792 std::swap(R1, R2); // R1 is the minimum root now. 5793 5794 // Make sure the root is not off by one. The returned iteration should 5795 // not be in the range, but the previous one should be. When solving 5796 // for "X*X < 5", for example, we should not return a root of 2. 5797 ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this, 5798 R1->getValue(), 5799 SE); 5800 if (Range.contains(R1Val->getValue())) { 5801 // The next iteration must be out of the range... 5802 ConstantInt *NextVal = 5803 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1); 5804 5805 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 5806 if (!Range.contains(R1Val->getValue())) 5807 return SE.getConstant(NextVal); 5808 return SE.getCouldNotCompute(); // Something strange happened 5809 } 5810 5811 // If R1 was not in the range, then it is a good return value. Make 5812 // sure that R1-1 WAS in the range though, just in case. 5813 ConstantInt *NextVal = 5814 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1); 5815 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 5816 if (Range.contains(R1Val->getValue())) 5817 return R1; 5818 return SE.getCouldNotCompute(); // Something strange happened 5819 } 5820 } 5821 } 5822 5823 return SE.getCouldNotCompute(); 5824 } 5825 5826 5827 5828 //===----------------------------------------------------------------------===// 5829 // SCEVCallbackVH Class Implementation 5830 //===----------------------------------------------------------------------===// 5831 5832 void ScalarEvolution::SCEVCallbackVH::deleted() { 5833 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 5834 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 5835 SE->ConstantEvolutionLoopExitValue.erase(PN); 5836 SE->ValueExprMap.erase(getValPtr()); 5837 // this now dangles! 
5838 } 5839 5840 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 5841 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 5842 5843 // Forget all the expressions associated with users of the old value, 5844 // so that future queries will recompute the expressions using the new 5845 // value. 5846 Value *Old = getValPtr(); 5847 SmallVector<User *, 16> Worklist; 5848 SmallPtrSet<User *, 8> Visited; 5849 for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end(); 5850 UI != UE; ++UI) 5851 Worklist.push_back(*UI); 5852 while (!Worklist.empty()) { 5853 User *U = Worklist.pop_back_val(); 5854 // Deleting the Old value will cause this to dangle. Postpone 5855 // that until everything else is done. 5856 if (U == Old) 5857 continue; 5858 if (!Visited.insert(U)) 5859 continue; 5860 if (PHINode *PN = dyn_cast<PHINode>(U)) 5861 SE->ConstantEvolutionLoopExitValue.erase(PN); 5862 SE->ValueExprMap.erase(U); 5863 for (Value::use_iterator UI = U->use_begin(), UE = U->use_end(); 5864 UI != UE; ++UI) 5865 Worklist.push_back(*UI); 5866 } 5867 // Delete the Old value. 5868 if (PHINode *PN = dyn_cast<PHINode>(Old)) 5869 SE->ConstantEvolutionLoopExitValue.erase(PN); 5870 SE->ValueExprMap.erase(Old); 5871 // this now dangles! 5872 } 5873 5874 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 5875 : CallbackVH(V), SE(se) {} 5876 5877 //===----------------------------------------------------------------------===// 5878 // ScalarEvolution Class Implementation 5879 //===----------------------------------------------------------------------===// 5880 5881 ScalarEvolution::ScalarEvolution() 5882 : FunctionPass(ID), FirstUnknown(0) { 5883 initializeScalarEvolutionPass(*PassRegistry::getPassRegistry()); 5884 } 5885 5886 bool ScalarEvolution::runOnFunction(Function &F) { 5887 this->F = &F; 5888 LI = &getAnalysis<LoopInfo>(); 5889 TD = getAnalysisIfAvailable<TargetData>(); 5890 DT = &getAnalysis<DominatorTree>(); 5891 return false; 5892 } 5893 5894 void ScalarEvolution::releaseMemory() { 5895 // Iterate through all the SCEVUnknown instances and call their 5896 // destructors, so that they release their references to their values. 
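// (Only the SCEVUnknown nodes need this explicit destruction: each one holds
// a callback value handle on its Value, and running the destructor drops that
// reference. All SCEV nodes are bump-allocated from SCEVAllocator, which is
// simply Reset() at the end of this function rather than freed node by node.)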
5897 for (SCEVUnknown *U = FirstUnknown; U; U = U->Next) 5898 U->~SCEVUnknown(); 5899 FirstUnknown = 0; 5900 5901 ValueExprMap.clear(); 5902 BackedgeTakenCounts.clear(); 5903 ConstantEvolutionLoopExitValue.clear(); 5904 ValuesAtScopes.clear(); 5905 LoopDispositions.clear(); 5906 BlockDispositions.clear(); 5907 UnsignedRanges.clear(); 5908 SignedRanges.clear(); 5909 UniqueSCEVs.clear(); 5910 SCEVAllocator.Reset(); 5911 } 5912 5913 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const { 5914 AU.setPreservesAll(); 5915 AU.addRequiredTransitive<LoopInfo>(); 5916 AU.addRequiredTransitive<DominatorTree>(); 5917 } 5918 5919 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 5920 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 5921 } 5922 5923 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 5924 const Loop *L) { 5925 // Print all inner loops first 5926 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) 5927 PrintLoopInfo(OS, SE, *I); 5928 5929 OS << "Loop "; 5930 WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false); 5931 OS << ": "; 5932 5933 SmallVector<BasicBlock *, 8> ExitBlocks; 5934 L->getExitBlocks(ExitBlocks); 5935 if (ExitBlocks.size() != 1) 5936 OS << "<multiple exits> "; 5937 5938 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 5939 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 5940 } else { 5941 OS << "Unpredictable backedge-taken count. "; 5942 } 5943 5944 OS << "\n" 5945 "Loop "; 5946 WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false); 5947 OS << ": "; 5948 5949 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 5950 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 5951 } else { 5952 OS << "Unpredictable max backedge-taken count. "; 5953 } 5954 5955 OS << "\n"; 5956 } 5957 5958 void ScalarEvolution::print(raw_ostream &OS, const Module *) const { 5959 // ScalarEvolution's implementation of the print method is to print 5960 // out SCEV values of all instructions that are interesting. Doing 5961 // this potentially causes it to create new SCEV objects though, 5962 // which technically conflicts with the const qualifier. This isn't 5963 // observable from outside the class though, so casting away the 5964 // const isn't dangerous. 
5965 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 5966 5967 OS << "Classifying expressions for: "; 5968 WriteAsOperand(OS, F, /*PrintType=*/false); 5969 OS << "\n"; 5970 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) 5971 if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) { 5972 OS << *I << '\n'; 5973 OS << " --> "; 5974 const SCEV *SV = SE.getSCEV(&*I); 5975 SV->print(OS); 5976 5977 const Loop *L = LI->getLoopFor((*I).getParent()); 5978 5979 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 5980 if (AtUse != SV) { 5981 OS << " --> "; 5982 AtUse->print(OS); 5983 } 5984 5985 if (L) { 5986 OS << "\t\t" "Exits: "; 5987 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 5988 if (!SE.isLoopInvariant(ExitValue, L)) { 5989 OS << "<<Unknown>>"; 5990 } else { 5991 OS << *ExitValue; 5992 } 5993 } 5994 5995 OS << "\n"; 5996 } 5997 5998 OS << "Determining loop execution counts for: "; 5999 WriteAsOperand(OS, F, /*PrintType=*/false); 6000 OS << "\n"; 6001 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) 6002 PrintLoopInfo(OS, &SE, *I); 6003 } 6004 6005 ScalarEvolution::LoopDisposition 6006 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 6007 std::map<const Loop *, LoopDisposition> &Values = LoopDispositions[S]; 6008 std::pair<std::map<const Loop *, LoopDisposition>::iterator, bool> Pair = 6009 Values.insert(std::make_pair(L, LoopVariant)); 6010 if (!Pair.second) 6011 return Pair.first->second; 6012 6013 LoopDisposition D = computeLoopDisposition(S, L); 6014 return LoopDispositions[S][L] = D; 6015 } 6016 6017 ScalarEvolution::LoopDisposition 6018 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 6019 switch (S->getSCEVType()) { 6020 case scConstant: 6021 return LoopInvariant; 6022 case scTruncate: 6023 case scZeroExtend: 6024 case scSignExtend: 6025 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 6026 case scAddRecExpr: { 6027 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 6028 6029 // If L is the addrec's loop, it's computable. 6030 if (AR->getLoop() == L) 6031 return LoopComputable; 6032 6033 // Add recurrences are never invariant in the function-body (null loop). 6034 if (!L) 6035 return LoopVariant; 6036 6037 // This recurrence is variant w.r.t. L if L contains AR's loop. 6038 if (L->contains(AR->getLoop())) 6039 return LoopVariant; 6040 6041 // This recurrence is invariant w.r.t. L if AR's loop contains L. 6042 if (AR->getLoop()->contains(L)) 6043 return LoopInvariant; 6044 6045 // This recurrence is variant w.r.t. L if any of its operands 6046 // are variant. 6047 for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end(); 6048 I != E; ++I) 6049 if (!isLoopInvariant(*I, L)) 6050 return LoopVariant; 6051 6052 // Otherwise it's loop-invariant. 6053 return LoopInvariant; 6054 } 6055 case scAddExpr: 6056 case scMulExpr: 6057 case scUMaxExpr: 6058 case scSMaxExpr: { 6059 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 6060 bool HasVarying = false; 6061 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); 6062 I != E; ++I) { 6063 LoopDisposition D = getLoopDisposition(*I, L); 6064 if (D == LoopVariant) 6065 return LoopVariant; 6066 if (D == LoopComputable) 6067 HasVarying = true; 6068 } 6069 return HasVarying ? 
LoopComputable : LoopInvariant; 6070 } 6071 case scUDivExpr: { 6072 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 6073 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 6074 if (LD == LoopVariant) 6075 return LoopVariant; 6076 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 6077 if (RD == LoopVariant) 6078 return LoopVariant; 6079 return (LD == LoopInvariant && RD == LoopInvariant) ? 6080 LoopInvariant : LoopComputable; 6081 } 6082 case scUnknown: 6083 // All non-instruction values are loop invariant. All instructions are loop 6084 // invariant if they are not contained in the specified loop. 6085 // Instructions are never considered invariant in the function body 6086 // (null loop) because they are defined within the "loop". 6087 if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 6088 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant; 6089 return LoopInvariant; 6090 case scCouldNotCompute: 6091 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 6092 return LoopVariant; 6093 default: break; 6094 } 6095 llvm_unreachable("Unknown SCEV kind!"); 6096 return LoopVariant; 6097 } 6098 6099 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 6100 return getLoopDisposition(S, L) == LoopInvariant; 6101 } 6102 6103 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 6104 return getLoopDisposition(S, L) == LoopComputable; 6105 } 6106 6107 ScalarEvolution::BlockDisposition 6108 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 6109 std::map<const BasicBlock *, BlockDisposition> &Values = BlockDispositions[S]; 6110 std::pair<std::map<const BasicBlock *, BlockDisposition>::iterator, bool> 6111 Pair = Values.insert(std::make_pair(BB, DoesNotDominateBlock)); 6112 if (!Pair.second) 6113 return Pair.first->second; 6114 6115 BlockDisposition D = computeBlockDisposition(S, BB); 6116 return BlockDispositions[S][BB] = D; 6117 } 6118 6119 ScalarEvolution::BlockDisposition 6120 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 6121 switch (S->getSCEVType()) { 6122 case scConstant: 6123 return ProperlyDominatesBlock; 6124 case scTruncate: 6125 case scZeroExtend: 6126 case scSignExtend: 6127 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 6128 case scAddRecExpr: { 6129 // This uses a "dominates" query instead of "properly dominates" query 6130 // to test for proper dominance too, because the instruction which 6131 // produces the addrec's value is a PHI, and a PHI effectively properly 6132 // dominates its entire containing block. 6133 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 6134 if (!DT->dominates(AR->getLoop()->getHeader(), BB)) 6135 return DoesNotDominateBlock; 6136 } 6137 // FALL THROUGH into SCEVNAryExpr handling. 6138 case scAddExpr: 6139 case scMulExpr: 6140 case scUMaxExpr: 6141 case scSMaxExpr: { 6142 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 6143 bool Proper = true; 6144 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); 6145 I != E; ++I) { 6146 BlockDisposition D = getBlockDisposition(*I, BB); 6147 if (D == DoesNotDominateBlock) 6148 return DoesNotDominateBlock; 6149 if (D == DominatesBlock) 6150 Proper = false; 6151 } 6152 return Proper ? 
ProperlyDominatesBlock : DominatesBlock; 6153 } 6154 case scUDivExpr: { 6155 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 6156 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 6157 BlockDisposition LD = getBlockDisposition(LHS, BB); 6158 if (LD == DoesNotDominateBlock) 6159 return DoesNotDominateBlock; 6160 BlockDisposition RD = getBlockDisposition(RHS, BB); 6161 if (RD == DoesNotDominateBlock) 6162 return DoesNotDominateBlock; 6163 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 6164 ProperlyDominatesBlock : DominatesBlock; 6165 } 6166 case scUnknown: 6167 if (Instruction *I = 6168 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 6169 if (I->getParent() == BB) 6170 return DominatesBlock; 6171 if (DT->properlyDominates(I->getParent(), BB)) 6172 return ProperlyDominatesBlock; 6173 return DoesNotDominateBlock; 6174 } 6175 return ProperlyDominatesBlock; 6176 case scCouldNotCompute: 6177 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 6178 return DoesNotDominateBlock; 6179 default: break; 6180 } 6181 llvm_unreachable("Unknown SCEV kind!"); 6182 return DoesNotDominateBlock; 6183 } 6184 6185 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 6186 return getBlockDisposition(S, BB) >= DominatesBlock; 6187 } 6188 6189 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 6190 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 6191 } 6192 6193 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 6194 switch (S->getSCEVType()) { 6195 case scConstant: 6196 return false; 6197 case scTruncate: 6198 case scZeroExtend: 6199 case scSignExtend: { 6200 const SCEVCastExpr *Cast = cast<SCEVCastExpr>(S); 6201 const SCEV *CastOp = Cast->getOperand(); 6202 return Op == CastOp || hasOperand(CastOp, Op); 6203 } 6204 case scAddRecExpr: 6205 case scAddExpr: 6206 case scMulExpr: 6207 case scUMaxExpr: 6208 case scSMaxExpr: { 6209 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 6210 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); 6211 I != E; ++I) { 6212 const SCEV *NAryOp = *I; 6213 if (NAryOp == Op || hasOperand(NAryOp, Op)) 6214 return true; 6215 } 6216 return false; 6217 } 6218 case scUDivExpr: { 6219 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 6220 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 6221 return LHS == Op || hasOperand(LHS, Op) || 6222 RHS == Op || hasOperand(RHS, Op); 6223 } 6224 case scUnknown: 6225 return false; 6226 case scCouldNotCompute: 6227 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 6228 return false; 6229 default: break; 6230 } 6231 llvm_unreachable("Unknown SCEV kind!"); 6232 return false; 6233 } 6234 6235 void ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 6236 ValuesAtScopes.erase(S); 6237 LoopDispositions.erase(S); 6238 BlockDispositions.erase(S); 6239 UnsignedRanges.erase(S); 6240 SignedRanges.erase(S); 6241 } 6242
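
//===----------------------------------------------------------------------===//
// Example client usage (illustrative sketch only)
//===----------------------------------------------------------------------===//
//
// The disabled pass below is not part of ScalarEvolution; it is a minimal
// sketch of how a client requests the analysis and queries the interfaces
// implemented in this file (hasLoopInvariantBackedgeTakenCount,
// getBackedgeTakenCount, getSCEV, hasComputableLoopEvolution). The pass name
// "SCEVExampleUsage" is hypothetical; everything it calls is defined above or
// in the headers already included by this file.
#if 0
namespace {
  struct SCEVExampleUsage : public FunctionPass {
    static char ID;
    SCEVExampleUsage() : FunctionPass(ID) {}

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // The analyses must be requested here before getAnalysis<> may be used.
      AU.addRequired<LoopInfo>();
      AU.addRequired<ScalarEvolution>();
      AU.setPreservesAll();
    }

    virtual bool runOnFunction(Function &F) {
      ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
      LoopInfo &LI = getAnalysis<LoopInfo>();

      // Walk the top-level loops of the function.
      for (LoopInfo::iterator I = LI.begin(), E = LI.end(); I != E; ++I) {
        const Loop *L = *I;

        // Ask for a symbolic trip count; SCEVCouldNotCompute is returned when
        // no loop-invariant backedge-taken count is known.
        if (SE.hasLoopInvariantBackedgeTakenCount(L))
          errs() << "backedge-taken count: "
                 << *SE.getBackedgeTakenCount(L) << "\n";

        // Classify the canonical induction variable, if there is one. Its
        // SCEV is typically an add recurrence such as {0,+,1}, which is
        // "computable" in its own loop rather than invariant.
        if (PHINode *IV = L->getCanonicalInductionVariable())
          if (SE.isSCEVable(IV->getType())) {
            const SCEV *S = SE.getSCEV(IV);
            errs() << "canonical IV: " << *S
                   << (SE.hasComputableLoopEvolution(S, L) ?
                       " (computable in loop)\n" : "\n");
          }
      }
      return false;
    }
  };
}
char SCEVExampleUsage::ID = 0;
#endif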