//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->getNoWrapFlags(FlagNUW))
      OS << "nuw><";
    if (AR->getNoWrapFlags(FlagNSW))
      OS << "nsw><";
    if (AR->getNoWrapFlags(FlagNW) &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = 0;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (llvm::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      WriteAsOperand(OS, FieldNo, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    WriteAsOperand(OS, U->getValue(), false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    return 0;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
  return 0;
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(0);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
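
// isSizeOf, isAlignOf, and isOffsetOf below recognize the target-independent
// constant-expression encodings of sizeof, alignof, and offsetof (as produced
// by ConstantExpr::getSizeOf and friends).  For example, sizeof(T) is encoded
// as a ptrtoint of a getelementptr that indexes one T past a null T*:
// ptrtoint(getelementptr(T* null, i32 1)).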
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                          ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    const LoopInfo *const LI;
  public:
    explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

    // Return true if LHS is less than RHS, and false if LHS is at least RHS.
    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      return compare(LHS, RHS) < 0;
    }

    // Return negative, zero, or positive, if LHS is less than, equal to, or
    // greater than RHS, respectively. A three-way result allows recursive
    // comparisons to be more efficient.
    int compare(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return 0;

      // Primarily, sort the SCEVs by their getSCEVType().
      unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
      if (LType != RType)
        return (int)LType - (int)RType;

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.
      switch (LType) {
      case scUnknown: {
        const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Sort SCEVUnknown values with some loose heuristics. TODO: This is
        // not as complete as it could be.
        const Value *LV = LU->getValue(), *RV = RU->getValue();

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        bool LIsPointer = LV->getType()->isPointerTy(),
             RIsPointer = RV->getType()->isPointerTy();
        if (LIsPointer != RIsPointer)
          return (int)LIsPointer - (int)RIsPointer;

        // Compare getValueID values.
        unsigned LID = LV->getValueID(),
                 RID = RV->getValueID();
        if (LID != RID)
          return (int)LID - (int)RID;

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LV)) {
          const Argument *RA = cast<Argument>(RV);
          unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
          return (int)LArgNo - (int)RArgNo;
        }

        // For instructions, compare their loop depth, and their operand
        // count.  This is pretty loose.
        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
          const Instruction *RInst = cast<Instruction>(RV);

          // Compare loop depths.
          const BasicBlock *LParent = LInst->getParent(),
                           *RParent = RInst->getParent();
          if (LParent != RParent) {
            unsigned LDepth = LI->getLoopDepth(LParent),
                     RDepth = LI->getLoopDepth(RParent);
            if (LDepth != RDepth)
              return (int)LDepth - (int)RDepth;
          }

          // Compare the number of operands.
          unsigned LNumOps = LInst->getNumOperands(),
                   RNumOps = RInst->getNumOperands();
          return (int)LNumOps - (int)RNumOps;
        }

        return 0;
      }

      case scConstant: {
        const SCEVConstant *LC = cast<SCEVConstant>(LHS);
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);

        // Compare constant values.
        const APInt &LA = LC->getValue()->getValue();
        const APInt &RA = RC->getValue()->getValue();
        unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
        if (LBitWidth != RBitWidth)
          return (int)LBitWidth - (int)RBitWidth;
        return LA.ult(RA) ? -1 : 1;
      }

      case scAddRecExpr: {
        const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

        // Compare addrec loop depths.
        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
        if (LLoop != RLoop) {
          unsigned LDepth = LLoop->getLoopDepth(),
                   RDepth = RLoop->getLoopDepth();
          if (LDepth != RDepth)
            return (int)LDepth - (int)RDepth;
        }

        // Addrec complexity grows with operand count.
        unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
        if (LNumOps != RNumOps)
          return (int)LNumOps - (int)RNumOps;

        // Lexicographically compare.
        for (unsigned i = 0; i != LNumOps; ++i) {
          long X = compare(LA->getOperand(i), RA->getOperand(i));
          if (X != 0)
            return X;
        }

        return 0;
      }

      case scAddExpr:
      case scMulExpr:
      case scSMaxExpr:
      case scUMaxExpr: {
        const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

        // Lexicographically compare n-ary expressions.
        unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
        for (unsigned i = 0; i != LNumOps; ++i) {
          if (i >= RNumOps)
            return 1;
          long X = compare(LC->getOperand(i), RC->getOperand(i));
          if (X != 0)
            return X;
        }
        return (int)LNumOps - (int)RNumOps;
      }

      case scUDivExpr: {
        const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

        // Lexicographically compare udiv expressions.
        long X = compare(LC->getLHS(), RC->getLHS());
        if (X != 0)
          return X;
        return compare(LC->getRHS(), RC->getRHS());
      }

      case scTruncate:
      case scZeroExtend:
      case scSignExtend: {
        const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

        // Compare cast expressions by operand.
        return compare(LC->getOperand(), RC->getOperand());
      }

      default:
        break;
      }

      llvm_unreachable("Unknown SCEV kind!");
      return 0;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector
/// are consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely
  // to be extremely short in practice.  Note that we take this approach
  // because we do not want to depend on the addresses of the objects we are
  // grouping.
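  //
  // For example, an operand list such as (%a + %b + 2 + %a) sorts with the
  // constant first, and the pass below then moves the duplicate %a values
  // next to each other, which lets getAddExpr later fold them into %a * 2.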
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
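  //
  // (For a concrete instance: with K = 3 and W = 8, K! = 6 = 2^1 * 3, so
  // T = 1 and the odd factor K!/2^T is 3.  The product It*(It-1)*(It-2) is
  // formed at W+T = 9 bits, shifted right by T = 1, and the result is then
  // multiplied by 171, the multiplicative inverse of 3 mod 2^8, since
  // 3 * 171 = 513 = 1 (mod 256).)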
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
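/// For example, evaluating {5,+,3,+,2} at It = 4 gives
/// 5*BC(4, 0) + 3*BC(4, 1) + 2*BC(4, 2) = 5 + 12 + 12 = 29, which matches
/// the sequence 5, 8, 13, 20, 29 obtained by stepping the recurrence by hand.
///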
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
                                               getEffectiveSCEVType(Ty))));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // As a special case, fold trunc(undef) to undef. We don't want to
  // know too much about SCEVUnknowns, but this special case is handy
  // and harmless.
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
    if (isa<UndefValue>(U->getValue()))
      return getSCEV(UndefValue::get(Ty));

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
                                              getEffectiveSCEVType(Ty))));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->getNoWrapFlags(SCEV::FlagNUW))
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                           ICmpInst::Predicate *Pred,
                                           ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMax());
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMin());
  }
  return 0;
}

// The recurrence AR has been shown to have no signed wrap. Typically, if we
// can prove NSW for AR, then we can just as easily prove NSW for its
// preincrement or postincrement sibling. This allows normalizing a sign
// extended AddRec as such: {sext(Step + Start),+,Step} =>
// {Step + sext(Start),+,Step}. As a result, the expression
// "Step + sext(PreIncAR)" is congruent with "sext(PostIncAR)".
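//
// For example, with Start = (4 + %x) and Step = 4, PreStart is %x, and
// sext({4 + %x,+,4}) can be rewritten as (4 + sext({%x,+,4})) provided the
// pre-increment recurrence {%x,+,4} does not sign-wrap.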
static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
                                            Type *Ty,
                                            ScalarEvolution *SE) {
  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return 0;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (SCEVAddExpr::op_iterator I = SA->op_begin(), E = SA->op_end();
       I != E; ++I) {
    if (*I != Step)
      DiffOps.push_back(*I);
  }
  if (DiffOps.size() == SA->getNumOperands())
    return 0;

  // This is a postinc AR. Check for overflow on the preinc recurrence using
  // the same three conditions that getSignExtendExpr checks.

  // 1. NSW flags on the step increment.
  const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
    SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  if (PreAR && PreAR->getNoWrapFlags(SCEV::FlagNSW))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
    SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
                   SE->getSignExtendExpr(Step, WideTy));
  if (SE->getSignExtendExpr(Start, WideTy) == OperandExtendedStart) {
    // Cache knowledge of PreAR NSW.
    if (PreAR)
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(SCEV::FlagNSW);
    // FIXME: this optimization needs a unit test
    DEBUG(dbgs() << "SCEV: untested prestart overflow check\n");
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
    return PreStart;
  }
  return 0;
}

// Get the normalized sign-extended expression for this AddRec's Start.
static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
                                            Type *Ty,
                                            ScalarEvolution *SE) {
  const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
  if (!PreStart)
    return SE->getSignExtendExpr(AR->getStart(), Ty);

  return SE->getAddExpr(SE->getSignExtendExpr(AR->getStepRecurrence(*SE), Ty),
                        SE->getSignExtendExpr(PreStart, Ty));
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
                                              getEffectiveSCEVType(Ty))));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is provably positive, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty);

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->getNoWrapFlags(SCEV::FlagNSW))
        return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
                             getSignExtendExpr(Step, Ty),
                             L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
                                 getSignExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
          Add = getAddExpr(Start, UMul);
          OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
                                 getZeroExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
              isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
                                          OverflowLimit)))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
                               getSignExtendExpr(Step, Ty),
                               L, AR->getNoWrapFlags());
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      Ops.push_back(getAnyExtendExpr(*I, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // As a special case, fold anyext(undef) to undef. We don't want to
  // know too much about SCEVUnknowns, but this special case is handy
  // and harmless.
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
    if (isa<UndefValue>(U->getValue()))
      return getSCEV(UndefValue::get(Ty));

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, and update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getValue()->getValue();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       Add->op_begin(), Add->getNumOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  // And vice-versa.
  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
    bool All = true;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
         E = Ops.end(); I != E; ++I)
      if (!isKnownNonNegative(*I)) {
        All = false;
        break;
      }
    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
  }

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
1492     if (LHSC->getValue()->isZero()) {
1493       Ops.erase(Ops.begin());
1494       --Idx;
1495     }
1496 
1497     if (Ops.size() == 1) return Ops[0];
1498   }
1499 
1500   // Okay, check to see if the same value occurs in the operand list more than
1501   // once. If so, merge them together into a multiply expression. Since we
1502   // sorted the list, these values are required to be adjacent.
1503   Type *Ty = Ops[0]->getType();
1504   bool FoundMatch = false;
1505   for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
1506     if (Ops[i] == Ops[i+1]) { // X + Y + Y  -->  X + Y*2
1507       // Scan ahead to count how many equal operands there are.
1508       unsigned Count = 2;
1509       while (i+Count != e && Ops[i+Count] == Ops[i])
1510         ++Count;
1511       // Merge the values into a multiply.
1512       const SCEV *Scale = getConstant(Ty, Count);
1513       const SCEV *Mul = getMulExpr(Scale, Ops[i]);
1514       if (Ops.size() == Count)
1515         return Mul;
1516       Ops[i] = Mul;
1517       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
1518       --i; e -= Count - 1;
1519       FoundMatch = true;
1520     }
1521   if (FoundMatch)
1522     return getAddExpr(Ops, Flags);
1523 
1524   // Check for truncates. If all the operands are truncated from the same
1525   // type, see if factoring out the truncate would permit the result to be
1526   // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1527   // if the contents of the resulting outer trunc fold to something simple.
1528   for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1529     const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
1530     Type *DstType = Trunc->getType();
1531     Type *SrcType = Trunc->getOperand()->getType();
1532     SmallVector<const SCEV *, 8> LargeOps;
1533     bool Ok = true;
1534     // Check all the operands to see if they can be represented in the
1535     // source type of the truncate.
1536     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1537       if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1538         if (T->getOperand()->getType() != SrcType) {
1539           Ok = false;
1540           break;
1541         }
1542         LargeOps.push_back(T->getOperand());
1543       } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1544         LargeOps.push_back(getAnyExtendExpr(C, SrcType));
1545       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
1546         SmallVector<const SCEV *, 8> LargeMulOps;
1547         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1548           if (const SCEVTruncateExpr *T =
1549                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1550             if (T->getOperand()->getType() != SrcType) {
1551               Ok = false;
1552               break;
1553             }
1554             LargeMulOps.push_back(T->getOperand());
1555           } else if (const SCEVConstant *C =
1556                        dyn_cast<SCEVConstant>(M->getOperand(j))) {
1557             LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
1558           } else {
1559             Ok = false;
1560             break;
1561           }
1562         }
1563         if (Ok)
1564           LargeOps.push_back(getMulExpr(LargeMulOps));
1565       } else {
1566         Ok = false;
1567         break;
1568       }
1569     }
1570     if (Ok) {
1571       // Evaluate the expression in the larger type.
1572       const SCEV *Fold = getAddExpr(LargeOps, Flags);
1573       // If it folds to something simple, use it. Otherwise, don't.
1574       if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1575         return getTruncateExpr(Fold, DstType);
1576     }
1577   }
1578 
1579   // Skip past any other cast SCEVs.
1580   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1581     ++Idx;
1582 
1583   // If there are add operands, they would be next.
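  // For example, an operand list like {a, (b + c)} is flattened to
  // {a, b, c} below, and the recursive call re-sorts and re-folds the
  // combined list. (Illustrative operands.)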
1584   if (Idx < Ops.size()) {
1585     bool DeletedAdd = false;
1586     while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
1587       // If we have an add, expand the add operands onto the end of the
1588       // operands list.
1589       Ops.erase(Ops.begin()+Idx);
1590       Ops.append(Add->op_begin(), Add->op_end());
1591       DeletedAdd = true;
1592     }
1593 
1594     // If we deleted at least one add, we added operands to the end of the
1595     // list, and they are not necessarily sorted. Recurse to resort and
1596     // resimplify any operands we just acquired.
1597     if (DeletedAdd)
1598       return getAddExpr(Ops);
1599   }
1600 
1601   // Skip over the add expression until we get to a multiply.
1602   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1603     ++Idx;
1604 
1605   // Check to see if there are any folding opportunities present with
1606   // operands multiplied by constant values.
1607   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1608     uint64_t BitWidth = getTypeSizeInBits(Ty);
1609     DenseMap<const SCEV *, APInt> M;
1610     SmallVector<const SCEV *, 8> NewOps;
1611     APInt AccumulatedConstant(BitWidth, 0);
1612     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1613                                      Ops.data(), Ops.size(),
1614                                      APInt(BitWidth, 1), *this)) {
1615       // Some interesting folding opportunity is present, so it's worthwhile to
1616       // re-generate the operands list. Group the operands by constant scale,
1617       // to avoid multiplying by the same constant scale multiple times.
1618       std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1619       for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(),
1620            E = NewOps.end(); I != E; ++I)
1621         MulOpLists[M.find(*I)->second].push_back(*I);
1622       // Re-generate the operands list.
1623       Ops.clear();
1624       if (AccumulatedConstant != 0)
1625         Ops.push_back(getConstant(AccumulatedConstant));
1626       for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1627            I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1628         if (I->first != 0)
1629           Ops.push_back(getMulExpr(getConstant(I->first),
1630                                    getAddExpr(I->second)));
1631       if (Ops.empty())
1632         return getConstant(Ty, 0);
1633       if (Ops.size() == 1)
1634         return Ops[0];
1635       return getAddExpr(Ops);
1636     }
1637   }
1638 
1639   // If we are adding something to a multiply expression, make sure the
1640   // something is not already an operand of the multiply. If so, merge it into
1641   // the multiply.
1642   for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1643     const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1644     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1645       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1646       if (isa<SCEVConstant>(MulOpSCEV))
1647         continue;
1648       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1649         if (MulOpSCEV == Ops[AddOp]) {
1650           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
1651           const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1652           if (Mul->getNumOperands() != 2) {
1653             // If the multiply has more than two operands, we must get the
1654             // Y*Z term.
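            // (For instance, given W + X + (X * Y * Z), InnerMul must become
            // Y*Z, built from every operand of the multiply except X itself.)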
1655 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 1656 Mul->op_begin()+MulOp); 1657 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 1658 InnerMul = getMulExpr(MulOps); 1659 } 1660 const SCEV *One = getConstant(Ty, 1); 1661 const SCEV *AddOne = getAddExpr(One, InnerMul); 1662 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV); 1663 if (Ops.size() == 2) return OuterMul; 1664 if (AddOp < Idx) { 1665 Ops.erase(Ops.begin()+AddOp); 1666 Ops.erase(Ops.begin()+Idx-1); 1667 } else { 1668 Ops.erase(Ops.begin()+Idx); 1669 Ops.erase(Ops.begin()+AddOp-1); 1670 } 1671 Ops.push_back(OuterMul); 1672 return getAddExpr(Ops); 1673 } 1674 1675 // Check this multiply against other multiplies being added together. 1676 for (unsigned OtherMulIdx = Idx+1; 1677 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 1678 ++OtherMulIdx) { 1679 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 1680 // If MulOp occurs in OtherMul, we can fold the two multiplies 1681 // together. 1682 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 1683 OMulOp != e; ++OMulOp) 1684 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 1685 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 1686 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 1687 if (Mul->getNumOperands() != 2) { 1688 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 1689 Mul->op_begin()+MulOp); 1690 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 1691 InnerMul1 = getMulExpr(MulOps); 1692 } 1693 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 1694 if (OtherMul->getNumOperands() != 2) { 1695 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 1696 OtherMul->op_begin()+OMulOp); 1697 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 1698 InnerMul2 = getMulExpr(MulOps); 1699 } 1700 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2); 1701 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); 1702 if (Ops.size() == 2) return OuterMul; 1703 Ops.erase(Ops.begin()+Idx); 1704 Ops.erase(Ops.begin()+OtherMulIdx-1); 1705 Ops.push_back(OuterMul); 1706 return getAddExpr(Ops); 1707 } 1708 } 1709 } 1710 } 1711 1712 // If there are any add recurrences in the operands list, see if any other 1713 // added values are loop invariant. If so, we can fold them into the 1714 // recurrence. 1715 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 1716 ++Idx; 1717 1718 // Scan over all recurrences, trying to fold loop invariants into them. 1719 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 1720 // Scan all of the other operands to this add and add them to the vector if 1721 // they are loop invariant w.r.t. the recurrence. 1722 SmallVector<const SCEV *, 8> LIOps; 1723 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 1724 const Loop *AddRecLoop = AddRec->getLoop(); 1725 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1726 if (isLoopInvariant(Ops[i], AddRecLoop)) { 1727 LIOps.push_back(Ops[i]); 1728 Ops.erase(Ops.begin()+i); 1729 --i; --e; 1730 } 1731 1732 // If we found some loop invariants, fold them into the recurrence. 1733 if (!LIOps.empty()) { 1734 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 1735 LIOps.push_back(AddRec->getStart()); 1736 1737 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 1738 AddRec->op_end()); 1739 AddRecOps[0] = getAddExpr(LIOps); 1740 1741 // Build the new addrec. Propagate the NUW and NSW flags if both the 1742 // outer add and the inner addrec are guaranteed to have no overflow. 
1743       // Always propagate NW.
1744       Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
1745       const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
1746 
1747       // If all of the other operands were loop invariant, we are done.
1748       if (Ops.size() == 1) return NewRec;
1749 
1750       // Otherwise, add the folded AddRec to the non-invariant parts.
1751       for (unsigned i = 0;; ++i)
1752         if (Ops[i] == AddRec) {
1753           Ops[i] = NewRec;
1754           break;
1755         }
1756       return getAddExpr(Ops);
1757     }
1758 
1759     // Okay, if there weren't any loop invariants to be folded, check to see if
1760     // there are multiple AddRec's with the same loop induction variable being
1761     // added together. If so, we can fold them.
1762     for (unsigned OtherIdx = Idx+1;
1763          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1764          ++OtherIdx)
1765       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
1766         // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
1767         SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1768                                                AddRec->op_end());
1769         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1770              ++OtherIdx)
1771           if (const SCEVAddRecExpr *OtherAddRec =
1772                 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
1773             if (OtherAddRec->getLoop() == AddRecLoop) {
1774               for (unsigned i = 0, e = OtherAddRec->getNumOperands();
1775                    i != e; ++i) {
1776                 if (i >= AddRecOps.size()) {
1777                   AddRecOps.append(OtherAddRec->op_begin()+i,
1778                                    OtherAddRec->op_end());
1779                   break;
1780                 }
1781                 AddRecOps[i] = getAddExpr(AddRecOps[i],
1782                                           OtherAddRec->getOperand(i));
1783               }
1784               Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
1785             }
1786         // Step size has changed, so we cannot guarantee no self-wraparound.
1787         Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
1788         return getAddExpr(Ops);
1789       }
1790 
1791     // Otherwise couldn't fold anything into this recurrence. Move on to the
1792     // next one.
1793   }
1794 
1795   // Okay, it looks like we really DO need an add expr. Check to see if we
1796   // already have one, otherwise create a new one.
1797   FoldingSetNodeID ID;
1798   ID.AddInteger(scAddExpr);
1799   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1800     ID.AddPointer(Ops[i]);
1801   void *IP = 0;
1802   SCEVAddExpr *S =
1803     static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1804   if (!S) {
1805     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1806     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1807     S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
1808                                         O, Ops.size());
1809     UniqueSCEVs.InsertNode(S, IP);
1810   }
1811   S->setNoWrapFlags(Flags);
1812   return S;
1813 }
1814 
1815 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
1816   uint64_t k = i*j;
1817   if (j > 1 && k / j != i) Overflow = true;
1818   return k;
1819 }
1820 
1821 /// Compute the result of "n choose k", the binomial coefficient. If an
1822 /// intermediate computation overflows, Overflow will be set and the return will
1823 /// be garbage. Overflow is not cleared on absence of overflow.
1824 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
1825   // We use the multiplicative formula:
1826   //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
1827   // At each iteration i, we multiply by the numerator term (n-(i-1)) and then
1828   // divide by i. The result stays integral at every step, because a product
1829   // of i consecutive integers is always divisible by i!, and this helps
1830   // reduce the chance of overflow in the intermediate computations.
However, we can still overflow even when the 1831 // final result would fit. 1832 1833 if (n == 0 || n == k) return 1; 1834 if (k > n) return 0; 1835 1836 if (k > n/2) 1837 k = n-k; 1838 1839 uint64_t r = 1; 1840 for (uint64_t i = 1; i <= k; ++i) { 1841 r = umul_ov(r, n-(i-1), Overflow); 1842 r /= i; 1843 } 1844 return r; 1845 } 1846 1847 /// getMulExpr - Get a canonical multiply expression, or something simpler if 1848 /// possible. 1849 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 1850 SCEV::NoWrapFlags Flags) { 1851 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 1852 "only nuw or nsw allowed"); 1853 assert(!Ops.empty() && "Cannot get empty mul!"); 1854 if (Ops.size() == 1) return Ops[0]; 1855 #ifndef NDEBUG 1856 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 1857 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 1858 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 1859 "SCEVMulExpr operand types don't match!"); 1860 #endif 1861 1862 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 1863 // And vice-versa. 1864 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 1865 SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask); 1866 if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) { 1867 bool All = true; 1868 for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(), 1869 E = Ops.end(); I != E; ++I) 1870 if (!isKnownNonNegative(*I)) { 1871 All = false; 1872 break; 1873 } 1874 if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 1875 } 1876 1877 // Sort by complexity, this groups all similar expression types together. 1878 GroupByComplexity(Ops, LI); 1879 1880 // If there are any constants, fold them together. 1881 unsigned Idx = 0; 1882 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 1883 1884 // C1*(C2+V) -> C1*C2 + C1*V 1885 if (Ops.size() == 2) 1886 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 1887 if (Add->getNumOperands() == 2 && 1888 isa<SCEVConstant>(Add->getOperand(0))) 1889 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)), 1890 getMulExpr(LHSC, Add->getOperand(1))); 1891 1892 ++Idx; 1893 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 1894 // We found two constants, fold them together! 1895 ConstantInt *Fold = ConstantInt::get(getContext(), 1896 LHSC->getValue()->getValue() * 1897 RHSC->getValue()->getValue()); 1898 Ops[0] = getConstant(Fold); 1899 Ops.erase(Ops.begin()+1); // Erase the folded element 1900 if (Ops.size() == 1) return Ops[0]; 1901 LHSC = cast<SCEVConstant>(Ops[0]); 1902 } 1903 1904 // If we are left with a constant one being multiplied, strip it off. 1905 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) { 1906 Ops.erase(Ops.begin()); 1907 --Idx; 1908 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 1909 // If we have a multiply of zero, it will always be zero. 1910 return Ops[0]; 1911 } else if (Ops[0]->isAllOnesValue()) { 1912 // If we have a mul by -1 of an add, try distributing the -1 among the 1913 // add operands. 
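      // For example, -1 * (a + b + 3) becomes (-1*a) + (-1*b) + -3 when at
      // least one distributed term folds to something simpler than a multiply
      // (here, the constant does). The operand names are illustrative only.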
1914       if (Ops.size() == 2) {
1915         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
1916           SmallVector<const SCEV *, 4> NewOps;
1917           bool AnyFolded = false;
1918           for (SCEVAddExpr::op_iterator I = Add->op_begin(),
1919                  E = Add->op_end(); I != E; ++I) {
1920             const SCEV *Mul = getMulExpr(Ops[0], *I);
1921             if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
1922             NewOps.push_back(Mul);
1923           }
1924           if (AnyFolded)
1925             return getAddExpr(NewOps);
1926         }
1927         else if (const SCEVAddRecExpr *
1928                    AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
1929           // Negation preserves a recurrence's no self-wrap property.
1930           SmallVector<const SCEV *, 4> Operands;
1931           for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
1932                  E = AddRec->op_end(); I != E; ++I) {
1933             Operands.push_back(getMulExpr(Ops[0], *I));
1934           }
1935           return getAddRecExpr(Operands, AddRec->getLoop(),
1936                                AddRec->getNoWrapFlags(SCEV::FlagNW));
1937         }
1938       }
1939     }
1940 
1941     if (Ops.size() == 1)
1942       return Ops[0];
1943   }
1944 
1945   // Skip over the add expression until we get to a multiply.
1946   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1947     ++Idx;
1948 
1949   // If there are mul operands, inline them all into this expression.
1950   if (Idx < Ops.size()) {
1951     bool DeletedMul = false;
1952     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1953       // If we have a mul, expand the mul operands onto the end of the operands
1954       // list.
1955       Ops.erase(Ops.begin()+Idx);
1956       Ops.append(Mul->op_begin(), Mul->op_end());
1957       DeletedMul = true;
1958     }
1959 
1960     // If we deleted at least one mul, we added operands to the end of the list,
1961     // and they are not necessarily sorted. Recurse to resort and resimplify
1962     // any operands we just acquired.
1963     if (DeletedMul)
1964       return getMulExpr(Ops);
1965   }
1966 
1967   // If there are any add recurrences in the operands list, see if any other
1968   // added values are loop invariant. If so, we can fold them into the
1969   // recurrence.
1970   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1971     ++Idx;
1972 
1973   // Scan over all recurrences, trying to fold loop invariants into them.
1974   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1975     // Scan all of the other operands to this mul and add them to the vector if
1976     // they are loop invariant w.r.t. the recurrence.
1977     SmallVector<const SCEV *, 8> LIOps;
1978     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1979     const Loop *AddRecLoop = AddRec->getLoop();
1980     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1981       if (isLoopInvariant(Ops[i], AddRecLoop)) {
1982         LIOps.push_back(Ops[i]);
1983         Ops.erase(Ops.begin()+i);
1984         --i; --e;
1985       }
1986 
1987     // If we found some loop invariants, fold them into the recurrence.
1988     if (!LIOps.empty()) {
1989       //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1990       SmallVector<const SCEV *, 4> NewOps;
1991       NewOps.reserve(AddRec->getNumOperands());
1992       const SCEV *Scale = getMulExpr(LIOps);
1993       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1994         NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1995 
1996       // Build the new addrec. Propagate the NUW and NSW flags if both the
1997       // outer mul and the inner addrec are guaranteed to have no overflow.
1998       //
1999       // No self-wrap cannot be guaranteed after changing the step size, but
2000       // will be inferred if either NUW or NSW is true.
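      // For instance, with a loop-invariant scale of 3, 3 * {2,+,4}<L>
      // becomes {6,+,12}<L>. (Constants chosen for illustration.)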
2001       Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2002       const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
2003 
2004       // If all of the other operands were loop invariant, we are done.
2005       if (Ops.size() == 1) return NewRec;
2006 
2007       // Otherwise, multiply the folded AddRec by the non-invariant parts.
2008       for (unsigned i = 0;; ++i)
2009         if (Ops[i] == AddRec) {
2010           Ops[i] = NewRec;
2011           break;
2012         }
2013       return getMulExpr(Ops);
2014     }
2015 
2016     // Okay, if there weren't any loop invariants to be folded, check to see if
2017     // there are multiple AddRec's with the same loop induction variable being
2018     // multiplied together. If so, we can fold them.
2019     for (unsigned OtherIdx = Idx+1;
2020          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2021          ++OtherIdx) {
2022       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2023         // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2024         // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2025         //       choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
2026         //    ]]],+,...up to x=2n}.
2027         // Note that the arguments to choose() are always integers with values
2028         // known at compile time, never SCEV objects.
2029         //
2030         // The implementation avoids pointless extra computations when the two
2031         // addrec's are of different length (mathematically, it's equivalent to
2032         // an infinite stream of zeros on the right).
2033         bool OpsModified = false;
2034         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2035              ++OtherIdx)
2036           if (const SCEVAddRecExpr *OtherAddRec =
2037                 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
2038             if (OtherAddRec->getLoop() == AddRecLoop) {
2039               bool Overflow = false;
2040               Type *Ty = AddRec->getType();
2041               bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2042               SmallVector<const SCEV*, 7> AddRecOps;
2043               for (int x = 0, xe = AddRec->getNumOperands() +
2044                      OtherAddRec->getNumOperands() - 1;
2045                    x != xe && !Overflow; ++x) {
2046                 const SCEV *Term = getConstant(Ty, 0);
2047                 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2048                   uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2049                   for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2050                        ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2051                        z < ze && !Overflow; ++z) {
2052                     uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2053                     uint64_t Coeff;
2054                     if (LargerThan64Bits)
2055                       Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2056                     else
2057                       Coeff = Coeff1*Coeff2;
2058                     const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2059                     const SCEV *Term1 = AddRec->getOperand(y-z);
2060                     const SCEV *Term2 = OtherAddRec->getOperand(z);
2061                     Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2));
2062                   }
2063                 }
2064                 AddRecOps.push_back(Term);
2065               }
2066               if (!Overflow) {
2067                 const SCEV *NewAddRec = getAddRecExpr(AddRecOps,
2068                                                       AddRec->getLoop(),
2069                                                       SCEV::FlagAnyWrap);
2070                 if (Ops.size() == 2) return NewAddRec;
2071                 Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
2072                 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2073                 OpsModified = true;
2074               }
2075             }
2076         if (OpsModified)
2077           return getMulExpr(Ops);
2078       }
2079     }
2080 
2081     // Otherwise couldn't fold anything into this recurrence. Move on to the
2082     // next one.
2083   }
2084 
2085   // Okay, it looks like we really DO need a mul expr. Check to see if we
2086   // already have one, otherwise create a new one.
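  // (The uniquing key below is the expression kind plus the operand pointers;
  // this is sound because the operands are themselves uniqued.)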
2087   FoldingSetNodeID ID;
2088   ID.AddInteger(scMulExpr);
2089   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2090     ID.AddPointer(Ops[i]);
2091   void *IP = 0;
2092   SCEVMulExpr *S =
2093     static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2094   if (!S) {
2095     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2096     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2097     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2098                                         O, Ops.size());
2099     UniqueSCEVs.InsertNode(S, IP);
2100   }
2101   S->setNoWrapFlags(Flags);
2102   return S;
2103 }
2104 
2105 /// getUDivExpr - Get a canonical unsigned division expression, or something
2106 /// simpler if possible.
2107 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2108                                          const SCEV *RHS) {
2109   assert(getEffectiveSCEVType(LHS->getType()) ==
2110          getEffectiveSCEVType(RHS->getType()) &&
2111          "SCEVUDivExpr operand types don't match!");
2112 
2113   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
2114     if (RHSC->getValue()->equalsInt(1))
2115       return LHS;                               // X udiv 1 --> X
2116     // If the denominator is zero, the result of the udiv is undefined. Don't
2117     // try to analyze it, because the resolution chosen here may differ from
2118     // the resolution chosen in other parts of the compiler.
2119     if (!RHSC->getValue()->isZero()) {
2120       // Determine if the division can be folded into the operands of
2121       // the dividend (LHS).
2122       // TODO: Generalize this to non-constants by using known-bits information.
2123       Type *Ty = LHS->getType();
2124       unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
2125       unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
2126       // For non-power-of-two values, effectively round the value up to the
2127       // nearest power of two.
2128       if (!RHSC->getValue()->getValue().isPowerOf2())
2129         ++MaxShiftAmt;
2130       IntegerType *ExtTy =
2131         IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
2132       if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
2133         if (const SCEVConstant *Step =
2134               dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
2135           // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
2136           const APInt &StepInt = Step->getValue()->getValue();
2137           const APInt &DivInt = RHSC->getValue()->getValue();
2138           if (!StepInt.urem(DivInt) &&
2139               getZeroExtendExpr(AR, ExtTy) ==
2140               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2141                             getZeroExtendExpr(Step, ExtTy),
2142                             AR->getLoop(), SCEV::FlagAnyWrap)) {
2143             SmallVector<const SCEV *, 4> Operands;
2144             for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
2145               Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
2146             return getAddRecExpr(Operands, AR->getLoop(),
2147                                  SCEV::FlagNW);
2148           }
2149           // Get a canonical UDivExpr for a recurrence.
2150           // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
2151           // We can currently only fold X%N if X is constant.
2152           const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
2153           if (StartC && !DivInt.urem(StepInt) &&
2154               getZeroExtendExpr(AR, ExtTy) ==
2155               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2156                             getZeroExtendExpr(Step, ExtTy),
2157                             AR->getLoop(), SCEV::FlagAnyWrap)) {
2158             const APInt &StartInt = StartC->getValue()->getValue();
2159             const APInt &StartRem = StartInt.urem(StepInt);
2160             if (StartRem != 0)
2161               LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
2162                                   AR->getLoop(), SCEV::FlagNW);
2163           }
2164         }
2165       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
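      // For example, (8 * a) /u 4 can fold to 2 * a once the zero-extend
      // comparison below proves the multiply cannot wrap: 8 /u 4 folds to
      // the constant 2, and 2 * 4 round-trips back to 8. (Values here are
      // illustrative only.)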
2166 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 2167 SmallVector<const SCEV *, 4> Operands; 2168 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) 2169 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy)); 2170 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 2171 // Find an operand that's safely divisible. 2172 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 2173 const SCEV *Op = M->getOperand(i); 2174 const SCEV *Div = getUDivExpr(Op, RHSC); 2175 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 2176 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 2177 M->op_end()); 2178 Operands[i] = Div; 2179 return getMulExpr(Operands); 2180 } 2181 } 2182 } 2183 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 2184 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 2185 SmallVector<const SCEV *, 4> Operands; 2186 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) 2187 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy)); 2188 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 2189 Operands.clear(); 2190 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 2191 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 2192 if (isa<SCEVUDivExpr>(Op) || 2193 getMulExpr(Op, RHS) != A->getOperand(i)) 2194 break; 2195 Operands.push_back(Op); 2196 } 2197 if (Operands.size() == A->getNumOperands()) 2198 return getAddExpr(Operands); 2199 } 2200 } 2201 2202 // Fold if both operands are constant. 2203 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 2204 Constant *LHSCV = LHSC->getValue(); 2205 Constant *RHSCV = RHSC->getValue(); 2206 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 2207 RHSCV))); 2208 } 2209 } 2210 } 2211 2212 FoldingSetNodeID ID; 2213 ID.AddInteger(scUDivExpr); 2214 ID.AddPointer(LHS); 2215 ID.AddPointer(RHS); 2216 void *IP = 0; 2217 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2218 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 2219 LHS, RHS); 2220 UniqueSCEVs.InsertNode(S, IP); 2221 return S; 2222 } 2223 2224 2225 /// getAddRecExpr - Get an add recurrence expression for the specified loop. 2226 /// Simplify the expression as much as possible. 2227 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 2228 const Loop *L, 2229 SCEV::NoWrapFlags Flags) { 2230 SmallVector<const SCEV *, 4> Operands; 2231 Operands.push_back(Start); 2232 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 2233 if (StepChrec->getLoop() == L) { 2234 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 2235 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 2236 } 2237 2238 Operands.push_back(Step); 2239 return getAddRecExpr(Operands, L, Flags); 2240 } 2241 2242 /// getAddRecExpr - Get an add recurrence expression for the specified loop. 2243 /// Simplify the expression as much as possible. 
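/// For example, a trailing zero step is stripped: {X,+,0}<L> folds to X.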
2244 const SCEV *
2245 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2246                                const Loop *L, SCEV::NoWrapFlags Flags) {
2247   if (Operands.size() == 1) return Operands[0];
2248 #ifndef NDEBUG
2249   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2250   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2251     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2252            "SCEVAddRecExpr operand types don't match!");
2253   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2254     assert(isLoopInvariant(Operands[i], L) &&
2255            "SCEVAddRecExpr operand is not loop-invariant!");
2256 #endif
2257 
2258   if (Operands.back()->isZero()) {
2259     Operands.pop_back();
2260     return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
2261   }
2262 
2263   // It's tempting to want to call getMaxBackedgeTakenCount here and
2264   // use that information to infer NUW and NSW flags. However, computing a
2265   // BE count requires calling getAddRecExpr, so we may not yet have a
2266   // meaningful BE count at this point (and if we don't, we'd be stuck
2267   // with a SCEVCouldNotCompute as the cached BE count).
2268 
2269   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2270   // And vice-versa.
2271   int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2272   SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
2273   if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
2274     bool All = true;
2275     for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
2276          E = Operands.end(); I != E; ++I)
2277       if (!isKnownNonNegative(*I)) {
2278         All = false;
2279         break;
2280       }
2281     if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2282   }
2283 
2284   // Canonicalize nested AddRecs by nesting them in order of loop depth.
2285   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2286     const Loop *NestedLoop = NestedAR->getLoop();
2287     if (L->contains(NestedLoop) ?
2288         (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
2289         (!NestedLoop->contains(L) &&
2290          DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
2291       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2292                                                   NestedAR->op_end());
2293       Operands[0] = NestedAR->getStart();
2294       // AddRecs require their operands be loop-invariant with respect to their
2295       // loops. Don't perform this transformation if it would break this
2296       // requirement.
2297       bool AllInvariant = true;
2298       for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2299         if (!isLoopInvariant(Operands[i], L)) {
2300           AllInvariant = false;
2301           break;
2302         }
2303       if (AllInvariant) {
2304         // Create a recurrence for the outer loop with the same step size.
2305         //
2306         // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
2307         // inner recurrence has the same property.
2308         SCEV::NoWrapFlags OuterFlags =
2309           maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
2310 
2311         NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
2312         AllInvariant = true;
2313         for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
2314           if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
2315             AllInvariant = false;
2316             break;
2317           }
2318         if (AllInvariant) {
2319           // Ok, both add recurrences are valid after the transformation.
2320           //
2321           // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
2322           // the outer recurrence has the same property.
2323 SCEV::NoWrapFlags InnerFlags = 2324 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 2325 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 2326 } 2327 } 2328 // Reset Operands to its original state. 2329 Operands[0] = NestedAR; 2330 } 2331 } 2332 2333 // Okay, it looks like we really DO need an addrec expr. Check to see if we 2334 // already have one, otherwise create a new one. 2335 FoldingSetNodeID ID; 2336 ID.AddInteger(scAddRecExpr); 2337 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 2338 ID.AddPointer(Operands[i]); 2339 ID.AddPointer(L); 2340 void *IP = 0; 2341 SCEVAddRecExpr *S = 2342 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2343 if (!S) { 2344 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 2345 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 2346 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 2347 O, Operands.size(), L); 2348 UniqueSCEVs.InsertNode(S, IP); 2349 } 2350 S->setNoWrapFlags(Flags); 2351 return S; 2352 } 2353 2354 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 2355 const SCEV *RHS) { 2356 SmallVector<const SCEV *, 2> Ops; 2357 Ops.push_back(LHS); 2358 Ops.push_back(RHS); 2359 return getSMaxExpr(Ops); 2360 } 2361 2362 const SCEV * 2363 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 2364 assert(!Ops.empty() && "Cannot get empty smax!"); 2365 if (Ops.size() == 1) return Ops[0]; 2366 #ifndef NDEBUG 2367 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2368 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2369 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2370 "SCEVSMaxExpr operand types don't match!"); 2371 #endif 2372 2373 // Sort by complexity, this groups all similar expression types together. 2374 GroupByComplexity(Ops, LI); 2375 2376 // If there are any constants, fold them together. 2377 unsigned Idx = 0; 2378 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2379 ++Idx; 2380 assert(Idx < Ops.size()); 2381 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2382 // We found two constants, fold them together! 2383 ConstantInt *Fold = ConstantInt::get(getContext(), 2384 APIntOps::smax(LHSC->getValue()->getValue(), 2385 RHSC->getValue()->getValue())); 2386 Ops[0] = getConstant(Fold); 2387 Ops.erase(Ops.begin()+1); // Erase the folded element 2388 if (Ops.size() == 1) return Ops[0]; 2389 LHSC = cast<SCEVConstant>(Ops[0]); 2390 } 2391 2392 // If we are left with a constant minimum-int, strip it off. 2393 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 2394 Ops.erase(Ops.begin()); 2395 --Idx; 2396 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 2397 // If we have an smax with a constant maximum-int, it will always be 2398 // maximum-int. 2399 return Ops[0]; 2400 } 2401 2402 if (Ops.size() == 1) return Ops[0]; 2403 } 2404 2405 // Find the first SMax 2406 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 2407 ++Idx; 2408 2409 // Check to see if one of the operands is an SMax. If so, expand its operands 2410 // onto our operand list, and recurse to simplify. 
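  // For example, smax(smax(a, b), c) is flattened to smax(a, b, c), which
  // the recursive call then re-sorts and re-folds. (Illustrative operands.)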
2411 if (Idx < Ops.size()) { 2412 bool DeletedSMax = false; 2413 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 2414 Ops.erase(Ops.begin()+Idx); 2415 Ops.append(SMax->op_begin(), SMax->op_end()); 2416 DeletedSMax = true; 2417 } 2418 2419 if (DeletedSMax) 2420 return getSMaxExpr(Ops); 2421 } 2422 2423 // Okay, check to see if the same value occurs in the operand list twice. If 2424 // so, delete one. Since we sorted the list, these values are required to 2425 // be adjacent. 2426 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 2427 // X smax Y smax Y --> X smax Y 2428 // X smax Y --> X, if X is always greater than Y 2429 if (Ops[i] == Ops[i+1] || 2430 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { 2431 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 2432 --i; --e; 2433 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { 2434 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 2435 --i; --e; 2436 } 2437 2438 if (Ops.size() == 1) return Ops[0]; 2439 2440 assert(!Ops.empty() && "Reduced smax down to nothing!"); 2441 2442 // Okay, it looks like we really DO need an smax expr. Check to see if we 2443 // already have one, otherwise create a new one. 2444 FoldingSetNodeID ID; 2445 ID.AddInteger(scSMaxExpr); 2446 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2447 ID.AddPointer(Ops[i]); 2448 void *IP = 0; 2449 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2450 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2451 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2452 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 2453 O, Ops.size()); 2454 UniqueSCEVs.InsertNode(S, IP); 2455 return S; 2456 } 2457 2458 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 2459 const SCEV *RHS) { 2460 SmallVector<const SCEV *, 2> Ops; 2461 Ops.push_back(LHS); 2462 Ops.push_back(RHS); 2463 return getUMaxExpr(Ops); 2464 } 2465 2466 const SCEV * 2467 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 2468 assert(!Ops.empty() && "Cannot get empty umax!"); 2469 if (Ops.size() == 1) return Ops[0]; 2470 #ifndef NDEBUG 2471 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2472 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2473 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2474 "SCEVUMaxExpr operand types don't match!"); 2475 #endif 2476 2477 // Sort by complexity, this groups all similar expression types together. 2478 GroupByComplexity(Ops, LI); 2479 2480 // If there are any constants, fold them together. 2481 unsigned Idx = 0; 2482 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2483 ++Idx; 2484 assert(Idx < Ops.size()); 2485 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2486 // We found two constants, fold them together! 2487 ConstantInt *Fold = ConstantInt::get(getContext(), 2488 APIntOps::umax(LHSC->getValue()->getValue(), 2489 RHSC->getValue()->getValue())); 2490 Ops[0] = getConstant(Fold); 2491 Ops.erase(Ops.begin()+1); // Erase the folded element 2492 if (Ops.size() == 1) return Ops[0]; 2493 LHSC = cast<SCEVConstant>(Ops[0]); 2494 } 2495 2496 // If we are left with a constant minimum-int, strip it off. 2497 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 2498 Ops.erase(Ops.begin()); 2499 --Idx; 2500 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 2501 // If we have an umax with a constant maximum-int, it will always be 2502 // maximum-int. 
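      // (e.g., for i32, umax(x, 0xffffffff) is 0xffffffff for any x.)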
2503 return Ops[0]; 2504 } 2505 2506 if (Ops.size() == 1) return Ops[0]; 2507 } 2508 2509 // Find the first UMax 2510 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 2511 ++Idx; 2512 2513 // Check to see if one of the operands is a UMax. If so, expand its operands 2514 // onto our operand list, and recurse to simplify. 2515 if (Idx < Ops.size()) { 2516 bool DeletedUMax = false; 2517 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 2518 Ops.erase(Ops.begin()+Idx); 2519 Ops.append(UMax->op_begin(), UMax->op_end()); 2520 DeletedUMax = true; 2521 } 2522 2523 if (DeletedUMax) 2524 return getUMaxExpr(Ops); 2525 } 2526 2527 // Okay, check to see if the same value occurs in the operand list twice. If 2528 // so, delete one. Since we sorted the list, these values are required to 2529 // be adjacent. 2530 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 2531 // X umax Y umax Y --> X umax Y 2532 // X umax Y --> X, if X is always greater than Y 2533 if (Ops[i] == Ops[i+1] || 2534 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) { 2535 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 2536 --i; --e; 2537 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) { 2538 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 2539 --i; --e; 2540 } 2541 2542 if (Ops.size() == 1) return Ops[0]; 2543 2544 assert(!Ops.empty() && "Reduced umax down to nothing!"); 2545 2546 // Okay, it looks like we really DO need a umax expr. Check to see if we 2547 // already have one, otherwise create a new one. 2548 FoldingSetNodeID ID; 2549 ID.AddInteger(scUMaxExpr); 2550 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2551 ID.AddPointer(Ops[i]); 2552 void *IP = 0; 2553 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2554 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2555 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2556 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), 2557 O, Ops.size()); 2558 UniqueSCEVs.InsertNode(S, IP); 2559 return S; 2560 } 2561 2562 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 2563 const SCEV *RHS) { 2564 // ~smax(~x, ~y) == smin(x, y). 2565 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 2566 } 2567 2568 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 2569 const SCEV *RHS) { 2570 // ~umax(~x, ~y) == umin(x, y) 2571 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 2572 } 2573 2574 const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) { 2575 // If we have TargetData, we can bypass creating a target-independent 2576 // constant expression and then folding it back into a ConstantInt. 2577 // This is just a compile-time optimization. 
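  // (Without TargetData, the path below builds a target-independent sizeof
  // ConstantExpr and relies on the constant folder to reduce it when it can.)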
2578 if (TD) 2579 return getConstant(TD->getIntPtrType(getContext()), 2580 TD->getTypeAllocSize(AllocTy)); 2581 2582 Constant *C = ConstantExpr::getSizeOf(AllocTy); 2583 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2584 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2585 C = Folded; 2586 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy)); 2587 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2588 } 2589 2590 const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) { 2591 Constant *C = ConstantExpr::getAlignOf(AllocTy); 2592 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2593 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2594 C = Folded; 2595 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy)); 2596 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2597 } 2598 2599 const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy, 2600 unsigned FieldNo) { 2601 // If we have TargetData, we can bypass creating a target-independent 2602 // constant expression and then folding it back into a ConstantInt. 2603 // This is just a compile-time optimization. 2604 if (TD) 2605 return getConstant(TD->getIntPtrType(getContext()), 2606 TD->getStructLayout(STy)->getElementOffset(FieldNo)); 2607 2608 Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo); 2609 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2610 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2611 C = Folded; 2612 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy)); 2613 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2614 } 2615 2616 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *CTy, 2617 Constant *FieldNo) { 2618 Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo); 2619 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2620 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2621 C = Folded; 2622 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy)); 2623 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2624 } 2625 2626 const SCEV *ScalarEvolution::getUnknown(Value *V) { 2627 // Don't attempt to do anything other than create a SCEVUnknown object 2628 // here. createSCEV only calls getUnknown after checking for all other 2629 // interesting possibilities, and any other code that calls getUnknown 2630 // is doing so in order to hide a value from SCEV canonicalization. 2631 2632 FoldingSetNodeID ID; 2633 ID.AddInteger(scUnknown); 2634 ID.AddPointer(V); 2635 void *IP = 0; 2636 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 2637 assert(cast<SCEVUnknown>(S)->getValue() == V && 2638 "Stale SCEVUnknown in uniquing map!"); 2639 return S; 2640 } 2641 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 2642 FirstUnknown); 2643 FirstUnknown = cast<SCEVUnknown>(S); 2644 UniqueSCEVs.InsertNode(S, IP); 2645 return S; 2646 } 2647 2648 //===----------------------------------------------------------------------===// 2649 // Basic SCEV Analysis and PHI Idiom Recognition Code 2650 // 2651 2652 /// isSCEVable - Test if values of the given type are analyzable within 2653 /// the SCEV framework. This primarily includes integer types, and it 2654 /// can optionally include pointer types if the ScalarEvolution class 2655 /// has access to target-specific information. 2656 bool ScalarEvolution::isSCEVable(Type *Ty) const { 2657 // Integers and pointers are always SCEVable. 
2658   return Ty->isIntegerTy() || Ty->isPointerTy();
2659 }
2660 
2661 /// getTypeSizeInBits - Return the size in bits of the specified type,
2662 /// for which isSCEVable must return true.
2663 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
2664   assert(isSCEVable(Ty) && "Type is not SCEVable!");
2665 
2666   // If we have a TargetData, use it!
2667   if (TD)
2668     return TD->getTypeSizeInBits(Ty);
2669 
2670   // Integer types have fixed sizes.
2671   if (Ty->isIntegerTy())
2672     return Ty->getPrimitiveSizeInBits();
2673 
2674   // The only other supported type is pointer. Without TargetData,
2675   // conservatively assume pointers are 64-bit.
2676   assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
2677   return 64;
2678 }
2679 
2680 /// getEffectiveSCEVType - Return a type with the same bitwidth as
2681 /// the given type and which represents how SCEV will treat the given
2682 /// type, for which isSCEVable must return true. For pointer types,
2683 /// this is the pointer-sized integer type.
2684 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
2685   assert(isSCEVable(Ty) && "Type is not SCEVable!");
2686 
2687   if (Ty->isIntegerTy())
2688     return Ty;
2689 
2690   // The only other supported type is pointer.
2691   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
2692   if (TD) return TD->getIntPtrType(getContext());
2693 
2694   // Without TargetData, conservatively assume pointers are 64-bit.
2695   return Type::getInt64Ty(getContext());
2696 }
2697 
2698 const SCEV *ScalarEvolution::getCouldNotCompute() {
2699   return &CouldNotCompute;
2700 }
2701 
2702 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2703 /// expression and create a new one.
2704 const SCEV *ScalarEvolution::getSCEV(Value *V) {
2705   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2706 
2707   ValueExprMapType::const_iterator I = ValueExprMap.find(V);
2708   if (I != ValueExprMap.end()) return I->second;
2709   const SCEV *S = createSCEV(V);
2710 
2711   // The process of creating a SCEV for V may have caused other SCEVs
2712   // to have been created, so it's necessary to insert the new entry
2713   // from scratch, rather than trying to remember the insert position
2714   // above.
2715   ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2716   return S;
2717 }
2718 
2719 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2720 ///
2721 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2722   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2723     return getConstant(
2724                cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2725 
2726   Type *Ty = V->getType();
2727   Ty = getEffectiveSCEVType(Ty);
2728   return getMulExpr(V,
2729            getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
2730 }
2731 
2732 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2733 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2734   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2735     return getConstant(
2736                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2737 
2738   Type *Ty = V->getType();
2739   Ty = getEffectiveSCEVType(Ty);
2740   const SCEV *AllOnes =
2741     getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
2742   return getMinusSCEV(AllOnes, V);
2743 }
2744 
2745 /// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
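/// For example, getMinusSCEV(a, b) is built as a + -1*b, and the X - X
/// case folds directly to 0.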
2746 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
2747                                           SCEV::NoWrapFlags Flags) {
2748   assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
2749 
2750   // Fast path: X - X --> 0.
2751   if (LHS == RHS)
2752     return getConstant(LHS->getType(), 0);
2753 
2754   // X - Y --> X + -Y
2755   return getAddExpr(LHS, getNegativeSCEV(RHS), Flags);
2756 }
2757 
2758 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2759 /// input value to the specified type. If the type must be extended, it is zero
2760 /// extended.
2761 const SCEV *
2762 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
2763   Type *SrcTy = V->getType();
2764   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2765          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2766          "Cannot truncate or zero extend with non-integer arguments!");
2767   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2768     return V;  // No conversion
2769   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2770     return getTruncateExpr(V, Ty);
2771   return getZeroExtendExpr(V, Ty);
2772 }
2773 
2774 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2775 /// input value to the specified type. If the type must be extended, it is sign
2776 /// extended.
2777 const SCEV *
2778 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2779                                          Type *Ty) {
2780   Type *SrcTy = V->getType();
2781   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2782          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2783          "Cannot truncate or sign extend with non-integer arguments!");
2784   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2785     return V;  // No conversion
2786   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2787     return getTruncateExpr(V, Ty);
2788   return getSignExtendExpr(V, Ty);
2789 }
2790 
2791 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2792 /// input value to the specified type. If the type must be extended, it is zero
2793 /// extended. The conversion must not be narrowing.
2794 const SCEV *
2795 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
2796   Type *SrcTy = V->getType();
2797   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2798          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2799          "Cannot noop or zero extend with non-integer arguments!");
2800   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2801          "getNoopOrZeroExtend cannot truncate!");
2802   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2803     return V;  // No conversion
2804   return getZeroExtendExpr(V, Ty);
2805 }
2806 
2807 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2808 /// input value to the specified type. If the type must be extended, it is sign
2809 /// extended. The conversion must not be narrowing.
2810 const SCEV *
2811 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
2812   Type *SrcTy = V->getType();
2813   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2814          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2815          "Cannot noop or sign extend with non-integer arguments!");
2816   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2817          "getNoopOrSignExtend cannot truncate!");
2818   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2819     return V;  // No conversion
2820   return getSignExtendExpr(V, Ty);
2821 }
2822 
2823 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2824 /// the input value to the specified type.
If the type must be extended, 2825 /// it is extended with unspecified bits. The conversion must not be 2826 /// narrowing. 2827 const SCEV * 2828 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { 2829 Type *SrcTy = V->getType(); 2830 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2831 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2832 "Cannot noop or any extend with non-integer arguments!"); 2833 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2834 "getNoopOrAnyExtend cannot truncate!"); 2835 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2836 return V; // No conversion 2837 return getAnyExtendExpr(V, Ty); 2838 } 2839 2840 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the 2841 /// input value to the specified type. The conversion must not be widening. 2842 const SCEV * 2843 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { 2844 Type *SrcTy = V->getType(); 2845 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2846 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2847 "Cannot truncate or noop with non-integer arguments!"); 2848 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 2849 "getTruncateOrNoop cannot extend!"); 2850 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2851 return V; // No conversion 2852 return getTruncateExpr(V, Ty); 2853 } 2854 2855 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of 2856 /// the types using zero-extension, and then perform a umax operation 2857 /// with them. 2858 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 2859 const SCEV *RHS) { 2860 const SCEV *PromotedLHS = LHS; 2861 const SCEV *PromotedRHS = RHS; 2862 2863 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 2864 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 2865 else 2866 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 2867 2868 return getUMaxExpr(PromotedLHS, PromotedRHS); 2869 } 2870 2871 /// getUMinFromMismatchedTypes - Promote the operands to the wider of 2872 /// the types using zero-extension, and then perform a umin operation 2873 /// with them. 2874 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 2875 const SCEV *RHS) { 2876 const SCEV *PromotedLHS = LHS; 2877 const SCEV *PromotedRHS = RHS; 2878 2879 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 2880 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 2881 else 2882 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 2883 2884 return getUMinExpr(PromotedLHS, PromotedRHS); 2885 } 2886 2887 /// getPointerBase - Transitively follow the chain of pointer-type operands 2888 /// until reaching a SCEV that does not have a single pointer operand. This 2889 /// returns a SCEVUnknown pointer for well-formed pointer-type expressions, 2890 /// but corner cases do exist. 2891 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 2892 // A pointer operand may evaluate to a nonpointer expression, such as null. 2893 if (!V->getType()->isPointerTy()) 2894 return V; 2895 2896 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { 2897 return getPointerBase(Cast->getOperand()); 2898 } 2899 else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 2900 const SCEV *PtrOp = 0; 2901 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); 2902 I != E; ++I) { 2903 if ((*I)->getType()->isPointerTy()) { 2904 // Cannot find the base of an expression with multiple pointer operands. 
2905         if (PtrOp)
2906           return V;
2907         PtrOp = *I;
2908       }
2909     }
2910     if (!PtrOp)
2911       return V;
2912     return getPointerBase(PtrOp);
2913   }
2914   return V;
2915 }
2916 
2917 /// PushDefUseChildren - Push users of the given Instruction
2918 /// onto the given Worklist.
2919 static void
2920 PushDefUseChildren(Instruction *I,
2921                    SmallVectorImpl<Instruction *> &Worklist) {
2922   // Push the def-use children onto the Worklist stack.
2923   for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2924        UI != UE; ++UI)
2925     Worklist.push_back(cast<Instruction>(*UI));
2926 }
2927 
2928 /// ForgetSymbolicName - This looks up computed SCEV values for all
2929 /// instructions that depend on the given instruction and removes them from
2930 /// the ValueExprMap map if they reference SymName. This is used during PHI
2931 /// resolution.
2932 void
2933 ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
2934   SmallVector<Instruction *, 16> Worklist;
2935   PushDefUseChildren(PN, Worklist);
2936 
2937   SmallPtrSet<Instruction *, 8> Visited;
2938   Visited.insert(PN);
2939   while (!Worklist.empty()) {
2940     Instruction *I = Worklist.pop_back_val();
2941     if (!Visited.insert(I)) continue;
2942 
2943     ValueExprMapType::iterator It =
2944       ValueExprMap.find(static_cast<Value *>(I));
2945     if (It != ValueExprMap.end()) {
2946       const SCEV *Old = It->second;
2947 
2948       // Short-circuit the def-use traversal if the symbolic name
2949       // ceases to appear in expressions.
2950       if (Old != SymName && !hasOperand(Old, SymName))
2951         continue;
2952 
2953       // SCEVUnknown for a PHI either means that it has an unrecognized
2954       // structure, it's a PHI that's in the process of being computed
2955       // by createNodeForPHI, or it's a single-value PHI. In the first case,
2956       // additional loop trip count information isn't going to change anything.
2957       // In the second case, createNodeForPHI will perform the necessary
2958       // updates on its own when it gets to that point. In the third, we do
2959       // want to forget the SCEVUnknown.
2960       if (!isa<PHINode>(I) ||
2961           !isa<SCEVUnknown>(Old) ||
2962           (I != PN && Old == SymName)) {
2963         forgetMemoizedResults(Old);
2964         ValueExprMap.erase(It);
2965       }
2966     }
2967 
2968     PushDefUseChildren(I, Worklist);
2969   }
2970 }
2971 
2972 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
2973 /// a loop header, making it a potential recurrence, or it doesn't.
2974 ///
2975 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2976   if (const Loop *L = LI->getLoopFor(PN->getParent()))
2977     if (L->getHeader() == PN->getParent()) {
2978       // The loop may have multiple entrances or multiple exits; we can analyze
2979       // this phi as an addrec if it has a unique entry value and a unique
2980       // backedge value.
2981       Value *BEValueV = 0, *StartValueV = 0;
2982       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
2983         Value *V = PN->getIncomingValue(i);
2984         if (L->contains(PN->getIncomingBlock(i))) {
2985           if (!BEValueV) {
2986             BEValueV = V;
2987           } else if (BEValueV != V) {
2988             BEValueV = 0;
2989             break;
2990           }
2991         } else if (!StartValueV) {
2992           StartValueV = V;
2993         } else if (StartValueV != V) {
2994           StartValueV = 0;
2995           break;
2996         }
2997       }
2998       if (BEValueV && StartValueV) {
2999         // While we are analyzing this PHI node, handle its value symbolically.
3000 const SCEV *SymbolicName = getUnknown(PN); 3001 assert(ValueExprMap.find(PN) == ValueExprMap.end() && 3002 "PHI node already processed?"); 3003 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName)); 3004 3005 // Using this symbolic name for the PHI, analyze the value coming around 3006 // the back-edge. 3007 const SCEV *BEValue = getSCEV(BEValueV); 3008 3009 // NOTE: If BEValue is loop invariant, we know that the PHI node just 3010 // has a special value for the first iteration of the loop. 3011 3012 // If the value coming around the backedge is an add with the symbolic 3013 // value we just inserted, then we found a simple induction variable! 3014 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 3015 // If there is a single occurrence of the symbolic value, replace it 3016 // with a recurrence. 3017 unsigned FoundIndex = Add->getNumOperands(); 3018 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 3019 if (Add->getOperand(i) == SymbolicName) 3020 if (FoundIndex == e) { 3021 FoundIndex = i; 3022 break; 3023 } 3024 3025 if (FoundIndex != Add->getNumOperands()) { 3026 // Create an add with everything but the specified operand. 3027 SmallVector<const SCEV *, 8> Ops; 3028 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 3029 if (i != FoundIndex) 3030 Ops.push_back(Add->getOperand(i)); 3031 const SCEV *Accum = getAddExpr(Ops); 3032 3033 // This is not a valid addrec if the step amount is varying each 3034 // loop iteration, but is not itself an addrec in this loop. 3035 if (isLoopInvariant(Accum, L) || 3036 (isa<SCEVAddRecExpr>(Accum) && 3037 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 3038 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 3039 3040 // If the increment doesn't overflow, then neither the addrec nor 3041 // the post-increment will overflow. 3042 if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) { 3043 if (OBO->hasNoUnsignedWrap()) 3044 Flags = setFlags(Flags, SCEV::FlagNUW); 3045 if (OBO->hasNoSignedWrap()) 3046 Flags = setFlags(Flags, SCEV::FlagNSW); 3047 } else if (const GEPOperator *GEP = 3048 dyn_cast<GEPOperator>(BEValueV)) { 3049 // If the increment is an inbounds GEP, then we know the address 3050 // space cannot be wrapped around. We cannot make any guarantee 3051 // about signed or unsigned overflow because pointers are 3052 // unsigned but we may have a negative index from the base 3053 // pointer. 3054 if (GEP->isInBounds()) 3055 Flags = setFlags(Flags, SCEV::FlagNW); 3056 } 3057 3058 const SCEV *StartVal = getSCEV(StartValueV); 3059 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 3060 3061 // Since the no-wrap flags are on the increment, they apply to the 3062 // post-incremented value as well. 3063 if (isLoopInvariant(Accum, L)) 3064 (void)getAddRecExpr(getAddExpr(StartVal, Accum), 3065 Accum, L, Flags); 3066 3067 // Okay, for the entire analysis of this edge we assumed the PHI 3068 // to be symbolic. We now need to go back and purge all of the 3069 // entries for the scalars that use the symbolic expression. 3070 ForgetSymbolicName(PN, SymbolicName); 3071 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 3072 return PHISCEV; 3073 } 3074 } 3075 } else if (const SCEVAddRecExpr *AddRec = 3076 dyn_cast<SCEVAddRecExpr>(BEValue)) { 3077 // Otherwise, this could be a loop like this: 3078 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 3079 // In this case, j = {1,+,1} and BEValue is j. 
3080       // Because the other in-value of i (0) fits the evolution of BEValue,
3081       // i really is an addrec evolution.
3082       if (AddRec->getLoop() == L && AddRec->isAffine()) {
3083         const SCEV *StartVal = getSCEV(StartValueV);
3084
3085         // If StartVal = j.start - j.stride, we can use StartVal as the
3086         // initial value of the addrec evolution.
3087         if (StartVal == getMinusSCEV(AddRec->getOperand(0),
3088                                      AddRec->getOperand(1))) {
3089           // FIXME: For constant StartVal, we should be able to infer
3090           // no-wrap flags.
3091           const SCEV *PHISCEV =
3092             getAddRecExpr(StartVal, AddRec->getOperand(1), L,
3093                           SCEV::FlagAnyWrap);
3094
3095           // Okay, for the entire analysis of this edge we assumed the PHI
3096           // to be symbolic. We now need to go back and purge all of the
3097           // entries for the scalars that use the symbolic expression.
3098           ForgetSymbolicName(PN, SymbolicName);
3099           ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
3100           return PHISCEV;
3101         }
3102       }
3103     }
3104   }
3105 }
3106
3107   // If the PHI has a single incoming value, follow that value, unless the
3108   // PHI's incoming blocks are in a different loop, in which case doing so
3109   // risks breaking LCSSA form. Instcombine would normally zap these, but
3110   // it doesn't have DominatorTree information, so it may miss cases.
3111   if (Value *V = SimplifyInstruction(PN, TD, DT))
3112     if (LI->replacementPreservesLCSSAForm(PN, V))
3113       return getSCEV(V);
3114
3115   // If it's not a loop phi, we can't handle it yet.
3116   return getUnknown(PN);
3117 }
3118
3119 /// createNodeForGEP - Expand GEP instructions into add and multiply
3120 /// operations. This allows them to be analyzed by regular SCEV code.
3121 ///
3122 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
3123
3124   // Don't blindly transfer the inbounds flag from the GEP instruction to the
3125   // Add expression, because the Instruction may be guarded by control flow
3126   // and the no-overflow bits may not be valid for the expression in any
3127   // context.
3128   bool isInBounds = GEP->isInBounds();
3129
3130   Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
3131   Value *Base = GEP->getOperand(0);
3132   // Don't attempt to analyze GEPs over unsized objects.
3133   if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
3134     return getUnknown(GEP);
3135   const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
3136   gep_type_iterator GTI = gep_type_begin(GEP);
3137   for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
3138                                       E = GEP->op_end();
3139        I != E; ++I) {
3140     Value *Index = *I;
3141     // Compute the (potentially symbolic) offset in bytes for this index.
3142     if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
3143       // For a struct, add the member offset.
3144       unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
3145       const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
3146
3147       // Add the field offset to the running total offset.
3148       TotalOffset = getAddExpr(TotalOffset, FieldOffset);
3149     } else {
3150       // For an array, add the element offset, explicitly scaled.
3151       const SCEV *ElementSize = getSizeOfExpr(*GTI);
3152       const SCEV *IndexS = getSCEV(Index);
3153       // Getelementptr indices are signed.
3154       IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
3155
3156       // Multiply the index by the element size to compute the element offset.
3157       const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize,
3158                                            isInBounds ? SCEV::FlagNSW :
3159                                            SCEV::FlagAnyWrap);
3160
3161       // Add the element offset to the running total offset.
3162       TotalOffset = getAddExpr(TotalOffset, LocalOffset);
3163     }
3164   }
3165
3166   // Get the SCEV for the GEP base.
3167   const SCEV *BaseS = getSCEV(Base);
3168
3169   // Add the total offset from all the GEP indices to the base.
3170   return getAddExpr(BaseS, TotalOffset,
3171                     isInBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap);
3172 }
3173
3174 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
3175 /// guaranteed to end in (at every loop iteration). It is, at the same time,
3176 /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
3177 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
3178 uint32_t
3179 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
3180   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3181     return C->getValue()->getValue().countTrailingZeros();
3182
3183   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
3184     return std::min(GetMinTrailingZeros(T->getOperand()),
3185                     (uint32_t)getTypeSizeInBits(T->getType()));
3186
3187   if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
3188     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3189     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3190              getTypeSizeInBits(E->getType()) : OpRes;
3191   }
3192
3193   if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
3194     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3195     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3196              getTypeSizeInBits(E->getType()) : OpRes;
3197   }
3198
3199   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
3200     // The result is the min of all operands' results.
3201     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
3202     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
3203       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
3204     return MinOpRes;
3205   }
3206
3207   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
3208     // The result is the sum of all operands' results.
3209     uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
3210     uint32_t BitWidth = getTypeSizeInBits(M->getType());
3211     for (unsigned i = 1, e = M->getNumOperands();
3212          SumOpRes != BitWidth && i != e; ++i)
3213       SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
3214                           BitWidth);
3215     return SumOpRes;
3216   }
3217
3218   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
3219     // The result is the min of all operands' results.
3220     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
3221     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
3222       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
3223     return MinOpRes;
3224   }
3225
3226   if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
3227     // The result is the min of all operands' results.
3228     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3229     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3230       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3231     return MinOpRes;
3232   }
3233
3234   if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
3235     // The result is the min of all operands' results.
3236 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 3237 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 3238 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 3239 return MinOpRes; 3240 } 3241 3242 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 3243 // For a SCEVUnknown, ask ValueTracking. 3244 unsigned BitWidth = getTypeSizeInBits(U->getType()); 3245 APInt Mask = APInt::getAllOnesValue(BitWidth); 3246 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 3247 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones); 3248 return Zeros.countTrailingOnes(); 3249 } 3250 3251 // SCEVUDivExpr 3252 return 0; 3253 } 3254 3255 /// getUnsignedRange - Determine the unsigned range for a particular SCEV. 3256 /// 3257 ConstantRange 3258 ScalarEvolution::getUnsignedRange(const SCEV *S) { 3259 // See if we've computed this range already. 3260 DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S); 3261 if (I != UnsignedRanges.end()) 3262 return I->second; 3263 3264 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 3265 return setUnsignedRange(C, ConstantRange(C->getValue()->getValue())); 3266 3267 unsigned BitWidth = getTypeSizeInBits(S->getType()); 3268 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 3269 3270 // If the value has known zeros, the maximum unsigned value will have those 3271 // known zeros as well. 3272 uint32_t TZ = GetMinTrailingZeros(S); 3273 if (TZ != 0) 3274 ConservativeResult = 3275 ConstantRange(APInt::getMinValue(BitWidth), 3276 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 3277 3278 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 3279 ConstantRange X = getUnsignedRange(Add->getOperand(0)); 3280 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 3281 X = X.add(getUnsignedRange(Add->getOperand(i))); 3282 return setUnsignedRange(Add, ConservativeResult.intersectWith(X)); 3283 } 3284 3285 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 3286 ConstantRange X = getUnsignedRange(Mul->getOperand(0)); 3287 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 3288 X = X.multiply(getUnsignedRange(Mul->getOperand(i))); 3289 return setUnsignedRange(Mul, ConservativeResult.intersectWith(X)); 3290 } 3291 3292 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 3293 ConstantRange X = getUnsignedRange(SMax->getOperand(0)); 3294 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 3295 X = X.smax(getUnsignedRange(SMax->getOperand(i))); 3296 return setUnsignedRange(SMax, ConservativeResult.intersectWith(X)); 3297 } 3298 3299 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 3300 ConstantRange X = getUnsignedRange(UMax->getOperand(0)); 3301 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 3302 X = X.umax(getUnsignedRange(UMax->getOperand(i))); 3303 return setUnsignedRange(UMax, ConservativeResult.intersectWith(X)); 3304 } 3305 3306 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 3307 ConstantRange X = getUnsignedRange(UDiv->getLHS()); 3308 ConstantRange Y = getUnsignedRange(UDiv->getRHS()); 3309 return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y))); 3310 } 3311 3312 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 3313 ConstantRange X = getUnsignedRange(ZExt->getOperand()); 3314 return setUnsignedRange(ZExt, 3315 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 3316 } 3317 3318 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 3319 
ConstantRange X = getUnsignedRange(SExt->getOperand()); 3320 return setUnsignedRange(SExt, 3321 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 3322 } 3323 3324 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 3325 ConstantRange X = getUnsignedRange(Trunc->getOperand()); 3326 return setUnsignedRange(Trunc, 3327 ConservativeResult.intersectWith(X.truncate(BitWidth))); 3328 } 3329 3330 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 3331 // If there's no unsigned wrap, the value will never be less than its 3332 // initial value. 3333 if (AddRec->getNoWrapFlags(SCEV::FlagNUW)) 3334 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 3335 if (!C->getValue()->isZero()) 3336 ConservativeResult = 3337 ConservativeResult.intersectWith( 3338 ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0))); 3339 3340 // TODO: non-affine addrec 3341 if (AddRec->isAffine()) { 3342 Type *Ty = AddRec->getType(); 3343 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 3344 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 3345 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 3346 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); 3347 3348 const SCEV *Start = AddRec->getStart(); 3349 const SCEV *Step = AddRec->getStepRecurrence(*this); 3350 3351 ConstantRange StartRange = getUnsignedRange(Start); 3352 ConstantRange StepRange = getSignedRange(Step); 3353 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); 3354 ConstantRange EndRange = 3355 StartRange.add(MaxBECountRange.multiply(StepRange)); 3356 3357 // Check for overflow. This must be done with ConstantRange arithmetic 3358 // because we could be called from within the ScalarEvolution overflow 3359 // checking code. 3360 ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1); 3361 ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1); 3362 ConstantRange ExtMaxBECountRange = 3363 MaxBECountRange.zextOrTrunc(BitWidth*2+1); 3364 ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1); 3365 if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) != 3366 ExtEndRange) 3367 return setUnsignedRange(AddRec, ConservativeResult); 3368 3369 APInt Min = APIntOps::umin(StartRange.getUnsignedMin(), 3370 EndRange.getUnsignedMin()); 3371 APInt Max = APIntOps::umax(StartRange.getUnsignedMax(), 3372 EndRange.getUnsignedMax()); 3373 if (Min.isMinValue() && Max.isMaxValue()) 3374 return setUnsignedRange(AddRec, ConservativeResult); 3375 return setUnsignedRange(AddRec, 3376 ConservativeResult.intersectWith(ConstantRange(Min, Max+1))); 3377 } 3378 } 3379 3380 return setUnsignedRange(AddRec, ConservativeResult); 3381 } 3382 3383 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 3384 // For a SCEVUnknown, ask ValueTracking. 3385 APInt Mask = APInt::getAllOnesValue(BitWidth); 3386 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 3387 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD); 3388 if (Ones == ~Zeros + 1) 3389 return setUnsignedRange(U, ConservativeResult); 3390 return setUnsignedRange(U, 3391 ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1))); 3392 } 3393 3394 return setUnsignedRange(S, ConservativeResult); 3395 } 3396 3397 /// getSignedRange - Determine the signed range for a particular SCEV. 3398 /// 3399 ConstantRange 3400 ScalarEvolution::getSignedRange(const SCEV *S) { 3401 // See if we've computed this range already. 
3402 DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S); 3403 if (I != SignedRanges.end()) 3404 return I->second; 3405 3406 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 3407 return setSignedRange(C, ConstantRange(C->getValue()->getValue())); 3408 3409 unsigned BitWidth = getTypeSizeInBits(S->getType()); 3410 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 3411 3412 // If the value has known zeros, the maximum signed value will have those 3413 // known zeros as well. 3414 uint32_t TZ = GetMinTrailingZeros(S); 3415 if (TZ != 0) 3416 ConservativeResult = 3417 ConstantRange(APInt::getSignedMinValue(BitWidth), 3418 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 3419 3420 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 3421 ConstantRange X = getSignedRange(Add->getOperand(0)); 3422 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 3423 X = X.add(getSignedRange(Add->getOperand(i))); 3424 return setSignedRange(Add, ConservativeResult.intersectWith(X)); 3425 } 3426 3427 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 3428 ConstantRange X = getSignedRange(Mul->getOperand(0)); 3429 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 3430 X = X.multiply(getSignedRange(Mul->getOperand(i))); 3431 return setSignedRange(Mul, ConservativeResult.intersectWith(X)); 3432 } 3433 3434 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 3435 ConstantRange X = getSignedRange(SMax->getOperand(0)); 3436 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 3437 X = X.smax(getSignedRange(SMax->getOperand(i))); 3438 return setSignedRange(SMax, ConservativeResult.intersectWith(X)); 3439 } 3440 3441 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 3442 ConstantRange X = getSignedRange(UMax->getOperand(0)); 3443 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 3444 X = X.umax(getSignedRange(UMax->getOperand(i))); 3445 return setSignedRange(UMax, ConservativeResult.intersectWith(X)); 3446 } 3447 3448 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 3449 ConstantRange X = getSignedRange(UDiv->getLHS()); 3450 ConstantRange Y = getSignedRange(UDiv->getRHS()); 3451 return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y))); 3452 } 3453 3454 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 3455 ConstantRange X = getSignedRange(ZExt->getOperand()); 3456 return setSignedRange(ZExt, 3457 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 3458 } 3459 3460 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 3461 ConstantRange X = getSignedRange(SExt->getOperand()); 3462 return setSignedRange(SExt, 3463 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 3464 } 3465 3466 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 3467 ConstantRange X = getSignedRange(Trunc->getOperand()); 3468 return setSignedRange(Trunc, 3469 ConservativeResult.intersectWith(X.truncate(BitWidth))); 3470 } 3471 3472 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 3473 // If there's no signed wrap, and all the operands have the same sign or 3474 // zero, the value won't ever change sign. 
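    // For example (illustrative): {1,+,2}<nsw> has all-non-negative operands,
    // so if the nsw flag is honored every value of the recurrence stays
    // non-negative and the signed range can be clamped to [0, signed max].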
3475 if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) { 3476 bool AllNonNeg = true; 3477 bool AllNonPos = true; 3478 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 3479 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 3480 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 3481 } 3482 if (AllNonNeg) 3483 ConservativeResult = ConservativeResult.intersectWith( 3484 ConstantRange(APInt(BitWidth, 0), 3485 APInt::getSignedMinValue(BitWidth))); 3486 else if (AllNonPos) 3487 ConservativeResult = ConservativeResult.intersectWith( 3488 ConstantRange(APInt::getSignedMinValue(BitWidth), 3489 APInt(BitWidth, 1))); 3490 } 3491 3492 // TODO: non-affine addrec 3493 if (AddRec->isAffine()) { 3494 Type *Ty = AddRec->getType(); 3495 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 3496 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 3497 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 3498 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); 3499 3500 const SCEV *Start = AddRec->getStart(); 3501 const SCEV *Step = AddRec->getStepRecurrence(*this); 3502 3503 ConstantRange StartRange = getSignedRange(Start); 3504 ConstantRange StepRange = getSignedRange(Step); 3505 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); 3506 ConstantRange EndRange = 3507 StartRange.add(MaxBECountRange.multiply(StepRange)); 3508 3509 // Check for overflow. This must be done with ConstantRange arithmetic 3510 // because we could be called from within the ScalarEvolution overflow 3511 // checking code. 3512 ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1); 3513 ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1); 3514 ConstantRange ExtMaxBECountRange = 3515 MaxBECountRange.zextOrTrunc(BitWidth*2+1); 3516 ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1); 3517 if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) != 3518 ExtEndRange) 3519 return setSignedRange(AddRec, ConservativeResult); 3520 3521 APInt Min = APIntOps::smin(StartRange.getSignedMin(), 3522 EndRange.getSignedMin()); 3523 APInt Max = APIntOps::smax(StartRange.getSignedMax(), 3524 EndRange.getSignedMax()); 3525 if (Min.isMinSignedValue() && Max.isMaxSignedValue()) 3526 return setSignedRange(AddRec, ConservativeResult); 3527 return setSignedRange(AddRec, 3528 ConservativeResult.intersectWith(ConstantRange(Min, Max+1))); 3529 } 3530 } 3531 3532 return setSignedRange(AddRec, ConservativeResult); 3533 } 3534 3535 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 3536 // For a SCEVUnknown, ask ValueTracking. 3537 if (!U->getValue()->getType()->isIntegerTy() && !TD) 3538 return setSignedRange(U, ConservativeResult); 3539 unsigned NS = ComputeNumSignBits(U->getValue(), TD); 3540 if (NS == 1) 3541 return setSignedRange(U, ConservativeResult); 3542 return setSignedRange(U, ConservativeResult.intersectWith( 3543 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 3544 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1))); 3545 } 3546 3547 return setSignedRange(S, ConservativeResult); 3548 } 3549 3550 /// createSCEV - We know that there is no SCEV for the specified value. 3551 /// Analyze the expression. 
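/// (Callers reach this through getSCEV, which memoizes the result in
/// ValueExprMap.)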
3552 ///
3553 const SCEV *ScalarEvolution::createSCEV(Value *V) {
3554   if (!isSCEVable(V->getType()))
3555     return getUnknown(V);
3556
3557   unsigned Opcode = Instruction::UserOp1;
3558   if (Instruction *I = dyn_cast<Instruction>(V)) {
3559     Opcode = I->getOpcode();
3560
3561     // Don't attempt to analyze instructions in blocks that aren't
3562     // reachable. Such instructions don't matter, and they aren't required
3563     // to obey basic rules for definitions dominating uses which this
3564     // analysis depends on.
3565     if (!DT->isReachableFromEntry(I->getParent()))
3566       return getUnknown(V);
3567   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
3568     Opcode = CE->getOpcode();
3569   else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3570     return getConstant(CI);
3571   else if (isa<ConstantPointerNull>(V))
3572     return getConstant(V->getType(), 0);
3573   else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3574     return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
3575   else
3576     return getUnknown(V);
3577
3578   Operator *U = cast<Operator>(V);
3579   switch (Opcode) {
3580   case Instruction::Add: {
3581     // The simple thing to do would be to just call getSCEV on both operands
3582     // and call getAddExpr with the result. However if we're looking at a
3583     // bunch of things all added together, this can be quite inefficient,
3584     // because it leads to N-1 getAddExpr calls for N ultimate operands.
3585     // Instead, gather up all the operands and make a single getAddExpr call.
3586     // LLVM IR canonical form means we need only traverse the left operands.
3587     SmallVector<const SCEV *, 4> AddOps;
3588     AddOps.push_back(getSCEV(U->getOperand(1)));
3589     for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
3590       unsigned Opcode = Op->getValueID() - Value::InstructionVal;
3591       if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
3592         break;
3593       U = cast<Operator>(Op);
3594       const SCEV *Op1 = getSCEV(U->getOperand(1));
3595       if (Opcode == Instruction::Sub)
3596         AddOps.push_back(getNegativeSCEV(Op1));
3597       else
3598         AddOps.push_back(Op1);
3599     }
3600     AddOps.push_back(getSCEV(U->getOperand(0)));
3601     SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
3602     OverflowingBinaryOperator *OBO = cast<OverflowingBinaryOperator>(V);
3603     // setFlags returns the combined flags; it does not modify in place.
3604     if (OBO->hasNoSignedWrap())
3605       Flags = setFlags(Flags, SCEV::FlagNSW);
3606     if (OBO->hasNoUnsignedWrap())
3607       Flags = setFlags(Flags, SCEV::FlagNUW);
3608     return getAddExpr(AddOps, Flags);
3609   }
3610   case Instruction::Mul: {
3611     // See the Add code above.
3612     SmallVector<const SCEV *, 4> MulOps;
3613     MulOps.push_back(getSCEV(U->getOperand(1)));
3614     for (Value *Op = U->getOperand(0);
3615          Op->getValueID() == Instruction::Mul + Value::InstructionVal;
3616          Op = U->getOperand(0)) {
3617       U = cast<Operator>(Op);
3618       MulOps.push_back(getSCEV(U->getOperand(1)));
3619     }
3620     MulOps.push_back(getSCEV(U->getOperand(0)));
3621     return getMulExpr(MulOps);
3622   }
3623   case Instruction::UDiv:
3624     return getUDivExpr(getSCEV(U->getOperand(0)),
3625                        getSCEV(U->getOperand(1)));
3626   case Instruction::Sub:
3627     return getMinusSCEV(getSCEV(U->getOperand(0)),
3628                         getSCEV(U->getOperand(1)));
3629   case Instruction::And:
3630     // For an expression like x&255 that merely masks off the high bits,
3631     // use zext(trunc(x)) as the SCEV expression.
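    // e.g. (illustrative, assuming an i32 %x): "and i32 %x, 255" becomes
    // (zext i8 (trunc %x to i8) to i32), a form the extend/truncate folders
    // can reason about.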
3631 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3632 if (CI->isNullValue()) 3633 return getSCEV(U->getOperand(1)); 3634 if (CI->isAllOnesValue()) 3635 return getSCEV(U->getOperand(0)); 3636 const APInt &A = CI->getValue(); 3637 3638 // Instcombine's ShrinkDemandedConstant may strip bits out of 3639 // constants, obscuring what would otherwise be a low-bits mask. 3640 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant 3641 // knew about to reconstruct a low-bits mask value. 3642 unsigned LZ = A.countLeadingZeros(); 3643 unsigned BitWidth = A.getBitWidth(); 3644 APInt AllOnes = APInt::getAllOnesValue(BitWidth); 3645 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 3646 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD); 3647 3648 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ); 3649 3650 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask)) 3651 return 3652 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)), 3653 IntegerType::get(getContext(), BitWidth - LZ)), 3654 U->getType()); 3655 } 3656 break; 3657 3658 case Instruction::Or: 3659 // If the RHS of the Or is a constant, we may have something like: 3660 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 3661 // optimizations will transparently handle this case. 3662 // 3663 // In order for this transformation to be safe, the LHS must be of the 3664 // form X*(2^n) and the Or constant must be less than 2^n. 3665 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3666 const SCEV *LHS = getSCEV(U->getOperand(0)); 3667 const APInt &CIVal = CI->getValue(); 3668 if (GetMinTrailingZeros(LHS) >= 3669 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 3670 // Build a plain add SCEV. 3671 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 3672 // If the LHS of the add was an addrec and it has no-wrap flags, 3673 // transfer the no-wrap flags, since an or won't introduce a wrap. 3674 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 3675 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 3676 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 3677 OldAR->getNoWrapFlags()); 3678 } 3679 return S; 3680 } 3681 } 3682 break; 3683 case Instruction::Xor: 3684 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3685 // If the RHS of the xor is a signbit, then this is just an add. 3686 // Instcombine turns add of signbit into xor as a strength reduction step. 3687 if (CI->getValue().isSignBit()) 3688 return getAddExpr(getSCEV(U->getOperand(0)), 3689 getSCEV(U->getOperand(1))); 3690 3691 // If the RHS of xor is -1, then this is a not operation. 3692 if (CI->isAllOnesValue()) 3693 return getNotSCEV(getSCEV(U->getOperand(0))); 3694 3695 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 3696 // This is a variant of the check for xor with -1, and it handles 3697 // the case where instcombine has trimmed non-demanded bits out 3698 // of an xor with -1. 
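      // Sketch (illustrative widths): with C == 255 on an i32 %x,
      //   xor (and %x, 255), 255
      // flips exactly the low 8 bits, so it can be modeled as
      // (zext i8 (not (trunc %x to i8)) to i32), which is what the code
      // below produces when getSCEV(and(x, C)) is already a zero-extend.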
3699       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3700         if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3701           if (BO->getOpcode() == Instruction::And &&
3702               LCI->getValue() == CI->getValue())
3703             if (const SCEVZeroExtendExpr *Z =
3704                   dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
3705               Type *UTy = U->getType();
3706               const SCEV *Z0 = Z->getOperand();
3707               Type *Z0Ty = Z0->getType();
3708               unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3709
3710               // If C is a low-bits mask, the zero extend is serving to
3711               // mask off the high bits. Complement the operand and
3712               // re-apply the zext.
3713               if (APIntOps::isMask(Z0TySize, CI->getValue()))
3714                 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3715
3716               // If C is a single bit, it may be in the sign-bit position
3717               // before the zero-extend. In this case, represent the xor
3718               // using an add, which is equivalent, and re-apply the zext.
3719               APInt Trunc = CI->getValue().trunc(Z0TySize);
3720               if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3721                   Trunc.isSignBit())
3722                 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3723                                          UTy);
3724             }
3725     }
3726     break;
3727
3728   case Instruction::Shl:
3729     // Turn shift left of a constant amount into a multiply.
3730     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3731       uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3732
3733       // If the shift count is not less than the bitwidth, the result of
3734       // the shift is undefined. Don't try to analyze it, because the
3735       // resolution chosen here may differ from the resolution chosen in
3736       // other parts of the compiler.
3737       if (SA->getValue().uge(BitWidth))
3738         break;
3739
3740       Constant *X = ConstantInt::get(getContext(),
3741         APInt(BitWidth, 1).shl(SA->getZExtValue()));
3742       return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3743     }
3744     break;
3745
3746   case Instruction::LShr:
3747     // Turn logical shift right of a constant into an unsigned divide.
3748     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3749       uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3750
3751       // If the shift count is not less than the bitwidth, the result of
3752       // the shift is undefined. Don't try to analyze it, because the
3753       // resolution chosen here may differ from the resolution chosen in
3754       // other parts of the compiler.
3755       if (SA->getValue().uge(BitWidth))
3756         break;
3757
3758       Constant *X = ConstantInt::get(getContext(),
3759         APInt(BitWidth, 1).shl(SA->getZExtValue()));
3760       return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3761     }
3762     break;
3763
3764   case Instruction::AShr:
3765     // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
3766     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3767       if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
3768         if (L->getOpcode() == Instruction::Shl &&
3769             L->getOperand(1) == U->getOperand(1)) {
3770           uint64_t BitWidth = getTypeSizeInBits(U->getType());
3771
3772           // If the shift count is not less than the bitwidth, the result of
3773           // the shift is undefined. Don't try to analyze it, because the
3774           // resolution chosen here may differ from the resolution chosen in
3775           // other parts of the compiler.
3776 if (CI->getValue().uge(BitWidth)) 3777 break; 3778 3779 uint64_t Amt = BitWidth - CI->getZExtValue(); 3780 if (Amt == BitWidth) 3781 return getSCEV(L->getOperand(0)); // shift by zero --> noop 3782 return 3783 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)), 3784 IntegerType::get(getContext(), 3785 Amt)), 3786 U->getType()); 3787 } 3788 break; 3789 3790 case Instruction::Trunc: 3791 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 3792 3793 case Instruction::ZExt: 3794 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 3795 3796 case Instruction::SExt: 3797 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 3798 3799 case Instruction::BitCast: 3800 // BitCasts are no-op casts so we just eliminate the cast. 3801 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 3802 return getSCEV(U->getOperand(0)); 3803 break; 3804 3805 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can 3806 // lead to pointer expressions which cannot safely be expanded to GEPs, 3807 // because ScalarEvolution doesn't respect the GEP aliasing rules when 3808 // simplifying integer expressions. 3809 3810 case Instruction::GetElementPtr: 3811 return createNodeForGEP(cast<GEPOperator>(U)); 3812 3813 case Instruction::PHI: 3814 return createNodeForPHI(cast<PHINode>(U)); 3815 3816 case Instruction::Select: 3817 // This could be a smax or umax that was lowered earlier. 3818 // Try to recover it. 3819 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) { 3820 Value *LHS = ICI->getOperand(0); 3821 Value *RHS = ICI->getOperand(1); 3822 switch (ICI->getPredicate()) { 3823 case ICmpInst::ICMP_SLT: 3824 case ICmpInst::ICMP_SLE: 3825 std::swap(LHS, RHS); 3826 // fall through 3827 case ICmpInst::ICMP_SGT: 3828 case ICmpInst::ICMP_SGE: 3829 // a >s b ? a+x : b+x -> smax(a, b)+x 3830 // a >s b ? b+x : a+x -> smin(a, b)+x 3831 if (LHS->getType() == U->getType()) { 3832 const SCEV *LS = getSCEV(LHS); 3833 const SCEV *RS = getSCEV(RHS); 3834 const SCEV *LA = getSCEV(U->getOperand(1)); 3835 const SCEV *RA = getSCEV(U->getOperand(2)); 3836 const SCEV *LDiff = getMinusSCEV(LA, LS); 3837 const SCEV *RDiff = getMinusSCEV(RA, RS); 3838 if (LDiff == RDiff) 3839 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 3840 LDiff = getMinusSCEV(LA, RS); 3841 RDiff = getMinusSCEV(RA, LS); 3842 if (LDiff == RDiff) 3843 return getAddExpr(getSMinExpr(LS, RS), LDiff); 3844 } 3845 break; 3846 case ICmpInst::ICMP_ULT: 3847 case ICmpInst::ICMP_ULE: 3848 std::swap(LHS, RHS); 3849 // fall through 3850 case ICmpInst::ICMP_UGT: 3851 case ICmpInst::ICMP_UGE: 3852 // a >u b ? a+x : b+x -> umax(a, b)+x 3853 // a >u b ? b+x : a+x -> umin(a, b)+x 3854 if (LHS->getType() == U->getType()) { 3855 const SCEV *LS = getSCEV(LHS); 3856 const SCEV *RS = getSCEV(RHS); 3857 const SCEV *LA = getSCEV(U->getOperand(1)); 3858 const SCEV *RA = getSCEV(U->getOperand(2)); 3859 const SCEV *LDiff = getMinusSCEV(LA, LS); 3860 const SCEV *RDiff = getMinusSCEV(RA, RS); 3861 if (LDiff == RDiff) 3862 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 3863 LDiff = getMinusSCEV(LA, RS); 3864 RDiff = getMinusSCEV(RA, LS); 3865 if (LDiff == RDiff) 3866 return getAddExpr(getUMinExpr(LS, RS), LDiff); 3867 } 3868 break; 3869 case ICmpInst::ICMP_NE: 3870 // n != 0 ? 
 n+x : 1+x  ->  umax(n, 1)+x
3871       if (LHS->getType() == U->getType() &&
3872           isa<ConstantInt>(RHS) &&
3873           cast<ConstantInt>(RHS)->isZero()) {
3874         const SCEV *One = getConstant(LHS->getType(), 1);
3875         const SCEV *LS = getSCEV(LHS);
3876         const SCEV *LA = getSCEV(U->getOperand(1));
3877         const SCEV *RA = getSCEV(U->getOperand(2));
3878         const SCEV *LDiff = getMinusSCEV(LA, LS);
3879         const SCEV *RDiff = getMinusSCEV(RA, One);
3880         if (LDiff == RDiff)
3881           return getAddExpr(getUMaxExpr(One, LS), LDiff);
3882       }
3883       break;
3884     case ICmpInst::ICMP_EQ:
3885       // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
3886       if (LHS->getType() == U->getType() &&
3887           isa<ConstantInt>(RHS) &&
3888           cast<ConstantInt>(RHS)->isZero()) {
3889         const SCEV *One = getConstant(LHS->getType(), 1);
3890         const SCEV *LS = getSCEV(LHS);
3891         const SCEV *LA = getSCEV(U->getOperand(1));
3892         const SCEV *RA = getSCEV(U->getOperand(2));
3893         const SCEV *LDiff = getMinusSCEV(LA, One);
3894         const SCEV *RDiff = getMinusSCEV(RA, LS);
3895         if (LDiff == RDiff)
3896           return getAddExpr(getUMaxExpr(One, LS), LDiff);
3897       }
3898       break;
3899     default:
3900       break;
3901     }
3902   }
3903
3904   default: // We cannot analyze this expression.
3905     break;
3906   }
3907
3908   return getUnknown(V);
3909 }
3910
3911
3912
3913 //===----------------------------------------------------------------------===//
3914 //                   Iteration Count Computation Code
3915 //
3916
3917 /// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
3918 /// normal unsigned value, if possible. Returns 0 if the trip count is unknown
3919 /// or not constant. Will also return 0 if the maximum trip count is very large
3920 /// (>= 2^32).
3921 unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
3922                                                     BasicBlock *ExitBlock) {
3923   const SCEVConstant *ExitCount =
3924     dyn_cast<SCEVConstant>(getExitCount(L, ExitBlock));
3925   if (!ExitCount)
3926     return 0;
3927
3928   ConstantInt *ExitConst = ExitCount->getValue();
3929
3930   // Guard against huge trip counts.
3931   if (ExitConst->getValue().getActiveBits() > 32)
3932     return 0;
3933
3934   // In case of integer overflow, this returns 0, which is correct.
3935   return ((unsigned)ExitConst->getZExtValue()) + 1;
3936 }
3937
3938 /// getSmallConstantTripMultiple - Returns the largest constant divisor of the
3939 /// trip count of this loop as a normal unsigned value, if possible. This
3940 /// means that the actual trip count is always a multiple of the returned
3941 /// value (don't forget the trip count could very well be zero as well!).
3942 ///
3943 /// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
3944 /// of a constant (which is also the case if the trip count is simply
3945 /// constant; use getSmallConstantTripCount for that case). It will also
3946 /// return 1 if the trip count is very large (>= 2^32).
3947 unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L,
3948                                                        BasicBlock *ExitBlock) {
3949   const SCEV *ExitCount = getExitCount(L, ExitBlock);
3950   if (ExitCount == getCouldNotCompute())
3951     return 1;
3952
3953   // Get the trip count from the BE count by adding 1.
3954   const SCEV *TCMul = getAddExpr(ExitCount,
3955                                  getConstant(ExitCount->getType(), 1));
3956   // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
3957   // to factor simple cases.
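  // e.g. (illustrative): if the trip count folds to (4 * %n), the constant
  // operand of the mul is 4, so the trip count is known to be a multiple of 4
  // even though %n itself is unknown.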
3958   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
3959     TCMul = Mul->getOperand(0);
3960
3961   const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
3962   if (!MulC)
3963     return 1;
3964
3965   ConstantInt *Result = MulC->getValue();
3966
3967   // Guard against huge trip counts.
3968   if (!Result || Result->getValue().getActiveBits() > 32)
3969     return 1;
3970
3971   return (unsigned)Result->getZExtValue();
3972 }
3973
3974 /// getExitCount - Get the expression for the number of loop iterations for
3975 /// which this loop is guaranteed not to exit via ExitingBlock. Otherwise
3976 /// return SCEVCouldNotCompute.
3977 const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
3978   return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
3979 }
3980
3981 /// getBackedgeTakenCount - If the specified loop has a predictable
3982 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3983 /// object. The backedge-taken count is the number of times the loop header
3984 /// will be branched to from within the loop. This is one less than the
3985 /// trip count of the loop, since it doesn't count the first iteration,
3986 /// when the header is branched to from outside the loop.
3987 ///
3988 /// Note that it is not valid to call this method on a loop without a
3989 /// loop-invariant backedge-taken count (see
3990 /// hasLoopInvariantBackedgeTakenCount).
3991 ///
3992 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3993   return getBackedgeTakenInfo(L).getExact(this);
3994 }
3995
3996 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3997 /// return the least SCEV value that is known never to be less than the
3998 /// actual backedge taken count.
3999 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
4000   return getBackedgeTakenInfo(L).getMax(this);
4001 }
4002
4003 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
4004 /// onto the given Worklist.
4005 static void
4006 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
4007   BasicBlock *Header = L->getHeader();
4008
4009   // Push all Loop-header PHIs onto the Worklist stack.
4010   for (BasicBlock::iterator I = Header->begin();
4011        PHINode *PN = dyn_cast<PHINode>(I); ++I)
4012     Worklist.push_back(PN);
4013 }
4014
4015 const ScalarEvolution::BackedgeTakenInfo &
4016 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
4017   // Initially insert an invalid entry for this loop. If the insertion
4018   // succeeds, proceed to actually compute a backedge-taken count and
4019   // update the value. The temporary CouldNotCompute value tells SCEV
4020   // code elsewhere that it shouldn't attempt to request a new
4021   // backedge-taken count, which could result in infinite recursion.
4022   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
4023     BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
4024   if (!Pair.second)
4025     return Pair.first->second;
4026
4027   // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
4028   // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
4029   // must be cleared in this scope.
4030   BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);
4031
4032   if (Result.getExact(this) != getCouldNotCompute()) {
4033     assert(isLoopInvariant(Result.getExact(this), L) &&
4034            isLoopInvariant(Result.getMax(this), L) &&
4035            "Computed backedge-taken count isn't loop invariant for loop!");
4036     ++NumTripCountsComputed;
4037   }
4038   else if (Result.getMax(this) == getCouldNotCompute() &&
4039            isa<PHINode>(L->getHeader()->begin())) {
4040     // Only count loops that have phi nodes as not being computable.
4041     ++NumTripCountsNotComputed;
4042   }
4043
4044   // Now that we know more about the trip count for this loop, forget any
4045   // existing SCEV values for PHI nodes in this loop since they are only
4046   // conservative estimates made without the benefit of trip count
4047   // information. This is similar to the code in forgetLoop, except that
4048   // it handles SCEVUnknown PHI nodes specially.
4049   if (Result.hasAnyInfo()) {
4050     SmallVector<Instruction *, 16> Worklist;
4051     PushLoopPHIs(L, Worklist);
4052
4053     SmallPtrSet<Instruction *, 8> Visited;
4054     while (!Worklist.empty()) {
4055       Instruction *I = Worklist.pop_back_val();
4056       if (!Visited.insert(I)) continue;
4057
4058       ValueExprMapType::iterator It =
4059         ValueExprMap.find(static_cast<Value *>(I));
4060       if (It != ValueExprMap.end()) {
4061         const SCEV *Old = It->second;
4062
4063         // SCEVUnknown for a PHI either means that it has an unrecognized
4064         // structure, or it's a PHI that's in the process of being computed
4065         // by createNodeForPHI. In the former case, additional loop trip
4066         // count information isn't going to change anything. In the latter
4067         // case, createNodeForPHI will perform the necessary updates on its
4068         // own when it gets to that point.
4069         if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
4070           forgetMemoizedResults(Old);
4071           ValueExprMap.erase(It);
4072         }
4073         if (PHINode *PN = dyn_cast<PHINode>(I))
4074           ConstantEvolutionLoopExitValue.erase(PN);
4075       }
4076
4077       PushDefUseChildren(I, Worklist);
4078     }
4079   }
4080
4081   // Re-lookup the insert position, since the call to
4082   // ComputeBackedgeTakenCount above could result in a
4083   // recursive call to getBackedgeTakenInfo (on a different
4084   // loop), which would invalidate the iterator computed
4085   // earlier.
4086   return BackedgeTakenCounts.find(L)->second = Result;
4087 }
4088
4089 /// forgetLoop - This method should be called by the client when it has
4090 /// changed a loop in a way that may affect ScalarEvolution's ability to
4091 /// compute a trip count, or if the loop is deleted.
4092 void ScalarEvolution::forgetLoop(const Loop *L) {
4093   // Drop any stored trip count value.
4094   DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
4095     BackedgeTakenCounts.find(L);
4096   if (BTCPos != BackedgeTakenCounts.end()) {
4097     BTCPos->second.clear();
4098     BackedgeTakenCounts.erase(BTCPos);
4099   }
4100
4101   // Drop information about expressions based on loop-header PHIs.
4102   SmallVector<Instruction *, 16> Worklist;
4103   PushLoopPHIs(L, Worklist);
4104
4105   SmallPtrSet<Instruction *, 8> Visited;
4106   while (!Worklist.empty()) {
4107     Instruction *I = Worklist.pop_back_val();
4108     if (!Visited.insert(I)) continue;
4109
4110     ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
4111     if (It != ValueExprMap.end()) {
4112       forgetMemoizedResults(It->second);
4113       ValueExprMap.erase(It);
4114       if (PHINode *PN = dyn_cast<PHINode>(I))
4115         ConstantEvolutionLoopExitValue.erase(PN);
4116     }
4117
4118     PushDefUseChildren(I, Worklist);
4119   }
4120
4121   // Forget all contained loops too, to avoid dangling entries in the
4122   // ValuesAtScopes map.
4123   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4124     forgetLoop(*I);
4125 }
4126
4127 /// forgetValue - This method should be called by the client when it has
4128 /// changed a value in a way that may affect its value, or which may
4129 /// disconnect it from a def-use chain linking it to a loop.
4130 void ScalarEvolution::forgetValue(Value *V) {
4131   Instruction *I = dyn_cast<Instruction>(V);
4132   if (!I) return;
4133
4134   // Drop information about expressions based on loop-header PHIs.
4135   SmallVector<Instruction *, 16> Worklist;
4136   Worklist.push_back(I);
4137
4138   SmallPtrSet<Instruction *, 8> Visited;
4139   while (!Worklist.empty()) {
4140     I = Worklist.pop_back_val();
4141     if (!Visited.insert(I)) continue;
4142
4143     ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
4144     if (It != ValueExprMap.end()) {
4145       forgetMemoizedResults(It->second);
4146       ValueExprMap.erase(It);
4147       if (PHINode *PN = dyn_cast<PHINode>(I))
4148         ConstantEvolutionLoopExitValue.erase(PN);
4149     }
4150
4151     PushDefUseChildren(I, Worklist);
4152   }
4153 }
4154
4155 /// getExact - Get the exact loop backedge taken count considering all loop
4156 /// exits. A computable result can only be returned for loops with a single
4157 /// exit. Returning the minimum taken count among all exits is incorrect
4158 /// because one of the loop's exit limits may have been skipped. HowFarToZero
4159 /// assumes that the limit of each loop test is never skipped. This is a valid
4160 /// assumption as long as the loop exits via that test. For precise results, it
4161 /// is the caller's responsibility to specify the relevant loop exit using
4162 /// getExact(ExitingBlock, SE).
4163 const SCEV *
4164 ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
4165   // If any exits were not computable, the loop is not computable.
4166   if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
4167
4168   // We need exactly one computable exit.
4169   if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
4170   assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
4171
4172   const SCEV *BECount = 0;
4173   for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4174        ENT != 0; ENT = ENT->getNextExit()) {
4175
4176     assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
4177
4178     if (!BECount)
4179       BECount = ENT->ExactNotTaken;
4180     else if (BECount != ENT->ExactNotTaken)
4181       return SE->getCouldNotCompute();
4182   }
4183   assert(BECount && "Invalid not taken count for loop exit");
4184   return BECount;
4185 }
4186
4187 /// getExact - Get the exact not taken count for this loop exit.
4188 const SCEV *
4189 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
4190                                              ScalarEvolution *SE) const {
4191   for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4192        ENT != 0; ENT = ENT->getNextExit()) {
4193
4194     if (ENT->ExitingBlock == ExitingBlock)
4195       return ENT->ExactNotTaken;
4196   }
4197   return SE->getCouldNotCompute();
4198 }
4199
4200 /// getMax - Get the max backedge taken count for the loop.
4201 const SCEV *
4202 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
4203   return Max ? Max : SE->getCouldNotCompute();
4204 }
4205
4206 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
4207 /// computable exit into a persistent ExitNotTakenInfo array.
4208 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
4209   SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
4210   bool Complete, const SCEV *MaxCount) : Max(MaxCount) {
4211
4212   if (!Complete)
4213     ExitNotTaken.setIncomplete();
4214
4215   unsigned NumExits = ExitCounts.size();
4216   if (NumExits == 0) return;
4217
4218   ExitNotTaken.ExitingBlock = ExitCounts[0].first;
4219   ExitNotTaken.ExactNotTaken = ExitCounts[0].second;
4220   if (NumExits == 1) return;
4221
4222   // Handle the rare case of multiple computable exits.
4223   ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1];
4224
4225   ExitNotTakenInfo *PrevENT = &ExitNotTaken;
4226   for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) {
4227     PrevENT->setNextExit(ENT);
4228     ENT->ExitingBlock = ExitCounts[i].first;
4229     ENT->ExactNotTaken = ExitCounts[i].second;
4230   }
4231 }
4232
4233 /// clear - Invalidate this result and free the ExitNotTakenInfo array.
4234 void ScalarEvolution::BackedgeTakenInfo::clear() {
4235   ExitNotTaken.ExitingBlock = 0;
4236   ExitNotTaken.ExactNotTaken = 0;
4237   delete[] ExitNotTaken.getNextExit();
4238 }
4239
4240 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
4241 /// of the specified loop will execute.
4242 ScalarEvolution::BackedgeTakenInfo
4243 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
4244   SmallVector<BasicBlock *, 8> ExitingBlocks;
4245   L->getExitingBlocks(ExitingBlocks);
4246
4247   // Examine all exits and pick the most conservative values.
4248   const SCEV *MaxBECount = getCouldNotCompute();
4249   bool CouldComputeBECount = true;
4250   SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
4251   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
4252     ExitLimit EL = ComputeExitLimit(L, ExitingBlocks[i]);
4253     if (EL.Exact == getCouldNotCompute())
4254       // We couldn't compute an exact value for this exit, so
4255       // we won't be able to compute an exact value for the loop.
4256       CouldComputeBECount = false;
4257     else
4258       ExitCounts.push_back(std::make_pair(ExitingBlocks[i], EL.Exact));
4259
4260     if (MaxBECount == getCouldNotCompute())
4261       MaxBECount = EL.Max;
4262     else if (EL.Max != getCouldNotCompute()) {
4263       // We cannot take the "min" MaxBECount, because non-unit stride loops may
4264       // skip some loop tests. Taking the max over the exits is sufficiently
4265       // conservative. TODO: We could do better taking into consideration
4266       // that (1) the loop has unit stride (2) the last loop test is
4267       // less-than/greater-than (3) any loop test is less-than/greater-than AND
4268       // falls through a constant number of times less than the other tests.
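      // (Illustrative case: in a loop stepping by 2 with exits "i != n" and
      // "i > limit", the equality test can be stepped over entirely when n is
      // odd, so the smaller of the two exit limits may never fire; the max is
      // always a safe upper bound.)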
4269 MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, EL.Max); 4270 } 4271 } 4272 4273 return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount); 4274 } 4275 4276 /// ComputeExitLimit - Compute the number of times the backedge of the specified 4277 /// loop will execute if it exits via the specified block. 4278 ScalarEvolution::ExitLimit 4279 ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) { 4280 4281 // Okay, we've chosen an exiting block. See what condition causes us to 4282 // exit at this block. 4283 // 4284 // FIXME: we should be able to handle switch instructions (with a single exit) 4285 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 4286 if (ExitBr == 0) return getCouldNotCompute(); 4287 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!"); 4288 4289 // At this point, we know we have a conditional branch that determines whether 4290 // the loop is exited. However, we don't know if the branch is executed each 4291 // time through the loop. If not, then the execution count of the branch will 4292 // not be equal to the trip count of the loop. 4293 // 4294 // Currently we check for this by checking to see if the Exit branch goes to 4295 // the loop header. If so, we know it will always execute the same number of 4296 // times as the loop. We also handle the case where the exit block *is* the 4297 // loop header. This is common for un-rotated loops. 4298 // 4299 // If both of those tests fail, walk up the unique predecessor chain to the 4300 // header, stopping if there is an edge that doesn't exit the loop. If the 4301 // header is reached, the execution count of the branch will be equal to the 4302 // trip count of the loop. 4303 // 4304 // More extensive analysis could be done to handle more cases here. 4305 // 4306 if (ExitBr->getSuccessor(0) != L->getHeader() && 4307 ExitBr->getSuccessor(1) != L->getHeader() && 4308 ExitBr->getParent() != L->getHeader()) { 4309 // The simple checks failed, try climbing the unique predecessor chain 4310 // up to the header. 4311 bool Ok = false; 4312 for (BasicBlock *BB = ExitBr->getParent(); BB; ) { 4313 BasicBlock *Pred = BB->getUniquePredecessor(); 4314 if (!Pred) 4315 return getCouldNotCompute(); 4316 TerminatorInst *PredTerm = Pred->getTerminator(); 4317 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) { 4318 BasicBlock *PredSucc = PredTerm->getSuccessor(i); 4319 if (PredSucc == BB) 4320 continue; 4321 // If the predecessor has a successor that isn't BB and isn't 4322 // outside the loop, assume the worst. 4323 if (L->contains(PredSucc)) 4324 return getCouldNotCompute(); 4325 } 4326 if (Pred == L->getHeader()) { 4327 Ok = true; 4328 break; 4329 } 4330 BB = Pred; 4331 } 4332 if (!Ok) 4333 return getCouldNotCompute(); 4334 } 4335 4336 // Proceed to the next level to examine the exit condition expression. 4337 return ComputeExitLimitFromCond(L, ExitBr->getCondition(), 4338 ExitBr->getSuccessor(0), 4339 ExitBr->getSuccessor(1)); 4340 } 4341 4342 /// ComputeExitLimitFromCond - Compute the number of times the 4343 /// backedge of the specified loop will execute if its exit condition 4344 /// were a conditional branch of ExitCond, TBB, and FBB. 4345 ScalarEvolution::ExitLimit 4346 ScalarEvolution::ComputeExitLimitFromCond(const Loop *L, 4347 Value *ExitCond, 4348 BasicBlock *TBB, 4349 BasicBlock *FBB) { 4350 // Check if the controlling expression for this loop is an And or Or. 
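  // e.g. (illustrative IR): a latch branch such as
  //   br i1 (and i1 %c0, %c1), label %header, label %exit
  // is handled by recursing on %c0 and %c1 and combining the two exit limits.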
4351 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 4352 if (BO->getOpcode() == Instruction::And) { 4353 // Recurse on the operands of the and. 4354 ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB); 4355 ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB); 4356 const SCEV *BECount = getCouldNotCompute(); 4357 const SCEV *MaxBECount = getCouldNotCompute(); 4358 if (L->contains(TBB)) { 4359 // Both conditions must be true for the loop to continue executing. 4360 // Choose the less conservative count. 4361 if (EL0.Exact == getCouldNotCompute() || 4362 EL1.Exact == getCouldNotCompute()) 4363 BECount = getCouldNotCompute(); 4364 else 4365 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact); 4366 if (EL0.Max == getCouldNotCompute()) 4367 MaxBECount = EL1.Max; 4368 else if (EL1.Max == getCouldNotCompute()) 4369 MaxBECount = EL0.Max; 4370 else 4371 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max); 4372 } else { 4373 // Both conditions must be true at the same time for the loop to exit. 4374 // For now, be conservative. 4375 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 4376 if (EL0.Max == EL1.Max) 4377 MaxBECount = EL0.Max; 4378 if (EL0.Exact == EL1.Exact) 4379 BECount = EL0.Exact; 4380 } 4381 4382 return ExitLimit(BECount, MaxBECount); 4383 } 4384 if (BO->getOpcode() == Instruction::Or) { 4385 // Recurse on the operands of the or. 4386 ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB); 4387 ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB); 4388 const SCEV *BECount = getCouldNotCompute(); 4389 const SCEV *MaxBECount = getCouldNotCompute(); 4390 if (L->contains(FBB)) { 4391 // Both conditions must be false for the loop to continue executing. 4392 // Choose the less conservative count. 4393 if (EL0.Exact == getCouldNotCompute() || 4394 EL1.Exact == getCouldNotCompute()) 4395 BECount = getCouldNotCompute(); 4396 else 4397 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact); 4398 if (EL0.Max == getCouldNotCompute()) 4399 MaxBECount = EL1.Max; 4400 else if (EL1.Max == getCouldNotCompute()) 4401 MaxBECount = EL0.Max; 4402 else 4403 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max); 4404 } else { 4405 // Both conditions must be false at the same time for the loop to exit. 4406 // For now, be conservative. 4407 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 4408 if (EL0.Max == EL1.Max) 4409 MaxBECount = EL0.Max; 4410 if (EL0.Exact == EL1.Exact) 4411 BECount = EL0.Exact; 4412 } 4413 4414 return ExitLimit(BECount, MaxBECount); 4415 } 4416 } 4417 4418 // With an icmp, it may be feasible to compute an exact backedge-taken count. 4419 // Proceed to the next level to examine the icmp. 4420 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) 4421 return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB); 4422 4423 // Check for a constant condition. These are normally stripped out by 4424 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 4425 // preserve the CFG and is temporarily leaving constant conditions 4426 // in place. 4427 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 4428 if (L->contains(FBB) == !CI->getZExtValue()) 4429 // The backedge is always taken. 4430 return getCouldNotCompute(); 4431 else 4432 // The backedge is never taken. 4433 return getConstant(CI->getType(), 0); 4434 } 4435 4436 // If it's not an integer or pointer comparison then compute it the hard way. 
4437 return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 4438 } 4439 4440 /// ComputeExitLimitFromICmp - Compute the number of times the 4441 /// backedge of the specified loop will execute if its exit condition 4442 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB. 4443 ScalarEvolution::ExitLimit 4444 ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L, 4445 ICmpInst *ExitCond, 4446 BasicBlock *TBB, 4447 BasicBlock *FBB) { 4448 4449 // If the condition was exit on true, convert the condition to exit on false 4450 ICmpInst::Predicate Cond; 4451 if (!L->contains(FBB)) 4452 Cond = ExitCond->getPredicate(); 4453 else 4454 Cond = ExitCond->getInversePredicate(); 4455 4456 // Handle common loops like: for (X = "string"; *X; ++X) 4457 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 4458 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 4459 ExitLimit ItCnt = 4460 ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond); 4461 if (ItCnt.hasAnyInfo()) 4462 return ItCnt; 4463 } 4464 4465 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 4466 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 4467 4468 // Try to evaluate any dependencies out of the loop. 4469 LHS = getSCEVAtScope(LHS, L); 4470 RHS = getSCEVAtScope(RHS, L); 4471 4472 // At this point, we would like to compute how many iterations of the 4473 // loop the predicate will return true for these inputs. 4474 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 4475 // If there is a loop-invariant, force it into the RHS. 4476 std::swap(LHS, RHS); 4477 Cond = ICmpInst::getSwappedPredicate(Cond); 4478 } 4479 4480 // Simplify the operands before analyzing them. 4481 (void)SimplifyICmpOperands(Cond, LHS, RHS); 4482 4483 // If we have a comparison of a chrec against a constant, try to use value 4484 // ranges to answer this query. 4485 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 4486 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 4487 if (AddRec->getLoop() == L) { 4488 // Form the constant range. 
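        // For example, if the continue predicate is {0,+,1} slt 100, the
        // range below is [INT_MIN, 100) and getNumIterationsInRange computes
        // how long the chrec stays inside it.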
4489 ConstantRange CompRange( 4490 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue())); 4491 4492 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 4493 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 4494 } 4495 4496 switch (Cond) { 4497 case ICmpInst::ICMP_NE: { // while (X != Y) 4498 // Convert to: while (X-Y != 0) 4499 ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L); 4500 if (EL.hasAnyInfo()) return EL; 4501 break; 4502 } 4503 case ICmpInst::ICMP_EQ: { // while (X == Y) 4504 // Convert to: while (X-Y == 0) 4505 ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); 4506 if (EL.hasAnyInfo()) return EL; 4507 break; 4508 } 4509 case ICmpInst::ICMP_SLT: { 4510 ExitLimit EL = HowManyLessThans(LHS, RHS, L, true); 4511 if (EL.hasAnyInfo()) return EL; 4512 break; 4513 } 4514 case ICmpInst::ICMP_SGT: { 4515 ExitLimit EL = HowManyLessThans(getNotSCEV(LHS), 4516 getNotSCEV(RHS), L, true); 4517 if (EL.hasAnyInfo()) return EL; 4518 break; 4519 } 4520 case ICmpInst::ICMP_ULT: { 4521 ExitLimit EL = HowManyLessThans(LHS, RHS, L, false); 4522 if (EL.hasAnyInfo()) return EL; 4523 break; 4524 } 4525 case ICmpInst::ICMP_UGT: { 4526 ExitLimit EL = HowManyLessThans(getNotSCEV(LHS), 4527 getNotSCEV(RHS), L, false); 4528 if (EL.hasAnyInfo()) return EL; 4529 break; 4530 } 4531 default: 4532 #if 0 4533 dbgs() << "ComputeBackedgeTakenCount "; 4534 if (ExitCond->getOperand(0)->getType()->isUnsigned()) 4535 dbgs() << "[unsigned] "; 4536 dbgs() << *LHS << " " 4537 << Instruction::getOpcodeName(Instruction::ICmp) 4538 << " " << *RHS << "\n"; 4539 #endif 4540 break; 4541 } 4542 return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 4543 } 4544 4545 static ConstantInt * 4546 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 4547 ScalarEvolution &SE) { 4548 const SCEV *InVal = SE.getConstant(C); 4549 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 4550 assert(isa<SCEVConstant>(Val) && 4551 "Evaluation of SCEV at constant didn't fold correctly?"); 4552 return cast<SCEVConstant>(Val)->getValue(); 4553 } 4554 4555 /// GetAddressedElementFromGlobal - Given a global variable with an initializer 4556 /// and a GEP expression (missing the pointer index) indexing into it, return 4557 /// the addressed element of the initializer or null if the index expression is 4558 /// invalid. 
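/// For example, given a global of type [4 x i32] and the single index 2,
/// this returns the initializer's third element; an out-of-range index
/// (a bogus program) yields null.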
4559 static Constant *
4560 GetAddressedElementFromGlobal(GlobalVariable *GV,
4561                               const std::vector<ConstantInt*> &Indices) {
4562   Constant *Init = GV->getInitializer();
4563   for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
4564     uint64_t Idx = Indices[i]->getZExtValue();
4565     if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
4566       assert(Idx < CS->getNumOperands() && "Bad struct index!");
4567       Init = cast<Constant>(CS->getOperand(Idx));
4568     } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
4569       if (Idx >= CA->getNumOperands()) return 0; // Bogus program
4570       Init = cast<Constant>(CA->getOperand(Idx));
4571     } else if (isa<ConstantAggregateZero>(Init)) {
4572       if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
4573         assert(Idx < STy->getNumElements() && "Bad struct index!");
4574         Init = Constant::getNullValue(STy->getElementType(Idx));
4575       } else if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
4576         if (Idx >= ATy->getNumElements()) return 0; // Bogus program
4577         Init = Constant::getNullValue(ATy->getElementType());
4578       } else {
4579         llvm_unreachable("Unknown constant aggregate type!");
4580       }
4581       // The element is itself zero-initialized; keep indexing into it.
4582     } else {
4583       return 0; // Unknown initializer type
4584     }
4585   }
4586   return Init;
4587 }
4588
4589 /// ComputeLoadConstantCompareExitLimit - Given an exit condition of
4590 /// 'icmp op load X, cst', try to see if we can compute the backedge
4591 /// execution count.
4592 ScalarEvolution::ExitLimit
4593 ScalarEvolution::ComputeLoadConstantCompareExitLimit(
4594   LoadInst *LI,
4595   Constant *RHS,
4596   const Loop *L,
4597   ICmpInst::Predicate predicate) {
4598
4599   if (LI->isVolatile()) return getCouldNotCompute();
4600
4601   // Check to see if the loaded pointer is a getelementptr of a global.
4602   // TODO: Use SCEV instead of manually grubbing with GEPs.
4603   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
4604   if (!GEP) return getCouldNotCompute();
4605
4606   // Make sure that it is really a constant global we are gepping, with an
4607   // initializer, and make sure the first IDX is really 0.
4608   GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
4609   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
4610       GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
4611       !cast<Constant>(GEP->getOperand(1))->isNullValue())
4612     return getCouldNotCompute();
4613
4614   // Okay, we allow one non-constant index into the GEP instruction.
4615   Value *VarIdx = 0;
4616   std::vector<ConstantInt*> Indexes;
4617   unsigned VarIdxNum = 0;
4618   for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
4619     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
4620       Indexes.push_back(CI);
4621     } else {
4622       if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
4623       VarIdx = GEP->getOperand(i);
4624       VarIdxNum = i-2;
4625       Indexes.push_back(0);
4626     }
4627
4628   // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
4629   // Check to see if X is a loop variant variable value now.
4630   const SCEV *Idx = getSCEV(VarIdx);
4631   Idx = getSCEVAtScope(Idx, L);
4632
4633   // We can only recognize very limited forms of loop index expressions, in
4634   // particular, only affine AddRec's like {C1,+,C2}.
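  // For example, an index of the form {4,+,2} (start at 4, step by 2) is
  // acceptable; the brute-force loop below then evaluates the addressed
  // element at iteration 0, 1, 2, ... until the comparison settles or
  // MaxBruteForceIterations is reached.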
4635 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 4636 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) || 4637 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 4638 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 4639 return getCouldNotCompute(); 4640 4641 unsigned MaxSteps = MaxBruteForceIterations; 4642 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 4643 ConstantInt *ItCst = ConstantInt::get( 4644 cast<IntegerType>(IdxExpr->getType()), IterationNum); 4645 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 4646 4647 // Form the GEP offset. 4648 Indexes[VarIdxNum] = Val; 4649 4650 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes); 4651 if (Result == 0) break; // Cannot compute! 4652 4653 // Evaluate the condition for this iteration. 4654 Result = ConstantExpr::getICmp(predicate, Result, RHS); 4655 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 4656 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 4657 #if 0 4658 dbgs() << "\n***\n*** Computed loop count " << *ItCst 4659 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader() 4660 << "***\n"; 4661 #endif 4662 ++NumArrayLenItCounts; 4663 return getConstant(ItCst); // Found terminating iteration! 4664 } 4665 } 4666 return getCouldNotCompute(); 4667 } 4668 4669 4670 /// CanConstantFold - Return true if we can constant fold an instruction of the 4671 /// specified type, assuming that all operands were constants. 4672 static bool CanConstantFold(const Instruction *I) { 4673 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 4674 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 4675 isa<LoadInst>(I)) 4676 return true; 4677 4678 if (const CallInst *CI = dyn_cast<CallInst>(I)) 4679 if (const Function *F = CI->getCalledFunction()) 4680 return canConstantFoldCallTo(F); 4681 return false; 4682 } 4683 4684 /// Determine whether this instruction can constant evolve within this loop 4685 /// assuming its operands can all constant evolve. 4686 static bool canConstantEvolve(Instruction *I, const Loop *L) { 4687 // An instruction outside of the loop can't be derived from a loop PHI. 4688 if (!L->contains(I)) return false; 4689 4690 if (isa<PHINode>(I)) { 4691 if (L->getHeader() == I->getParent()) 4692 return true; 4693 else 4694 // We don't currently keep track of the control flow needed to evaluate 4695 // PHIs, so we cannot handle PHIs inside of loops. 4696 return false; 4697 } 4698 4699 // If we won't be able to constant fold this expression even if the operands 4700 // are constants, bail early. 4701 return CanConstantFold(I); 4702 } 4703 4704 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 4705 /// recursing through each instruction operand until reaching a loop header phi. 4706 static PHINode * 4707 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 4708 DenseMap<Instruction *, PHINode *> &PHIMap) { 4709 4710 // Otherwise, we can evaluate this instruction if all of its operands are 4711 // constant or derived from a PHI node themselves. 
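  // For example, given a header phi %i and a use such as
  //   %inc = add %i, 1
  //   %cmp = icmp slt %inc, 20
  // both %inc and %cmp evolve from the single phi %i, which is what this
  // walk returns.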
4712   PHINode *PHI = 0;
4713   for (Instruction::op_iterator OpI = UseInst->op_begin(),
4714        OpE = UseInst->op_end(); OpI != OpE; ++OpI) {
4715
4716     if (isa<Constant>(*OpI)) continue;
4717
4718     Instruction *OpInst = dyn_cast<Instruction>(*OpI);
4719     if (!OpInst || !canConstantEvolve(OpInst, L)) return 0;
4720
4721     PHINode *P = dyn_cast<PHINode>(OpInst);
4722     if (!P)
4723       // If this operand has already been visited, reuse the prior result.
4724       // We may have P != PHI if this is the deepest point at which the
4725       // inconsistent paths meet.
4726       P = PHIMap.lookup(OpInst);
4727     if (!P) {
4728       // Recurse and memoize the results, whether a phi is found or not.
4729       // This recursive call invalidates pointers into PHIMap.
4730       P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
4731       PHIMap[OpInst] = P;
4732     }
4733     if (P == 0) return 0;          // Not evolving from PHI
4734     if (PHI && PHI != P) return 0; // Evolving from multiple different PHIs.
4735     PHI = P;
4736   }
4737   // This is an expression evolving from a constant PHI!
4738   return PHI;
4739 }
4740
4741 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
4742 /// in the loop that V is derived from. We allow arbitrary operations along the
4743 /// way, but the operands of an operation must either be constants or a value
4744 /// derived from a constant PHI. If this expression does not fit with these
4745 /// constraints, return null.
4746 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
4747   Instruction *I = dyn_cast<Instruction>(V);
4748   if (I == 0 || !canConstantEvolve(I, L)) return 0;
4749
4750   if (PHINode *PN = dyn_cast<PHINode>(I)) {
4751     return PN;
4752   }
4753
4754   // Record non-constant instructions contained by the loop.
4755   DenseMap<Instruction *, PHINode *> PHIMap;
4756   return getConstantEvolvingPHIOperands(I, L, PHIMap);
4757 }
4758
4759 /// EvaluateExpression - Given an expression that passes the
4760 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI
4761 /// nodes in the loop have the constant values given in the Vals map. If we
4762 /// can't fold this expression for some reason, return null.
4763 static Constant *EvaluateExpression(Value *V, const Loop *L,
4764                                     DenseMap<Instruction *, Constant *> &Vals,
4765                                     const TargetData *TD) {
4766   // Convenient constant check, but redundant for recursive calls.
4767   if (Constant *C = dyn_cast<Constant>(V)) return C;
4768   Instruction *I = dyn_cast<Instruction>(V);
4769   if (!I) return 0;
4770
4771   if (Constant *C = Vals.lookup(I)) return C;
4772
4773   // An instruction inside the loop depends on a value outside the loop that we
4774   // weren't given a mapping for, or a value such as a call inside the loop.
4775   if (!canConstantEvolve(I, L)) return 0;
4776
4777   // An unmapped PHI can be due to a branch or another loop inside this loop,
4778   // or due to this not being the initial iteration through a loop where we
4779   // couldn't compute the evolution of this particular PHI last time.
4780 if (isa<PHINode>(I)) return 0; 4781 4782 std::vector<Constant*> Operands(I->getNumOperands()); 4783 4784 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 4785 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 4786 if (!Operand) { 4787 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 4788 if (!Operands[i]) return 0; 4789 continue; 4790 } 4791 Constant *C = EvaluateExpression(Operand, L, Vals, TD); 4792 Vals[Operand] = C; 4793 if (!C) return 0; 4794 Operands[i] = C; 4795 } 4796 4797 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 4798 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 4799 Operands[1], TD); 4800 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 4801 if (!LI->isVolatile()) 4802 return ConstantFoldLoadFromConstPtr(Operands[0], TD); 4803 } 4804 return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD); 4805 } 4806 4807 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 4808 /// in the header of its containing loop, we know the loop executes a 4809 /// constant number of times, and the PHI node is just a recurrence 4810 /// involving constants, fold it. 4811 Constant * 4812 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 4813 const APInt &BEs, 4814 const Loop *L) { 4815 DenseMap<PHINode*, Constant*>::const_iterator I = 4816 ConstantEvolutionLoopExitValue.find(PN); 4817 if (I != ConstantEvolutionLoopExitValue.end()) 4818 return I->second; 4819 4820 if (BEs.ugt(MaxBruteForceIterations)) 4821 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it. 4822 4823 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 4824 4825 DenseMap<Instruction *, Constant *> CurrentIterVals; 4826 BasicBlock *Header = L->getHeader(); 4827 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 4828 4829 // Since the loop is canonicalized, the PHI node must have two entries. One 4830 // entry must be a constant (coming in from outside of the loop), and the 4831 // second must be derived from the same PHI. 4832 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); 4833 PHINode *PHI = 0; 4834 for (BasicBlock::iterator I = Header->begin(); 4835 (PHI = dyn_cast<PHINode>(I)); ++I) { 4836 Constant *StartCST = 4837 dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge)); 4838 if (StartCST == 0) continue; 4839 CurrentIterVals[PHI] = StartCST; 4840 } 4841 if (!CurrentIterVals.count(PN)) 4842 return RetVal = 0; 4843 4844 Value *BEValue = PN->getIncomingValue(SecondIsBackedge); 4845 4846 // Execute the loop symbolically to determine the exit value. 4847 if (BEs.getActiveBits() >= 32) 4848 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it! 4849 4850 unsigned NumIterations = BEs.getZExtValue(); // must be in range 4851 unsigned IterationNum = 0; 4852 for (; ; ++IterationNum) { 4853 if (IterationNum == NumIterations) 4854 return RetVal = CurrentIterVals[PN]; // Got exit value! 4855 4856 // Compute the value of the PHIs for the next iteration. 4857 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 4858 DenseMap<Instruction *, Constant *> NextIterVals; 4859 Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD); 4860 if (NextPHI == 0) 4861 return 0; // Couldn't evaluate! 4862 NextIterVals[PN] = NextPHI; 4863 4864 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 4865 4866 // Also evaluate the other PHI nodes. 
However, we don't get to stop if we
4867     // cease to be able to evaluate one of them or if they stop evolving,
4868     // because that doesn't necessarily prevent us from computing PN.
4869     SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
4870     for (DenseMap<Instruction *, Constant *>::const_iterator
4871          I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I) {
4872       PHINode *PHI = dyn_cast<PHINode>(I->first);
4873       if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
4874       PHIsToCompute.push_back(std::make_pair(PHI, I->second));
4875     }
4876     // We use two distinct loops because EvaluateExpression may invalidate any
4877     // iterators into CurrentIterVals.
4878     for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
4879          I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
4880       PHINode *PHI = I->first;
4881       Constant *&NextPHI = NextIterVals[PHI];
4882       if (!NextPHI) { // Not already computed.
4883         Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
4884         NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD);
4885       }
4886       if (NextPHI != I->second)
4887         StoppedEvolving = false;
4888     }
4889
4890     // If all entries in CurrentIterVals == NextIterVals then we can stop
4891     // iterating: the loop can't continue to change.
4892     if (StoppedEvolving)
4893       return RetVal = CurrentIterVals[PN];
4894
4895     CurrentIterVals.swap(NextIterVals);
4896   }
4897 }
4898
4899 /// ComputeExitCountExhaustively - If the loop is known to execute a
4900 /// constant number of times (the condition evolves only from constants),
4901 /// try to evaluate a few iterations of the loop until the exit condition
4902 /// gets a value of ExitWhen (true or false). If we cannot evaluate the
4903 /// trip count of the loop, return getCouldNotCompute().
4904 const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
4905                                                           Value *Cond,
4906                                                           bool ExitWhen) {
4907   PHINode *PN = getConstantEvolvingPHI(Cond, L);
4908   if (PN == 0) return getCouldNotCompute();
4909
4910   // If the loop is canonicalized, the PHI will have exactly two entries.
4911   // That's the only form we support here.
4912   if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
4913
4914   DenseMap<Instruction *, Constant *> CurrentIterVals;
4915   BasicBlock *Header = L->getHeader();
4916   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
4917
4918   // One entry must be a constant (coming in from outside of the loop), and the
4919   // second must be derived from the same PHI.
4920   bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4921   PHINode *PHI = 0;
4922   for (BasicBlock::iterator I = Header->begin();
4923        (PHI = dyn_cast<PHINode>(I)); ++I) {
4924     Constant *StartCST =
4925       dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
4926     if (StartCST == 0) continue;
4927     CurrentIterVals[PHI] = StartCST;
4928   }
4929   if (!CurrentIterVals.count(PN))
4930     return getCouldNotCompute();
4931
4932   // Okay, we found a PHI node that defines the trip count of this loop. Execute
4933   // the loop symbolically to determine when the condition gets a value of
4934   // "ExitWhen".
4935
4936   unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
4937   for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
4938     ConstantInt *CondVal =
4939       dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L,
4940                                                        CurrentIterVals, TD));
4941
4942     // Couldn't symbolically evaluate.
4943     if (!CondVal) return getCouldNotCompute();
4944
4945     if (CondVal->getValue() == uint64_t(ExitWhen)) {
4946       ++NumBruteForceTripCountsComputed;
4947       return getConstant(Type::getInt32Ty(getContext()), IterationNum);
4948     }
4949
4950     // Update all the PHI nodes for the next iteration.
4951     DenseMap<Instruction *, Constant *> NextIterVals;
4952
4953     // Create a list of which PHIs we need to compute. We want to do this before
4954     // calling EvaluateExpression on them because that may invalidate iterators
4955     // into CurrentIterVals.
4956     SmallVector<PHINode *, 8> PHIsToCompute;
4957     for (DenseMap<Instruction *, Constant *>::const_iterator
4958          I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I) {
4959       PHINode *PHI = dyn_cast<PHINode>(I->first);
4960       if (!PHI || PHI->getParent() != Header) continue;
4961       PHIsToCompute.push_back(PHI);
4962     }
4963     for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
4964          E = PHIsToCompute.end(); I != E; ++I) {
4965       PHINode *PHI = *I;
4966       Constant *&NextPHI = NextIterVals[PHI];
4967       if (NextPHI) continue; // Already computed!
4968
4969       Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
4970       NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD);
4971     }
4972     CurrentIterVals.swap(NextIterVals);
4973   }
4974
4975   // Too many iterations were needed to evaluate.
4976   return getCouldNotCompute();
4977 }
4978
4979 /// getSCEVAtScope - Return a SCEV expression for the specified value
4980 /// at the specified scope in the program. The L value specifies the loop
4981 /// nest at which to evaluate the expression: null means the top level, and
4982 /// a non-null loop means the point immediately inside that loop.
4983 ///
4984 /// This method can be used to compute the exit value for a variable defined
4985 /// in a loop by querying what the value will hold in the parent loop.
4986 ///
4987 /// In the case that a relevant loop exit value cannot be computed, the
4988 /// original value V is returned.
4989 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
4990   // Check to see if we've folded this expression at this loop before.
4991   std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
4992   std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
4993     Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
4994   if (!Pair.second)
4995     return Pair.first->second ? Pair.first->second : V;
4996
4997   // Otherwise compute it.
4998   const SCEV *C = computeSCEVAtScope(V, L);
4999   ValuesAtScopes[V][L] = C;
5000   return C;
5001 }
5002
5003 /// This builds up a Constant using the ConstantExpr interface. That way, we
5004 /// will return Constants for objects which aren't represented by a
5005 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
5006 /// Returns NULL if the SCEV isn't representable as a Constant.
5007 static Constant *BuildConstantFromSCEV(const SCEV *V) {
5008   switch (V->getSCEVType()) {
5009   default: // TODO: smax, umax.
5010 case scCouldNotCompute: 5011 case scAddRecExpr: 5012 break; 5013 case scConstant: 5014 return cast<SCEVConstant>(V)->getValue(); 5015 case scUnknown: 5016 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 5017 case scSignExtend: { 5018 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 5019 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 5020 return ConstantExpr::getSExt(CastOp, SS->getType()); 5021 break; 5022 } 5023 case scZeroExtend: { 5024 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 5025 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 5026 return ConstantExpr::getZExt(CastOp, SZ->getType()); 5027 break; 5028 } 5029 case scTruncate: { 5030 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 5031 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 5032 return ConstantExpr::getTrunc(CastOp, ST->getType()); 5033 break; 5034 } 5035 case scAddExpr: { 5036 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 5037 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 5038 if (C->getType()->isPointerTy()) 5039 C = ConstantExpr::getBitCast(C, Type::getInt8PtrTy(C->getContext())); 5040 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 5041 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 5042 if (!C2) return 0; 5043 5044 // First pointer! 5045 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 5046 std::swap(C, C2); 5047 // The offsets have been converted to bytes. We can add bytes to an 5048 // i8* by GEP with the byte count in the first index. 5049 C = ConstantExpr::getBitCast(C,Type::getInt8PtrTy(C->getContext())); 5050 } 5051 5052 // Don't bother trying to sum two pointers. We probably can't 5053 // statically compute a load that results from it anyway. 5054 if (C2->getType()->isPointerTy()) 5055 return 0; 5056 5057 if (C->getType()->isPointerTy()) { 5058 if (cast<PointerType>(C->getType())->getElementType()->isStructTy()) 5059 C2 = ConstantExpr::getIntegerCast( 5060 C2, Type::getInt32Ty(C->getContext()), true); 5061 C = ConstantExpr::getGetElementPtr(C, C2); 5062 } else 5063 C = ConstantExpr::getAdd(C, C2); 5064 } 5065 return C; 5066 } 5067 break; 5068 } 5069 case scMulExpr: { 5070 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 5071 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 5072 // Don't bother with pointers at all. 5073 if (C->getType()->isPointerTy()) return 0; 5074 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 5075 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 5076 if (!C2 || C2->getType()->isPointerTy()) return 0; 5077 C = ConstantExpr::getMul(C, C2); 5078 } 5079 return C; 5080 } 5081 break; 5082 } 5083 case scUDivExpr: { 5084 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 5085 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 5086 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 5087 if (LHS->getType() == RHS->getType()) 5088 return ConstantExpr::getUDiv(LHS, RHS); 5089 break; 5090 } 5091 } 5092 return 0; 5093 } 5094 5095 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 5096 if (isa<SCEVConstant>(V)) return V; 5097 5098 // If this instruction is evolved from a constant-evolving PHI, compute the 5099 // exit value from the loop without using SCEVs. 
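  // For example, a header phi such as x = phi(3, x*x) has no chrec form,
  // but if the loop's backedge-taken count is a known constant, the phi's
  // exit value can still be obtained by the brute-force evaluation in
  // getConstantEvolutionLoopExitValue.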
5100 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 5101 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 5102 const Loop *LI = (*this->LI)[I->getParent()]; 5103 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 5104 if (PHINode *PN = dyn_cast<PHINode>(I)) 5105 if (PN->getParent() == LI->getHeader()) { 5106 // Okay, there is no closed form solution for the PHI node. Check 5107 // to see if the loop that contains it has a known backedge-taken 5108 // count. If so, we may be able to force computation of the exit 5109 // value. 5110 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 5111 if (const SCEVConstant *BTCC = 5112 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 5113 // Okay, we know how many times the containing loop executes. If 5114 // this is a constant evolving PHI node, get the final value at 5115 // the specified iteration number. 5116 Constant *RV = getConstantEvolutionLoopExitValue(PN, 5117 BTCC->getValue()->getValue(), 5118 LI); 5119 if (RV) return getSCEV(RV); 5120 } 5121 } 5122 5123 // Okay, this is an expression that we cannot symbolically evaluate 5124 // into a SCEV. Check to see if it's possible to symbolically evaluate 5125 // the arguments into constants, and if so, try to constant propagate the 5126 // result. This is particularly useful for computing loop exit values. 5127 if (CanConstantFold(I)) { 5128 SmallVector<Constant *, 4> Operands; 5129 bool MadeImprovement = false; 5130 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 5131 Value *Op = I->getOperand(i); 5132 if (Constant *C = dyn_cast<Constant>(Op)) { 5133 Operands.push_back(C); 5134 continue; 5135 } 5136 5137 // If any of the operands is non-constant and if they are 5138 // non-integer and non-pointer, don't even try to analyze them 5139 // with scev techniques. 5140 if (!isSCEVable(Op->getType())) 5141 return V; 5142 5143 const SCEV *OrigV = getSCEV(Op); 5144 const SCEV *OpV = getSCEVAtScope(OrigV, L); 5145 MadeImprovement |= OrigV != OpV; 5146 5147 Constant *C = BuildConstantFromSCEV(OpV); 5148 if (!C) return V; 5149 if (C->getType() != Op->getType()) 5150 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 5151 Op->getType(), 5152 false), 5153 C, Op->getType()); 5154 Operands.push_back(C); 5155 } 5156 5157 // Check to see if getSCEVAtScope actually made an improvement. 5158 if (MadeImprovement) { 5159 Constant *C = 0; 5160 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 5161 C = ConstantFoldCompareInstOperands(CI->getPredicate(), 5162 Operands[0], Operands[1], TD); 5163 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 5164 if (!LI->isVolatile()) 5165 C = ConstantFoldLoadFromConstPtr(Operands[0], TD); 5166 } else 5167 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), 5168 Operands, TD); 5169 if (!C) return V; 5170 return getSCEV(C); 5171 } 5172 } 5173 } 5174 5175 // This is some other type of SCEVUnknown, just return it. 5176 return V; 5177 } 5178 5179 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 5180 // Avoid performing the look-up in the common case where the specified 5181 // expression has no loop-variant portions. 5182 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 5183 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 5184 if (OpAtScope != Comm->getOperand(i)) { 5185 // Okay, at least one of these operands is loop variant but might be 5186 // foldable. Build a new instance of the folded commutative expression. 
5187         SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
5188                                             Comm->op_begin()+i);
5189         NewOps.push_back(OpAtScope);
5190
5191         for (++i; i != e; ++i) {
5192           OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
5193           NewOps.push_back(OpAtScope);
5194         }
5195         if (isa<SCEVAddExpr>(Comm))
5196           return getAddExpr(NewOps);
5197         if (isa<SCEVMulExpr>(Comm))
5198           return getMulExpr(NewOps);
5199         if (isa<SCEVSMaxExpr>(Comm))
5200           return getSMaxExpr(NewOps);
5201         if (isa<SCEVUMaxExpr>(Comm))
5202           return getUMaxExpr(NewOps);
5203         llvm_unreachable("Unknown commutative SCEV type!");
5204       }
5205     }
5206     // If we got here, all operands are loop invariant.
5207     return Comm;
5208   }
5209
5210   if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
5211     const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
5212     const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
5213     if (LHS == Div->getLHS() && RHS == Div->getRHS())
5214       return Div; // must be loop invariant
5215     return getUDivExpr(LHS, RHS);
5216   }
5217
5218   // If this is a loop recurrence for a loop that does not contain L, then we
5219   // are dealing with the final value computed by the loop.
5220   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
5221     // First, attempt to evaluate each operand.
5222     // Avoid performing the look-up in the common case where the specified
5223     // expression has no loop-variant portions.
5224     for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
5225       const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
5226       if (OpAtScope == AddRec->getOperand(i))
5227         continue;
5228
5229       // Okay, at least one of these operands is loop variant but might be
5230       // foldable. Build a new instance of the folded addrec.
5231       SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
5232                                           AddRec->op_begin()+i);
5233       NewOps.push_back(OpAtScope);
5234       for (++i; i != e; ++i)
5235         NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
5236
5237       const SCEV *FoldedRec =
5238         getAddRecExpr(NewOps, AddRec->getLoop(),
5239                       AddRec->getNoWrapFlags(SCEV::FlagNW));
5240       AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
5241       // The addrec may be folded to a nonrecurrence, for example, if the
5242       // induction variable is multiplied by zero after constant folding. Go
5243       // ahead and return the folded value.
5244       if (!AddRec)
5245         return FoldedRec;
5246       break;
5247     }
5248
5249     // If the scope is outside the addrec's loop, evaluate it by using the
5250     // loop exit value of the addrec.
5251     if (!AddRec->getLoop()->contains(L)) {
5252       // To evaluate this recurrence, we need to know how many times the AddRec
5253       // loop iterates. Compute this now.
5254       const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
5255       if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
5256
5257       // Then, evaluate the AddRec.
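      // evaluateAtIteration uses the chrec closed form
      //   {a0,+,a1,+,...,am}(n) = sum_i a_i * (n choose i),
      // e.g. {A,+,B}(n) = A + B*n.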
5258 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 5259 } 5260 5261 return AddRec; 5262 } 5263 5264 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 5265 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 5266 if (Op == Cast->getOperand()) 5267 return Cast; // must be loop invariant 5268 return getZeroExtendExpr(Op, Cast->getType()); 5269 } 5270 5271 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 5272 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 5273 if (Op == Cast->getOperand()) 5274 return Cast; // must be loop invariant 5275 return getSignExtendExpr(Op, Cast->getType()); 5276 } 5277 5278 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 5279 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 5280 if (Op == Cast->getOperand()) 5281 return Cast; // must be loop invariant 5282 return getTruncateExpr(Op, Cast->getType()); 5283 } 5284 5285 llvm_unreachable("Unknown SCEV type!"); 5286 return 0; 5287 } 5288 5289 /// getSCEVAtScope - This is a convenience function which does 5290 /// getSCEVAtScope(getSCEV(V), L). 5291 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 5292 return getSCEVAtScope(getSCEV(V), L); 5293 } 5294 5295 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the 5296 /// following equation: 5297 /// 5298 /// A * X = B (mod N) 5299 /// 5300 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 5301 /// A and B isn't important. 5302 /// 5303 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 5304 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B, 5305 ScalarEvolution &SE) { 5306 uint32_t BW = A.getBitWidth(); 5307 assert(BW == B.getBitWidth() && "Bit widths must be the same."); 5308 assert(A != 0 && "A must be non-zero."); 5309 5310 // 1. D = gcd(A, N) 5311 // 5312 // The gcd of A and N may have only one prime factor: 2. The number of 5313 // trailing zeros in A is its multiplicity 5314 uint32_t Mult2 = A.countTrailingZeros(); 5315 // D = 2^Mult2 5316 5317 // 2. Check if B is divisible by D. 5318 // 5319 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 5320 // is not less than multiplicity of this prime factor for D. 5321 if (B.countTrailingZeros() < Mult2) 5322 return SE.getCouldNotCompute(); 5323 5324 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 5325 // modulo (N / D). 5326 // 5327 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this 5328 // bit width during computations. 5329 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 5330 APInt Mod(BW + 1, 0); 5331 Mod.setBit(BW - Mult2); // Mod = N / D 5332 APInt I = AD.multiplicativeInverse(Mod); 5333 5334 // 4. Compute the minimum unsigned root of the equation: 5335 // I * (B / D) mod (N / D) 5336 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod); 5337 5338 // The result is guaranteed to be less than 2^BW so we may truncate it to BW 5339 // bits. 5340 return SE.getConstant(Result.trunc(BW)); 5341 } 5342 5343 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the 5344 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which 5345 /// might be the same) or two SCEVCouldNotCompute objects. 
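/// Evaluated at iteration X, the chrec {L,+,M,+,N} equals
/// L + M*X + N*X*(X-1)/2, i.e. the polynomial (N/2)X^2 + (M - N/2)X + L;
/// those are the A, B, C coefficients formed below.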
5346 ///
5347 static std::pair<const SCEV *,const SCEV *>
5348 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
5349   assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
5350   const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
5351   const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
5352   const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
5353
5354   // We currently can only solve this if the coefficients are constants.
5355   if (!LC || !MC || !NC) {
5356     const SCEV *CNC = SE.getCouldNotCompute();
5357     return std::make_pair(CNC, CNC);
5358   }
5359
5360   uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
5361   const APInt &L = LC->getValue()->getValue();
5362   const APInt &M = MC->getValue()->getValue();
5363   const APInt &N = NC->getValue()->getValue();
5364   APInt Two(BitWidth, 2);
5365   APInt Four(BitWidth, 4);
5366
5367   {
5368     using namespace APIntOps;
5369     const APInt& C = L;
5370     // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.
5371     // The B coefficient is M-N/2.
5372     APInt B(M);
5373     B -= sdiv(N,Two);
5374
5375     // The A coefficient is N/2.
5376     APInt A(N.sdiv(Two));
5377
5378     // Compute the B^2-4AC term.
5379     APInt SqrtTerm(B);
5380     SqrtTerm *= B;
5381     SqrtTerm -= Four * (A * C);
5382
5383     // Compute sqrt(B^2-4AC). This is guaranteed to be the nearest
5384     // integer value or else APInt::sqrt() will assert.
5385     APInt SqrtVal(SqrtTerm.sqrt());
5386
5387     // Compute the two solutions for the quadratic formula.
5388     // The divisions must be performed as signed divisions.
5389     APInt NegB(-B);
5390     APInt TwoA(A << 1);
5391     if (TwoA.isMinValue()) {
5392       const SCEV *CNC = SE.getCouldNotCompute();
5393       return std::make_pair(CNC, CNC);
5394     }
5395
5396     LLVMContext &Context = SE.getContext();
5397
5398     ConstantInt *Solution1 =
5399       ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
5400     ConstantInt *Solution2 =
5401       ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
5402
5403     return std::make_pair(SE.getConstant(Solution1),
5404                           SE.getConstant(Solution2));
5405   } // end APIntOps scope
5406 }
5407
5408 /// HowFarToZero - Return the number of times a backedge comparing the specified
5409 /// value to zero will execute. If not computable, return CouldNotCompute.
5410 ///
5411 /// This is only used for loops with an "x != y" exit test. The exit condition
5412 /// is now expressed as a single expression, V = x-y. So the exit test is
5413 /// effectively V != 0. We know, and take advantage of, the fact that this
5414 /// expression is only used in a comparison-with-zero context.
5415 ScalarEvolution::ExitLimit
5416 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
5417   // If the value is a constant:
5418   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
5419     // If the value is already zero, the branch will execute zero times.
5420     if (C->getValue()->isZero()) return C;
5421     return getCouldNotCompute(); // Otherwise it will loop infinitely.
5422   }
5423
5424   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
5425   if (!AddRec || AddRec->getLoop() != L)
5426     return getCouldNotCompute();
5427
5428   // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
5429   // the quadratic equation to solve it.
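  // For example, the chrec {-20,+,0,+,2} corresponds to the polynomial
  // X^2 - X - 20, whose positive root X = 5 is the first iteration at which
  // the chrec evaluates to zero.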
5430 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 5431 std::pair<const SCEV *,const SCEV *> Roots = 5432 SolveQuadraticEquation(AddRec, *this); 5433 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 5434 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 5435 if (R1 && R2) { 5436 #if 0 5437 dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1 5438 << " sol#2: " << *R2 << "\n"; 5439 #endif 5440 // Pick the smallest positive root value. 5441 if (ConstantInt *CB = 5442 dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT, 5443 R1->getValue(), 5444 R2->getValue()))) { 5445 if (CB->getZExtValue() == false) 5446 std::swap(R1, R2); // R1 is the minimum root now. 5447 5448 // We can only use this value if the chrec ends up with an exact zero 5449 // value at this index. When solving for "X*X != 5", for example, we 5450 // should not accept a root of 2. 5451 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); 5452 if (Val->isZero()) 5453 return R1; // We found a quadratic root! 5454 } 5455 } 5456 return getCouldNotCompute(); 5457 } 5458 5459 // Otherwise we can only handle this if it is affine. 5460 if (!AddRec->isAffine()) 5461 return getCouldNotCompute(); 5462 5463 // If this is an affine expression, the execution count of this branch is 5464 // the minimum unsigned root of the following equation: 5465 // 5466 // Start + Step*N = 0 (mod 2^BW) 5467 // 5468 // equivalent to: 5469 // 5470 // Step*N = -Start (mod 2^BW) 5471 // 5472 // where BW is the common bit width of Start and Step. 5473 5474 // Get the initial value for the loop. 5475 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 5476 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 5477 5478 // For now we handle only constant steps. 5479 // 5480 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 5481 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 5482 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 5483 // We have not yet seen any such cases. 5484 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 5485 if (StepC == 0) 5486 return getCouldNotCompute(); 5487 5488 // For positive steps (counting up until unsigned overflow): 5489 // N = -Start/Step (as unsigned) 5490 // For negative steps (counting down to zero): 5491 // N = Start/-Step 5492 // First compute the unsigned distance from zero in the direction of Step. 5493 bool CountDown = StepC->getValue()->getValue().isNegative(); 5494 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 5495 5496 // Handle unitary steps, which cannot wraparound. 5497 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 5498 // N = Distance (as unsigned) 5499 if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) { 5500 ConstantRange CR = getUnsignedRange(Start); 5501 const SCEV *MaxBECount; 5502 if (!CountDown && CR.getUnsignedMin().isMinValue()) 5503 // When counting up, the worst starting value is 1, not 0. 5504 MaxBECount = CR.getUnsignedMax().isMinValue() 5505 ? getConstant(APInt::getMinValue(CR.getBitWidth())) 5506 : getConstant(APInt::getMaxValue(CR.getBitWidth())); 5507 else 5508 MaxBECount = getConstant(CountDown ? CR.getUnsignedMax() 5509 : -CR.getUnsignedMin()); 5510 return ExitLimit(Distance, MaxBECount); 5511 } 5512 5513 // If the recurrence is known not to wraparound, unsigned divide computes the 5514 // back edge count. 
We know that either the value will become zero (and thus
5515 // the loop terminates), the loop will terminate through some other exit
5516 // condition first, or the loop has undefined behavior. This means
5517 // we can't "miss" the exit value, even with nonunit stride.
5518 //
5519 // FIXME: Prove that loops always exhibit *acceptable* undefined
5520 // behavior. Loops must exhibit defined behavior until a wrapped value is
5521 // actually used. So the trip count computed by udiv could be smaller than the
5522 // number of well-defined iterations.
5523   if (AddRec->getNoWrapFlags(SCEV::FlagNW)) {
5524     // FIXME: We really want an "isexact" bit for udiv.
5525     return getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
5526   }
5527   // Then, try to solve the above equation provided that Start is constant.
5528   if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
5529     return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
5530                                         -StartC->getValue()->getValue(),
5531                                         *this);
5532   return getCouldNotCompute();
5533 }
5534
5535 /// HowFarToNonZero - Return the number of times a backedge checking the
5536 /// specified value for nonzero will execute. If not computable, return
5537 /// CouldNotCompute.
5538 ScalarEvolution::ExitLimit
5539 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
5540   // Loops that look like: while (X == 0) are very strange indeed. We don't
5541   // handle them yet except for the trivial case. This could be expanded in the
5542   // future as needed.
5543
5544   // If the value is a constant, check to see if it is known to be non-zero
5545   // already. If so, the backedge will execute zero times.
5546   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
5547     if (!C->getValue()->isNullValue())
5548       return getConstant(C->getType(), 0);
5549     return getCouldNotCompute(); // Otherwise it will loop infinitely.
5550   }
5551
5552   // We could implement others, but I really doubt anyone writes loops like
5553   // this, and if they did, they would already be constant folded.
5554   return getCouldNotCompute();
5555 }
5556
5557 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
5558 /// (which may not be an immediate predecessor) which has exactly one
5559 /// successor from which BB is reachable, or a null pair if no such block
5560 /// is found.
5561 ///
5562 std::pair<BasicBlock *, BasicBlock *>
5563 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
5564   // If the block has a unique predecessor, then there is no path from the
5565   // predecessor to the block that does not go through the direct edge
5566   // from the predecessor to the block.
5567   if (BasicBlock *Pred = BB->getSinglePredecessor())
5568     return std::make_pair(Pred, BB);
5569
5570   // A loop's header is defined to be a block that dominates the loop.
5571   // If the header has a unique predecessor outside the loop, it must be
5572   // a block that has exactly one successor that can reach the loop.
5573   if (Loop *L = LI->getLoopFor(BB))
5574     return std::make_pair(L->getLoopPredecessor(), L->getHeader());
5575
5576   return std::pair<BasicBlock *, BasicBlock *>();
5577 }
5578
5579 /// HasSameValue - SCEV structural equivalence is usually sufficient for
5580 /// testing whether two expressions are equal, however for the purposes of
5581 /// looking for a condition guarding a loop, it can be useful to be a little
5582 /// more general, since a front-end may have replicated the controlling
5583 /// expression.
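/// For example, two identical instructions in different blocks (that don't
/// read memory) map to distinct SCEVUnknowns yet always compute the same
/// value; the isIdenticalTo check below catches this.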
5584 /// 5585 static bool HasSameValue(const SCEV *A, const SCEV *B) { 5586 // Quick check to see if they are the same SCEV. 5587 if (A == B) return true; 5588 5589 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 5590 // two different instructions with the same value. Check for this case. 5591 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 5592 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 5593 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 5594 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 5595 if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory()) 5596 return true; 5597 5598 // Otherwise assume they may have a different value. 5599 return false; 5600 } 5601 5602 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with 5603 /// predicate Pred. Return true iff any changes were made. 5604 /// 5605 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 5606 const SCEV *&LHS, const SCEV *&RHS) { 5607 bool Changed = false; 5608 5609 // Canonicalize a constant to the right side. 5610 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 5611 // Check for both operands constant. 5612 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 5613 if (ConstantExpr::getICmp(Pred, 5614 LHSC->getValue(), 5615 RHSC->getValue())->isNullValue()) 5616 goto trivially_false; 5617 else 5618 goto trivially_true; 5619 } 5620 // Otherwise swap the operands to put the constant on the right. 5621 std::swap(LHS, RHS); 5622 Pred = ICmpInst::getSwappedPredicate(Pred); 5623 Changed = true; 5624 } 5625 5626 // If we're comparing an addrec with a value which is loop-invariant in the 5627 // addrec's loop, put the addrec on the left. Also make a dominance check, 5628 // as both operands could be addrecs loop-invariant in each other's loop. 5629 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 5630 const Loop *L = AR->getLoop(); 5631 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 5632 std::swap(LHS, RHS); 5633 Pred = ICmpInst::getSwappedPredicate(Pred); 5634 Changed = true; 5635 } 5636 } 5637 5638 // If there's a constant operand, canonicalize comparisons with boundary 5639 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 
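  // For example, "x uge 1" becomes "x ne 0", "x ule UINT_MAX-1" becomes
  // "x ne UINT_MAX", and a non-boundary case like "x ule 4" becomes
  // "x ult 5".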
5640 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 5641 const APInt &RA = RC->getValue()->getValue(); 5642 switch (Pred) { 5643 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 5644 case ICmpInst::ICMP_EQ: 5645 case ICmpInst::ICMP_NE: 5646 break; 5647 case ICmpInst::ICMP_UGE: 5648 if ((RA - 1).isMinValue()) { 5649 Pred = ICmpInst::ICMP_NE; 5650 RHS = getConstant(RA - 1); 5651 Changed = true; 5652 break; 5653 } 5654 if (RA.isMaxValue()) { 5655 Pred = ICmpInst::ICMP_EQ; 5656 Changed = true; 5657 break; 5658 } 5659 if (RA.isMinValue()) goto trivially_true; 5660 5661 Pred = ICmpInst::ICMP_UGT; 5662 RHS = getConstant(RA - 1); 5663 Changed = true; 5664 break; 5665 case ICmpInst::ICMP_ULE: 5666 if ((RA + 1).isMaxValue()) { 5667 Pred = ICmpInst::ICMP_NE; 5668 RHS = getConstant(RA + 1); 5669 Changed = true; 5670 break; 5671 } 5672 if (RA.isMinValue()) { 5673 Pred = ICmpInst::ICMP_EQ; 5674 Changed = true; 5675 break; 5676 } 5677 if (RA.isMaxValue()) goto trivially_true; 5678 5679 Pred = ICmpInst::ICMP_ULT; 5680 RHS = getConstant(RA + 1); 5681 Changed = true; 5682 break; 5683 case ICmpInst::ICMP_SGE: 5684 if ((RA - 1).isMinSignedValue()) { 5685 Pred = ICmpInst::ICMP_NE; 5686 RHS = getConstant(RA - 1); 5687 Changed = true; 5688 break; 5689 } 5690 if (RA.isMaxSignedValue()) { 5691 Pred = ICmpInst::ICMP_EQ; 5692 Changed = true; 5693 break; 5694 } 5695 if (RA.isMinSignedValue()) goto trivially_true; 5696 5697 Pred = ICmpInst::ICMP_SGT; 5698 RHS = getConstant(RA - 1); 5699 Changed = true; 5700 break; 5701 case ICmpInst::ICMP_SLE: 5702 if ((RA + 1).isMaxSignedValue()) { 5703 Pred = ICmpInst::ICMP_NE; 5704 RHS = getConstant(RA + 1); 5705 Changed = true; 5706 break; 5707 } 5708 if (RA.isMinSignedValue()) { 5709 Pred = ICmpInst::ICMP_EQ; 5710 Changed = true; 5711 break; 5712 } 5713 if (RA.isMaxSignedValue()) goto trivially_true; 5714 5715 Pred = ICmpInst::ICMP_SLT; 5716 RHS = getConstant(RA + 1); 5717 Changed = true; 5718 break; 5719 case ICmpInst::ICMP_UGT: 5720 if (RA.isMinValue()) { 5721 Pred = ICmpInst::ICMP_NE; 5722 Changed = true; 5723 break; 5724 } 5725 if ((RA + 1).isMaxValue()) { 5726 Pred = ICmpInst::ICMP_EQ; 5727 RHS = getConstant(RA + 1); 5728 Changed = true; 5729 break; 5730 } 5731 if (RA.isMaxValue()) goto trivially_false; 5732 break; 5733 case ICmpInst::ICMP_ULT: 5734 if (RA.isMaxValue()) { 5735 Pred = ICmpInst::ICMP_NE; 5736 Changed = true; 5737 break; 5738 } 5739 if ((RA - 1).isMinValue()) { 5740 Pred = ICmpInst::ICMP_EQ; 5741 RHS = getConstant(RA - 1); 5742 Changed = true; 5743 break; 5744 } 5745 if (RA.isMinValue()) goto trivially_false; 5746 break; 5747 case ICmpInst::ICMP_SGT: 5748 if (RA.isMinSignedValue()) { 5749 Pred = ICmpInst::ICMP_NE; 5750 Changed = true; 5751 break; 5752 } 5753 if ((RA + 1).isMaxSignedValue()) { 5754 Pred = ICmpInst::ICMP_EQ; 5755 RHS = getConstant(RA + 1); 5756 Changed = true; 5757 break; 5758 } 5759 if (RA.isMaxSignedValue()) goto trivially_false; 5760 break; 5761 case ICmpInst::ICMP_SLT: 5762 if (RA.isMaxSignedValue()) { 5763 Pred = ICmpInst::ICMP_NE; 5764 Changed = true; 5765 break; 5766 } 5767 if ((RA - 1).isMinSignedValue()) { 5768 Pred = ICmpInst::ICMP_EQ; 5769 RHS = getConstant(RA - 1); 5770 Changed = true; 5771 break; 5772 } 5773 if (RA.isMinSignedValue()) goto trivially_false; 5774 break; 5775 } 5776 } 5777 5778 // Check for obvious equality. 
5779 if (HasSameValue(LHS, RHS)) { 5780 if (ICmpInst::isTrueWhenEqual(Pred)) 5781 goto trivially_true; 5782 if (ICmpInst::isFalseWhenEqual(Pred)) 5783 goto trivially_false; 5784 } 5785 5786 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 5787 // adding or subtracting 1 from one of the operands. 5788 switch (Pred) { 5789 case ICmpInst::ICMP_SLE: 5790 if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) { 5791 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 5792 SCEV::FlagNSW); 5793 Pred = ICmpInst::ICMP_SLT; 5794 Changed = true; 5795 } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) { 5796 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 5797 SCEV::FlagNSW); 5798 Pred = ICmpInst::ICMP_SLT; 5799 Changed = true; 5800 } 5801 break; 5802 case ICmpInst::ICMP_SGE: 5803 if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) { 5804 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 5805 SCEV::FlagNSW); 5806 Pred = ICmpInst::ICMP_SGT; 5807 Changed = true; 5808 } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) { 5809 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 5810 SCEV::FlagNSW); 5811 Pred = ICmpInst::ICMP_SGT; 5812 Changed = true; 5813 } 5814 break; 5815 case ICmpInst::ICMP_ULE: 5816 if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) { 5817 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 5818 SCEV::FlagNUW); 5819 Pred = ICmpInst::ICMP_ULT; 5820 Changed = true; 5821 } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) { 5822 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 5823 SCEV::FlagNUW); 5824 Pred = ICmpInst::ICMP_ULT; 5825 Changed = true; 5826 } 5827 break; 5828 case ICmpInst::ICMP_UGE: 5829 if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) { 5830 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 5831 SCEV::FlagNUW); 5832 Pred = ICmpInst::ICMP_UGT; 5833 Changed = true; 5834 } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) { 5835 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 5836 SCEV::FlagNUW); 5837 Pred = ICmpInst::ICMP_UGT; 5838 Changed = true; 5839 } 5840 break; 5841 default: 5842 break; 5843 } 5844 5845 // TODO: More simplifications are possible here. 5846 5847 return Changed; 5848 5849 trivially_true: 5850 // Return 0 == 0. 5851 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 5852 Pred = ICmpInst::ICMP_EQ; 5853 return true; 5854 5855 trivially_false: 5856 // Return 0 != 0. 5857 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 5858 Pred = ICmpInst::ICMP_NE; 5859 return true; 5860 } 5861 5862 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 5863 return getSignedRange(S).getSignedMax().isNegative(); 5864 } 5865 5866 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 5867 return getSignedRange(S).getSignedMin().isStrictlyPositive(); 5868 } 5869 5870 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 5871 return !getSignedRange(S).getSignedMin().isNegative(); 5872 } 5873 5874 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 5875 return !getSignedRange(S).getSignedMax().isStrictlyPositive(); 5876 } 5877 5878 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 5879 return isKnownNegative(S) || isKnownPositive(S); 5880 } 5881 5882 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 5883 const SCEV *LHS, const SCEV *RHS) { 5884 // Canonicalize the inputs first. 
5885 (void)SimplifyICmpOperands(Pred, LHS, RHS); 5886 5887 // If LHS or RHS is an addrec, check to see if the condition is true in 5888 // every iteration of the loop. 5889 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 5890 if (isLoopEntryGuardedByCond( 5891 AR->getLoop(), Pred, AR->getStart(), RHS) && 5892 isLoopBackedgeGuardedByCond( 5893 AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS)) 5894 return true; 5895 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) 5896 if (isLoopEntryGuardedByCond( 5897 AR->getLoop(), Pred, LHS, AR->getStart()) && 5898 isLoopBackedgeGuardedByCond( 5899 AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this))) 5900 return true; 5901 5902 // Otherwise see what can be done with known constant ranges. 5903 return isKnownPredicateWithRanges(Pred, LHS, RHS); 5904 } 5905 5906 bool 5907 ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred, 5908 const SCEV *LHS, const SCEV *RHS) { 5909 if (HasSameValue(LHS, RHS)) 5910 return ICmpInst::isTrueWhenEqual(Pred); 5911 5912 // This code is split out from isKnownPredicate because it is called from 5913 // within isLoopEntryGuardedByCond. 5914 switch (Pred) { 5915 default: 5916 llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 5917 break; 5918 case ICmpInst::ICMP_SGT: 5919 Pred = ICmpInst::ICMP_SLT; 5920 std::swap(LHS, RHS); 5921 case ICmpInst::ICMP_SLT: { 5922 ConstantRange LHSRange = getSignedRange(LHS); 5923 ConstantRange RHSRange = getSignedRange(RHS); 5924 if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin())) 5925 return true; 5926 if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax())) 5927 return false; 5928 break; 5929 } 5930 case ICmpInst::ICMP_SGE: 5931 Pred = ICmpInst::ICMP_SLE; 5932 std::swap(LHS, RHS); 5933 case ICmpInst::ICMP_SLE: { 5934 ConstantRange LHSRange = getSignedRange(LHS); 5935 ConstantRange RHSRange = getSignedRange(RHS); 5936 if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin())) 5937 return true; 5938 if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax())) 5939 return false; 5940 break; 5941 } 5942 case ICmpInst::ICMP_UGT: 5943 Pred = ICmpInst::ICMP_ULT; 5944 std::swap(LHS, RHS); 5945 case ICmpInst::ICMP_ULT: { 5946 ConstantRange LHSRange = getUnsignedRange(LHS); 5947 ConstantRange RHSRange = getUnsignedRange(RHS); 5948 if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin())) 5949 return true; 5950 if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax())) 5951 return false; 5952 break; 5953 } 5954 case ICmpInst::ICMP_UGE: 5955 Pred = ICmpInst::ICMP_ULE; 5956 std::swap(LHS, RHS); 5957 case ICmpInst::ICMP_ULE: { 5958 ConstantRange LHSRange = getUnsignedRange(LHS); 5959 ConstantRange RHSRange = getUnsignedRange(RHS); 5960 if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin())) 5961 return true; 5962 if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax())) 5963 return false; 5964 break; 5965 } 5966 case ICmpInst::ICMP_NE: { 5967 if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet()) 5968 return true; 5969 if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet()) 5970 return true; 5971 5972 const SCEV *Diff = getMinusSCEV(LHS, RHS); 5973 if (isKnownNonZero(Diff)) 5974 return true; 5975 break; 5976 } 5977 case ICmpInst::ICMP_EQ: 5978 // The check at the top of the function catches the case where 5979 // the values are known to be equal. 
  switch (Pred) {
  default:
    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
    break;
  case ICmpInst::ICMP_SGT:
    Pred = ICmpInst::ICMP_SLT;
    std::swap(LHS, RHS);
    // FALL THROUGH.
  case ICmpInst::ICMP_SLT: {
    ConstantRange LHSRange = getSignedRange(LHS);
    ConstantRange RHSRange = getSignedRange(RHS);
    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
      return true;
    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_SGE:
    Pred = ICmpInst::ICMP_SLE;
    std::swap(LHS, RHS);
    // FALL THROUGH.
  case ICmpInst::ICMP_SLE: {
    ConstantRange LHSRange = getSignedRange(LHS);
    ConstantRange RHSRange = getSignedRange(RHS);
    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
      return true;
    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_UGT:
    Pred = ICmpInst::ICMP_ULT;
    std::swap(LHS, RHS);
    // FALL THROUGH.
  case ICmpInst::ICMP_ULT: {
    ConstantRange LHSRange = getUnsignedRange(LHS);
    ConstantRange RHSRange = getUnsignedRange(RHS);
    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
      return true;
    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_UGE:
    Pred = ICmpInst::ICMP_ULE;
    std::swap(LHS, RHS);
    // FALL THROUGH.
  case ICmpInst::ICMP_ULE: {
    ConstantRange LHSRange = getUnsignedRange(LHS);
    ConstantRange RHSRange = getUnsignedRange(RHS);
    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
      return true;
    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_NE: {
    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
      return true;
    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
      return true;

    const SCEV *Diff = getMinusSCEV(LHS, RHS);
    if (isKnownNonZero(Diff))
      return true;
    break;
  }
  case ICmpInst::ICMP_EQ:
    // The check at the top of the function catches the case where
    // the values are known to be equal.
    break;
  }
  return false;
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
    dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LoopContinuePredicate ||
      LoopContinuePredicate->isUnconditional())
    return false;

  return isImpliedCond(Pred, LHS, RHS,
                       LoopContinuePredicate->getCondition(),
                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
}

/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
/// by a conditional between LHS and RHS. This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
bool
ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  for (std::pair<BasicBlock *, BasicBlock *>
         Pair(L->getLoopPredecessor(), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    BranchInst *LoopEntryPredicate =
      dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (isImpliedCond(Pred, LHS, RHS,
                      LoopEntryPredicate->getCondition(),
                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  return false;
}
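// Illustrative shape of the walk above: given a CFG of the form
// entry -> guard -> preheader -> header, the loop predecessor is the
// preheader; the chain then climbs to the guard block, whose conditional
// branch may imply the queried predicate.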
/// isImpliedCond - Test whether the condition described by Pred, LHS,
/// and RHS is true whenever the given Cond value evaluates to true.
bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    Value *FoundCondValue,
                                    bool Inverse) {
  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Bail if the ICmp's operands' types are wider than the needed type
  // before attempting to call getSCEV on them. This avoids infinite
  // recursion, since the analysis of widening casts can require loop
  // exit condition information for overflow checking, which would
  // lead back here.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(ICI->getOperand(0)->getType()))
    return false;

  // Now that we found a conditional branch that dominates the loop, check to
  // see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  // Balance the types. The case where FoundLHS' type is wider than
  // LHS' type is checked for above.
  if (getTypeSizeInBits(LHS->getType()) >
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      // The found condition folded to a self-comparison. If that comparison
      // is false when equal, the premise can never hold, so the implication
      // is vacuously true.
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}
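// For example (illustrative): a dominating guard 'icmp sgt %n, 0' can
// establish a query 'icmp sge %n, 1' for the same %n. The canonicalization
// above rewrites the query to 'icmp sgt %n, 0', at which point the found
// and desired predicates match and the check is delegated to
// isImpliedCondOperands.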
/// isImpliedCondOperands - Test whether the condition described by Pred,
/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
/// and FoundRHS is true.
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y  -->  x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}

/// isImpliedCondOperandsHelper - Test whether the condition described by
/// Pred, LHS, and RHS is true whenever the condition described by Pred,
/// FoundLHS, and FoundRHS is true.
bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  return false;
}

/// getBECount - Subtract the end and start values and divide by the step,
/// rounding up, to get the number of times the backedge is executed. Return
/// CouldNotCompute if an intermediate computation overflows.
const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
                                        const SCEV *End,
                                        const SCEV *Step,
                                        bool NoWrap) {
  assert(!isKnownNegative(Step) &&
         "This code doesn't handle negative strides yet!");

  Type *Ty = Start->getType();

  // When Start == End, we have an exact BECount == 0. Short-circuit this case
  // here because SCEV may not be able to determine that the unsigned division
  // after rounding is zero.
  if (Start == End)
    return getConstant(Ty, 0);

  const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
  const SCEV *Diff = getMinusSCEV(End, Start);
  const SCEV *RoundUp = getAddExpr(Step, NegOne);

  // Add an adjustment to the difference between End and Start so that
  // the division will effectively round up.
  const SCEV *Add = getAddExpr(Diff, RoundUp);

  if (!NoWrap) {
    // Check Add for unsigned overflow.
    // TODO: More sophisticated things could be done here.
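    // The check below widens Diff and RoundUp by one bit and compares their
    // wide sum against the zero-extended narrow sum. Since SCEVs are
    // uniqued, pointer inequality means the two are not provably equal, and
    // overflow is conservatively assumed. E.g. in i8 (values illustrative),
    // 250 + 10 wraps to 4, while the i9 sum is 260.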
    Type *WideTy = IntegerType::get(getContext(),
                                    getTypeSizeInBits(Ty) + 1);
    const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
    const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
    const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
    if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
      return getCouldNotCompute();
  }

  return getUDivExpr(Add, Step);
}

/// HowManyLessThans - Return the number of times a backedge containing the
/// specified less-than comparison will execute. If not computable, return
/// CouldNotCompute.
ScalarEvolution::ExitLimit
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool isSigned) {
  // Only handle: "ADDREC < LoopInvariant".
  if (!isLoopInvariant(RHS, L)) return getCouldNotCompute();

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // Check to see if we have a flag which makes analysis easy.
  bool NoWrap = isSigned ?
    AddRec->getNoWrapFlags((SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNW)) :
    AddRec->getNoWrapFlags((SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNW));

  if (AddRec->isAffine()) {
    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
    const SCEV *Step = AddRec->getStepRecurrence(*this);

    if (Step->isZero())
      return getCouldNotCompute();
    if (Step->isOne()) {
      // With unit stride, the iteration never steps past the limit value.
    } else if (isKnownPositive(Step)) {
      // Test whether a positive iteration can step past the limit
      // value and past the maximum value for its type in a single step.
      // Note that it's not sufficient to check NoWrap here, because even
      // though the value after a wrap is undefined, it's not undefined
      // behavior, so if wrap does occur, the loop could either terminate or
      // loop infinitely, but in either case, the loop is guaranteed to
      // iterate at least until the iteration where the wrapping occurs.
      const SCEV *One = getConstant(Step->getType(), 1);
      if (isSigned) {
        APInt Max = APInt::getSignedMaxValue(BitWidth);
        if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
              .slt(getSignedRange(RHS).getSignedMax()))
          return getCouldNotCompute();
      } else {
        APInt Max = APInt::getMaxValue(BitWidth);
        if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
              .ult(getUnsignedRange(RHS).getUnsignedMax()))
          return getCouldNotCompute();
      }
    } else
      // TODO: Handle negative strides here and below.
      return getCouldNotCompute();

    // We know the LHS is of the form {n,+,s} and the RHS is some
    // loop-invariant m. So, we count the number of iterations in which
    // {n,+,s} < m is true. Note that we cannot simply return max(m-n,0)/s
    // because it's not safe to treat m-n as signed nor unsigned due to
    // overflow possibility.

    // First, we get the value of the LHS in the first iteration: n
    const SCEV *Start = AddRec->getOperand(0);

    // Determine the minimum constant start value.
    const SCEV *MinStart = getConstant(isSigned ?
      getSignedRange(Start).getSignedMin() :
      getUnsignedRange(Start).getUnsignedMin());
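    // Worked example (values illustrative): for {4,+,3} <s 14 we have n=4,
    // s=3, m=14, and the condition holds in ceil((14-4)/3) = 4 iterations
    // (i = 4, 7, 10, 13).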
    // If we know that the condition is true in order to enter the loop,
    // then we know that it will run exactly (m-n)/s times. Otherwise, we
    // only know that it will execute (max(m,n)-n)/s times. In both cases,
    // the division must round up.
    const SCEV *End = RHS;
    if (!isLoopEntryGuardedByCond(L,
                                  isSigned ? ICmpInst::ICMP_SLT :
                                             ICmpInst::ICMP_ULT,
                                  getMinusSCEV(Start, Step), RHS))
      End = isSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);

    // Determine the maximum constant end value.
    const SCEV *MaxEnd = getConstant(isSigned ?
      getSignedRange(End).getSignedMax() :
      getUnsignedRange(End).getUnsignedMax());

    // If MaxEnd is within a step of the maximum integer value in its type,
    // adjust it down to the minimum value which would produce the same
    // effect. This allows the subsequent ceiling division of
    // (N+(step-1))/step to compute the correct value.
    const SCEV *StepMinusOne = getMinusSCEV(Step,
                                            getConstant(Step->getType(), 1));
    MaxEnd = isSigned ?
      getSMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
                               StepMinusOne)) :
      getUMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
                               StepMinusOne));

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
    const SCEV *BECount = getBECount(Start, End, Step, NoWrap);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    // If we already have an exact constant BECount, use it instead.
    const SCEV *MaxBECount = isa<SCEVConstant>(BECount) ? BECount
      : getBECount(MinStart, MaxEnd, Step, NoWrap);

    // If the stride is nonconstant and NoWrap is true, then
    // getBECount(MinStart, MaxEnd) may fail to compute. That would leave us
    // with an exact BECount but an invalid MaxBECount; fall back to the
    // exact count so we don't lose optimization opportunities.
    if (isa<SCEVCouldNotCompute>(MaxBECount))
      MaxBECount = BECount;

    return ExitLimit(BECount, MaxBECount);
  }

  return getCouldNotCompute();
}
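// Illustrative end-to-end case for the routine above: for a loop
// 'i = 0; i <u n; ++i' the LHS is {0,+,1}, the condition holds in exactly
// n iterations, and the code above produces BECount = n.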
/// getNumIterationsInRange - Return the number of iterations of this loop
/// that produce values in the specified constant range. Another way of
/// looking at this is that it returns the first iteration number where the
/// value is not in the range, thus computing the exit count. If the
/// iteration count can't be computed, an instance of SCEVCouldNotCompute is
/// returned.
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getConstant(SC->getType(), 0);
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const SCEVAddRecExpr *ShiftedAddRec =
            dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getValue()->getValue()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!isa<SCEVConstant>(getOperand(i)))
      return SE.getCouldNotCompute();

  // Okay, at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero. If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getConstant(getType(), 0);

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range. If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value. Also note that we already checked for a full range.
    APInt One(BitWidth, 1);
    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value. If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened.

    // Ensure that the previous value is in the range. This is a sanity check.
    assert(Range.contains(
             EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - One),
               SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it. To do this, we must frame our
    // problem in terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
                                             // getNoWrapFlags(FlagNW)
                                             FlagAnyWrap);

    // Next, solve the constructed addrec.
    std::pair<const SCEV *, const SCEV *> Roots =
      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1) {
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
            dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
                                                        R1->getValue(),
                                                        R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2);  // R1 is the minimum root now.

        // Make sure the root is not off by one. The returned iteration
        // should not be in the range, but the previous one should be. When
        // solving for "X*X < 5", for example, we should not return a root
        // of 2.
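        // (Continuing that example: iteration 2 gives 4, which is still in
        // the range since 4 < 5, so the code below probes iteration 3, which
        // gives 9 and is outside; 3 is then the correct exit count.)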
        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
                                                             R1->getValue(),
                                                             SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
            ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened.
        }

        // If R1 was not in the range, then it is a good return value. Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
          ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened.
      }
    }
  }

  return SE.getCouldNotCompute();
}

//===----------------------------------------------------------------------===//
// SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->ValueExprMap.erase(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist;
  SmallPtrSet<User *, 8> Visited;
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE; ++UI)
    Worklist.push_back(*UI);
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U))
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->ValueExprMap.erase(U);
    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
         UI != UE; ++UI)
      Worklist.push_back(*UI);
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->ValueExprMap.erase(Old);
  // this now dangles!
}
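// Illustrative flow for the handle above: when a Value with a cached SCEV
// is replaced (RAUW), allUsesReplacedWith walks the old value's transitive
// users and drops their cached expressions, so later getSCEV queries
// recompute against the replacement value.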
ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
// ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution()
  : FunctionPass(ID), FirstUnknown(0) {
  initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolution::runOnFunction(Function &F) {
  this->F = &F;
  LI = &getAnalysis<LoopInfo>();
  TD = getAnalysisIfAvailable<TargetData>();
  DT = &getAnalysis<DominatorTree>();
  return false;
}

void ScalarEvolution::releaseMemory() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
    U->~SCEVUnknown();
  FirstUnknown = 0;

  ValueExprMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
  for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
         BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end();
       I != E; ++I) {
    I->second.clear();
  }

  BackedgeTakenCounts.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValuesAtScopes.clear();
  LoopDispositions.clear();
  BlockDispositions.clear();
  UnsignedRanges.clear();
  SignedRanges.clear();
  UniqueSCEVs.clear();
  SCEVAllocator.Reset();
}

void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
  AU.addRequiredTransitive<DominatorTree>();
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    PrintLoopInfo(OS, SE, *I);

  OS << "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n";
}
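// The output produced by PrintLoopInfo has this shape (values illustrative):
//   Loop %for.body: backedge-taken count is (-1 + %n)
//   Loop %for.body: max backedge-taken count is -1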
void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
      OS << *I << '\n';
      OS << " --> ";
      const SCEV *SV = SE.getSCEV(&*I);
      SV->print(OS);

      const Loop *L = LI->getLoopFor((*I).getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << " --> ";
        AtUse->print(OS);
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    PrintLoopInfo(OS, &SE, *I);
}

ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  std::map<const Loop *, LoopDisposition> &Values = LoopDispositions[S];
  std::pair<std::map<const Loop *, LoopDisposition>::iterator, bool> Pair =
    Values.insert(std::make_pair(L, LoopVariant));
  if (!Pair.second)
    return Pair.first->second;

  LoopDisposition D = computeLoopDisposition(S, L);
  return LoopDispositions[S][L] = D;
}
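// Note on the insert-then-check pattern above: a provisional LoopVariant
// entry is inserted before computeLoopDisposition runs, so any reentrant
// query for the same (S, L) pair sees a conservative answer instead of
// recursing forever.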
ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (S->getSCEVType()) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // This recurrence is variant w.r.t. L if L contains AR's loop.
    if (L->contains(AR->getLoop()))
      return LoopVariant;

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      if (!isLoopInvariant(*I, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool HasVarying = false;
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      LoopDisposition D = getLoopDisposition(*I, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are
    // loop invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    return LoopVariant;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
  return LoopVariant;
}

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  std::map<const BasicBlock *, BlockDisposition> &Values = BlockDispositions[S];
  std::pair<std::map<const BasicBlock *, BlockDisposition>::iterator, bool>
    Pair = Values.insert(std::make_pair(BB, DoesNotDominateBlock));
  if (!Pair.second)
    return Pair.first->second;

  BlockDisposition D = computeBlockDisposition(S, BB);
  return BlockDispositions[S][BB] = D;
}
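// getBlockDisposition uses the same provisional-entry trick as
// getLoopDisposition above. Note also that the enum ordering
// DoesNotDominateBlock < DominatesBlock < ProperlyDominatesBlock is what
// lets dominates() below test with a simple >= comparison.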
ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (S->getSCEVType()) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT->dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;
  }
  // FALL THROUGH into SCEVNAryExpr handling.
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      BlockDisposition D = getBlockDisposition(*I, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
           ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
          dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT->properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    return DoesNotDominateBlock;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
  return DoesNotDominateBlock;
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  switch (S->getSCEVType()) {
  case scConstant:
    return false;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *Cast = cast<SCEVCastExpr>(S);
    const SCEV *CastOp = Cast->getOperand();
    return Op == CastOp || hasOperand(CastOp, Op);
  }
  case scAddRecExpr:
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      const SCEV *NAryOp = *I;
      if (NAryOp == Op || hasOperand(NAryOp, Op))
        return true;
    }
    return false;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    return LHS == Op || hasOperand(LHS, Op) ||
           RHS == Op || hasOperand(RHS, Op);
  }
  case scUnknown:
    return false;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    return false;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
  return false;
}

void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
}