//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These classes are reference counted, managed by the const SCEV*
// class.  We only create one SCEV of a particular shape, so pointer-comparisons
// for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by brute force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant-derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//
SCEV::~SCEV() {}
void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const SCEV* SCEVCouldNotCompute::
replaceSymbolicValuesWithConcrete(const SCEV* Sym,
                                  const SCEV* Conc,
                                  ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}


// SCEVConstants - Only allow the creation of one SCEVConstant for any
// particular value.  Don't use a const SCEV* here, or else the object will
// never be deleted!

const SCEV* ScalarEvolution::getConstant(ConstantInt *V) {
  SCEVConstant *&R = SCEVConstants[V];
  if (R == 0) R = new SCEVConstant(V);
  return R;
}

const SCEV* ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(Val));
}

const SCEV*
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy,
                           const SCEV* op, const Type *ty)
  : SCEV(SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

// SCEVTruncates - Only allow the creation of one SCEVTruncateExpr for any
// particular input.  Don't use a const SCEV* here, or else the object will
// never be deleted!

SCEVTruncateExpr::SCEVTruncateExpr(const SCEV* op, const Type *ty)
  : SCEVCastExpr(scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}


void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

// SCEVZeroExtends - Only allow the creation of one SCEVZeroExtendExpr for any
// particular input.  Don't use a const SCEV* here, or else the object will
// never be deleted!

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV* op, const Type *ty)
  : SCEVCastExpr(scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

// SCEVSignExtends - Only allow the creation of one SCEVSignExtendExpr for any
// particular input.  Don't use a const SCEV* here, or else the object will
// never be deleted!

SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV* op, const Type *ty)
  : SCEVCastExpr(scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

// SCEVCommExprs - Only allow the creation of one SCEVCommutativeExpr for any
// particular input.  Don't use a const SCEV* here, or else the object will
// never be deleted!

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

const SCEV* SCEVCommutativeExpr::
replaceSymbolicValuesWithConcrete(const SCEV* Sym,
                                  const SCEV* Conc,
                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV* H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV*, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        assert(0 && "Unknown commutative expr!");
    }
  }
  return this;
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}


// SCEVUDivs - Only allow the creation of one SCEVUDivExpr for any particular
// input.  Don't use a const SCEV* here, or else the object will never be
// deleted!

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer.  ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander.  The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

// SCEVAddRecExprs - Only allow the creation of one SCEVAddRecExpr for any
// particular input.  Don't use a const SCEV* here, or else the object will
// never be deleted!

const SCEV* SCEVAddRecExpr::
replaceSymbolicValuesWithConcrete(const SCEV* Sym,
                                  const SCEV* Conc,
                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV* H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV*, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}


bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // This recurrence is invariant w.r.t. QueryLoop iff QueryLoop doesn't
  // contain L and the start is invariant.
  // Add recurrences are never invariant in the function-body (null loop).
  return QueryLoop &&
         !QueryLoop->contains(L->getHeader()) &&
         getOperand(0)->isLoopInvariant(QueryLoop);
}


void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() << ">";
}

// SCEVUnknowns - Only allow the creation of one SCEVUnknown for any particular
// value.  Don't use a const SCEV* here, or else the object will never be
// deleted!

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
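      // For example, this places all SCEVConstants ahead of the cast and
      // n-ary expressions, which is what lets the folders below assume
      // that when a constant is present it appears at Ops[0].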
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics.  TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values.  This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      assert(0 && "Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector
/// are consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV*> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely
  // to be extremely short in practice.  Note that we take this approach
  // because we do not want to depend on the addresses of the objects we are
  // grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume K > 0.
static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1).  However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
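  // For example, with W == 32 and K == 4: K! == 24 == 2^3 * 3, so T == 3
  // and OddFactorial == 3.  MultiplyFactor comes out to 0xAAAAAAAB, since
  // 3 * 0xAAAAAAAB == 1 (mod 2^32).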
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV* Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV* S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV* DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
const SCEV* SCEVAddRecExpr::evaluateAtIteration(const SCEV* It,
                                                ScalarEvolution &SE) const {
  const SCEV* Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV* Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getUnknown(
        ConstantExpr::getTrunc(SC->getValue(), Ty));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
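  // For example, truncating {4,+,2}<L> from i32 to i8 yields {4,+,2}<L>
  // over i8; truncation distributes over the recurrence because each
  // addition is simply re-performed modulo 2^8.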
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV*, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  SCEVTruncateExpr *&Result = SCEVTruncates[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVTruncateExpr(Op, Ty);
  return Result;
}

const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getUnknown(C);
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.  In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV* Start = AR->getStart();
        const SCEV* Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type.  The count is always unsigned.
        const SCEV* CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV* RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV* ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV* Add = getAddExpr(Start, ZMul);
          const SCEV* OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
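            // i.e. zext({Start,+,Step}) becomes {zext(Start),+,zext(Step)},
            // which is safe because we just proved the recurrence never
            // wraps in the narrow type.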
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 AR->getLoop());

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV* SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  SCEVZeroExtendExpr *&Result = SCEVZeroExtends[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVZeroExtendExpr(Op, Ty);
  return Result;
}

const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getUnknown(C);
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.  In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV* Start = AR->getStart();
        const SCEV* Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type.  The count is always unsigned.
        const SCEV* CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV* RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
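          // As in getZeroExtendExpr: evaluate the addrec at MaxBECount in
          // the narrow type and again with sign-extended operands in a type
          // twice as wide; if the two agree, the addrec cannot overflow.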
          const SCEV* SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV* Add = getAddExpr(Start, SMul);
          const SCEV* OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  SCEVSignExtendExpr *&Result = SCEVSignExtends[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVSignExtendExpr(Op, Ty);
  return Result;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV* NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast.  If the cast is folded, use it.
  const SCEV* ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast.  If the cast is folded, use it.
  const SCEV* SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map.  This is a helper function for getAddExpr.  As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///   (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///   13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed.  This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV*, APInt> &M,
                             SmallVector<const SCEV*, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV*> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value.  Update
        // the map.
        SmallVector<const SCEV*, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV* Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, APInt()));
        if (Pair.second) {
          Pair.first->second = NewScale;
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand.  Update the map.
      std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], APInt()));
      if (Pair.second) {
        Pair.first->second = Scale;
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
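      // For example, (1 + 2 + x) folds to (3 + x) here.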
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV* Two = getIntegerSCEV(2, Ty);
      const SCEV* Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates.  If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded.  e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV*, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV*, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV* Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it.  Otherwise, don't.
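      // Only a constant or a plain SCEVUnknown counts as "simple" here;
      // anything else would just reintroduce a truncate of an add, which
      // is no improvement over the original expression.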
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV*, APInt> M;
    SmallVector<const SCEV*, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile
      // to re-generate the operands list.  Group the operands by constant
      // scale, to avoid multiplying by the same constant scale multiple
      // times.
      std::map<APInt, SmallVector<const SCEV*, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV*, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV*, 4>, APIntCompare>::iterator
           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getIntegerSCEV(0, Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV* InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
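            // For example, for W + X + (X*Y*Z), strip the matched X out of
            // the multiply, leaving Y*Z as InnerMul.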
            SmallVector<const SCEV*, 4> MulOps(Mul->op_begin(), Mul->op_end());
            MulOps.erase(MulOps.begin()+MulOp);
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV* One = getIntegerSCEV(1, Ty);
          const SCEV* AddOne = getAddExpr(InnerMul, One);
          const SCEV* OuterMul = getMulExpr(AddOne, Ops[AddOp]);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV* InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV*, 4> MulOps(Mul->op_begin(),
                                                 Mul->op_end());
              MulOps.erase(MulOps.begin()+MulOp);
              InnerMul1 = getMulExpr(MulOps);
            }
            const SCEV* InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV*, 4> MulOps(OtherMul->op_begin(),
                                                 OtherMul->op_end());
              MulOps.erase(MulOps.begin()+OMulOp);
              InnerMul2 = getMulExpr(MulOps);
            }
            const SCEV* InnerMulSum = getAddExpr(InnerMul1, InnerMul2);
            const SCEV* OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV*, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV*, 4> AddRecOps(AddRec->op_begin(),
                                            AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      const SCEV* NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the other (non-loop-invariant)
      // parts.
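      // Find the original AddRec in the operand list, splice the folded
      // recurrence in where it was, and re-simplify the sum.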
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
          SmallVector<const SCEV*, 4> NewOps(AddRec->op_begin(),
                                             AddRec->op_end());
          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
            if (i >= NewOps.size()) {
              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
                            OtherAddRec->op_end());
              break;
            }
            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
          }
          const SCEV* NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());

          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getAddExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
  SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scAddExpr,
                                                              SCEVOps)];
  if (Result == 0) Result = new SCEVAddExpr(Ops);
  return Result;
}


/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV*> &Ops) {
  assert(!Ops.empty() && "Cannot get empty mul!");
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));


    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
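    // For example, 1 * x * y becomes x * y.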
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    }
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  if (Ops.size() == 1)
    return Ops[0];

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // multiplied values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV*, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV*, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      if (LIOps.size() == 1) {
        const SCEV *Scale = LIOps[0];
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
      } else {
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
          SmallVector<const SCEV*, 4> MulOps(LIOps.begin(), LIOps.end());
          MulOps.push_back(AddRec->getOperand(i));
          NewOps.push_back(getMulExpr(MulOps));
        }
      }

      const SCEV* NewRec = getAddRecExpr(NewOps, AddRec->getLoop());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the other
      // (non-loop-invariant) parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together.  If so, we can fold them.
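    // The product of two polynomial recurrences over the same loop is
    // itself a polynomial recurrence over that loop, so two such AddRecs
    // can be combined into one.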
1499     for (unsigned OtherIdx = Idx+1;
1500          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); ++OtherIdx)
1501       if (OtherIdx != Idx) {
1502         const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1503         if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1504           // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D}
1505           const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1506           const SCEV* NewStart = getMulExpr(F->getStart(),
1507                                             G->getStart());
1508           const SCEV* B = F->getStepRecurrence(*this);
1509           const SCEV* D = G->getStepRecurrence(*this);
1510           const SCEV* NewStep = getAddExpr(getMulExpr(F, D),
1511                                            getMulExpr(G, B),
1512                                            getMulExpr(B, D));
1513           const SCEV* NewAddRec = getAddRecExpr(NewStart, NewStep,
1514                                                 F->getLoop());
1515           if (Ops.size() == 2) return NewAddRec;
1516
1517           Ops.erase(Ops.begin()+Idx);
1518           Ops.erase(Ops.begin()+OtherIdx-1);
1519           Ops.push_back(NewAddRec);
1520           return getMulExpr(Ops);
1521         }
1522       }
1523
1524     // Otherwise couldn't fold anything into this recurrence. Move on to the
1525     // next one.
1526   }
1527
1528   // Okay, it looks like we really DO need a mul expr. Check to see if we
1529   // already have one, otherwise create a new one.
1530   std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
1531   SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scMulExpr,
1532                                                               SCEVOps)];
1533   if (Result == 0)
1534     Result = new SCEVMulExpr(Ops);
1535   return Result;
1536 }
1537
1538 /// getUDivExpr - Get a canonical unsigned division expression, or something
1539 /// simpler if possible.
1540 const SCEV* ScalarEvolution::getUDivExpr(const SCEV* LHS,
1541                                          const SCEV* RHS) {
1542   assert(getEffectiveSCEVType(LHS->getType()) ==
1543          getEffectiveSCEVType(RHS->getType()) &&
1544          "SCEVUDivExpr operand types don't match!");
1545
1546   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1547     if (RHSC->getValue()->equalsInt(1))
1548       return LHS; // X udiv 1 --> X
1549     if (RHSC->isZero())
1550       return getIntegerSCEV(0, LHS->getType()); // value is undefined
1551
1552     // Determine if the division can be folded into the operands of
1553     // its left-hand side.
1554     // TODO: Generalize this to non-constants by using known-bits information.
1555     const Type *Ty = LHS->getType();
1556     unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1557     unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1558     // For non-power-of-two values, effectively round the value up to the
1559     // nearest power of two.
1560     if (!RHSC->getValue()->getValue().isPowerOf2())
1561       ++MaxShiftAmt;
1562     const IntegerType *ExtTy =
1563       IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
1564     // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
1565     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1566       if (const SCEVConstant *Step =
1567             dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1568         if (!Step->getValue()->getValue()
1569               .urem(RHSC->getValue()->getValue()) &&
1570             getZeroExtendExpr(AR, ExtTy) ==
1571             getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1572                           getZeroExtendExpr(Step, ExtTy),
1573                           AR->getLoop())) {
1574           SmallVector<const SCEV*, 4> Operands;
1575           for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1576             Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1577           return getAddRecExpr(Operands, AR->getLoop());
1578         }
1579     // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
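    // For example, (4*X)/2 --> 2*X; the zero-extend comparison below verifies
    // that the multiply cannot wrap and change the quotient.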
1580     if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1581       SmallVector<const SCEV*, 4> Operands;
1582       for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1583         Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1584       if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1585         // Find an operand that's safely divisible.
1586         for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1587           const SCEV* Op = M->getOperand(i);
1588           const SCEV* Div = getUDivExpr(Op, RHSC);
1589           if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1590             const SmallVectorImpl<const SCEV*> &MOperands = M->getOperands();
1591             Operands = SmallVector<const SCEV*, 4>(MOperands.begin(),
1592                                                    MOperands.end());
1593             Operands[i] = Div;
1594             return getMulExpr(Operands);
1595           }
1596         }
1597     }
1598     // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1599     if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
1600       SmallVector<const SCEV*, 4> Operands;
1601       for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1602         Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1603       if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1604         Operands.clear();
1605         for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1606           const SCEV* Op = getUDivExpr(A->getOperand(i), RHS);
1607           if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1608             break;
1609           Operands.push_back(Op);
1610         }
1611         if (Operands.size() == A->getNumOperands())
1612           return getAddExpr(Operands);
1613       }
1614     }
1615
1616     // Fold if both operands are constant.
1617     if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1618       Constant *LHSCV = LHSC->getValue();
1619       Constant *RHSCV = RHSC->getValue();
1620       return getUnknown(ConstantExpr::getUDiv(LHSCV, RHSCV));
1621     }
1622   }
1623
1624   SCEVUDivExpr *&Result = SCEVUDivs[std::make_pair(LHS, RHS)];
1625   if (Result == 0) Result = new SCEVUDivExpr(LHS, RHS);
1626   return Result;
1627 }
1628
1629
1630 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1631 /// Simplify the expression as much as possible.
1632 const SCEV* ScalarEvolution::getAddRecExpr(const SCEV* Start,
1633                                            const SCEV* Step, const Loop *L) {
1634   SmallVector<const SCEV*, 4> Operands;
1635   Operands.push_back(Start);
1636   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1637     if (StepChrec->getLoop() == L) {
1638       Operands.insert(Operands.end(), StepChrec->op_begin(),
1639                       StepChrec->op_end());
1640       return getAddRecExpr(Operands, L);
1641     }
1642
1643   Operands.push_back(Step);
1644   return getAddRecExpr(Operands, L);
1645 }
1646
1647 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1648 /// Simplify the expression as much as possible.
1649 const SCEV* ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV*> &Operands,
1650                                            const Loop *L) {
1651   if (Operands.size() == 1) return Operands[0];
1652 #ifndef NDEBUG
1653   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1654     assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1655            getEffectiveSCEVType(Operands[0]->getType()) &&
1656            "SCEVAddRecExpr operand types don't match!");
1657 #endif
1658
1659   if (Operands.back()->isZero()) {
1660     Operands.pop_back();
1661     return getAddRecExpr(Operands, L); // {X,+,0} --> X
1662   }
1663
1664   // Canonicalize nested AddRecs by nesting them in order of loop depth.
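  // For example, {{S,+,X}<Inner>,+,Y}<Outer>, with Inner nested inside Outer,
  // is rewritten as {{S,+,Y}<Outer>,+,X}<Inner>.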
1665 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 1666 const Loop* NestedLoop = NestedAR->getLoop(); 1667 if (L->getLoopDepth() < NestedLoop->getLoopDepth()) { 1668 SmallVector<const SCEV*, 4> NestedOperands(NestedAR->op_begin(), 1669 NestedAR->op_end()); 1670 Operands[0] = NestedAR->getStart(); 1671 NestedOperands[0] = getAddRecExpr(Operands, L); 1672 return getAddRecExpr(NestedOperands, NestedLoop); 1673 } 1674 } 1675 1676 std::vector<const SCEV*> SCEVOps(Operands.begin(), Operands.end()); 1677 SCEVAddRecExpr *&Result = SCEVAddRecExprs[std::make_pair(L, SCEVOps)]; 1678 if (Result == 0) Result = new SCEVAddRecExpr(Operands, L); 1679 return Result; 1680 } 1681 1682 const SCEV* ScalarEvolution::getSMaxExpr(const SCEV* LHS, 1683 const SCEV* RHS) { 1684 SmallVector<const SCEV*, 2> Ops; 1685 Ops.push_back(LHS); 1686 Ops.push_back(RHS); 1687 return getSMaxExpr(Ops); 1688 } 1689 1690 const SCEV* 1691 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV*> &Ops) { 1692 assert(!Ops.empty() && "Cannot get empty smax!"); 1693 if (Ops.size() == 1) return Ops[0]; 1694 #ifndef NDEBUG 1695 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 1696 assert(getEffectiveSCEVType(Ops[i]->getType()) == 1697 getEffectiveSCEVType(Ops[0]->getType()) && 1698 "SCEVSMaxExpr operand types don't match!"); 1699 #endif 1700 1701 // Sort by complexity, this groups all similar expression types together. 1702 GroupByComplexity(Ops, LI); 1703 1704 // If there are any constants, fold them together. 1705 unsigned Idx = 0; 1706 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 1707 ++Idx; 1708 assert(Idx < Ops.size()); 1709 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 1710 // We found two constants, fold them together! 1711 ConstantInt *Fold = ConstantInt::get( 1712 APIntOps::smax(LHSC->getValue()->getValue(), 1713 RHSC->getValue()->getValue())); 1714 Ops[0] = getConstant(Fold); 1715 Ops.erase(Ops.begin()+1); // Erase the folded element 1716 if (Ops.size() == 1) return Ops[0]; 1717 LHSC = cast<SCEVConstant>(Ops[0]); 1718 } 1719 1720 // If we are left with a constant -inf, strip it off. 1721 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 1722 Ops.erase(Ops.begin()); 1723 --Idx; 1724 } 1725 } 1726 1727 if (Ops.size() == 1) return Ops[0]; 1728 1729 // Find the first SMax 1730 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 1731 ++Idx; 1732 1733 // Check to see if one of the operands is an SMax. If so, expand its operands 1734 // onto our operand list, and recurse to simplify. 1735 if (Idx < Ops.size()) { 1736 bool DeletedSMax = false; 1737 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 1738 Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end()); 1739 Ops.erase(Ops.begin()+Idx); 1740 DeletedSMax = true; 1741 } 1742 1743 if (DeletedSMax) 1744 return getSMaxExpr(Ops); 1745 } 1746 1747 // Okay, check to see if the same value occurs in the operand list twice. If 1748 // so, delete one. Since we sorted the list, these values are required to 1749 // be adjacent. 1750 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 1751 if (Ops[i] == Ops[i+1]) { // X smax Y smax Y --> X smax Y 1752 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 1753 --i; --e; 1754 } 1755 1756 if (Ops.size() == 1) return Ops[0]; 1757 1758 assert(!Ops.empty() && "Reduced smax down to nothing!"); 1759 1760 // Okay, it looks like we really DO need an smax expr. Check to see if we 1761 // already have one, otherwise create a new one. 
1762 std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end()); 1763 SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scSMaxExpr, 1764 SCEVOps)]; 1765 if (Result == 0) Result = new SCEVSMaxExpr(Ops); 1766 return Result; 1767 } 1768 1769 const SCEV* ScalarEvolution::getUMaxExpr(const SCEV* LHS, 1770 const SCEV* RHS) { 1771 SmallVector<const SCEV*, 2> Ops; 1772 Ops.push_back(LHS); 1773 Ops.push_back(RHS); 1774 return getUMaxExpr(Ops); 1775 } 1776 1777 const SCEV* 1778 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV*> &Ops) { 1779 assert(!Ops.empty() && "Cannot get empty umax!"); 1780 if (Ops.size() == 1) return Ops[0]; 1781 #ifndef NDEBUG 1782 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 1783 assert(getEffectiveSCEVType(Ops[i]->getType()) == 1784 getEffectiveSCEVType(Ops[0]->getType()) && 1785 "SCEVUMaxExpr operand types don't match!"); 1786 #endif 1787 1788 // Sort by complexity, this groups all similar expression types together. 1789 GroupByComplexity(Ops, LI); 1790 1791 // If there are any constants, fold them together. 1792 unsigned Idx = 0; 1793 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 1794 ++Idx; 1795 assert(Idx < Ops.size()); 1796 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 1797 // We found two constants, fold them together! 1798 ConstantInt *Fold = ConstantInt::get( 1799 APIntOps::umax(LHSC->getValue()->getValue(), 1800 RHSC->getValue()->getValue())); 1801 Ops[0] = getConstant(Fold); 1802 Ops.erase(Ops.begin()+1); // Erase the folded element 1803 if (Ops.size() == 1) return Ops[0]; 1804 LHSC = cast<SCEVConstant>(Ops[0]); 1805 } 1806 1807 // If we are left with a constant zero, strip it off. 1808 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 1809 Ops.erase(Ops.begin()); 1810 --Idx; 1811 } 1812 } 1813 1814 if (Ops.size() == 1) return Ops[0]; 1815 1816 // Find the first UMax 1817 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 1818 ++Idx; 1819 1820 // Check to see if one of the operands is a UMax. If so, expand its operands 1821 // onto our operand list, and recurse to simplify. 1822 if (Idx < Ops.size()) { 1823 bool DeletedUMax = false; 1824 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 1825 Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end()); 1826 Ops.erase(Ops.begin()+Idx); 1827 DeletedUMax = true; 1828 } 1829 1830 if (DeletedUMax) 1831 return getUMaxExpr(Ops); 1832 } 1833 1834 // Okay, check to see if the same value occurs in the operand list twice. If 1835 // so, delete one. Since we sorted the list, these values are required to 1836 // be adjacent. 1837 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 1838 if (Ops[i] == Ops[i+1]) { // X umax Y umax Y --> X umax Y 1839 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 1840 --i; --e; 1841 } 1842 1843 if (Ops.size() == 1) return Ops[0]; 1844 1845 assert(!Ops.empty() && "Reduced umax down to nothing!"); 1846 1847 // Okay, it looks like we really DO need a umax expr. Check to see if we 1848 // already have one, otherwise create a new one. 1849 std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end()); 1850 SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scUMaxExpr, 1851 SCEVOps)]; 1852 if (Result == 0) Result = new SCEVUMaxExpr(Ops); 1853 return Result; 1854 } 1855 1856 const SCEV* ScalarEvolution::getSMinExpr(const SCEV* LHS, 1857 const SCEV* RHS) { 1858 // ~smax(~x, ~y) == smin(x, y). 
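  // (This holds because ~v = -1-v reverses the signed order, so taking the
  // smax of the complements selects the complement of the smin.)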
1859 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 1860 } 1861 1862 const SCEV* ScalarEvolution::getUMinExpr(const SCEV* LHS, 1863 const SCEV* RHS) { 1864 // ~umax(~x, ~y) == umin(x, y) 1865 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 1866 } 1867 1868 const SCEV* ScalarEvolution::getUnknown(Value *V) { 1869 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 1870 return getConstant(CI); 1871 if (isa<ConstantPointerNull>(V)) 1872 return getIntegerSCEV(0, V->getType()); 1873 SCEVUnknown *&Result = SCEVUnknowns[V]; 1874 if (Result == 0) Result = new SCEVUnknown(V); 1875 return Result; 1876 } 1877 1878 //===----------------------------------------------------------------------===// 1879 // Basic SCEV Analysis and PHI Idiom Recognition Code 1880 // 1881 1882 /// isSCEVable - Test if values of the given type are analyzable within 1883 /// the SCEV framework. This primarily includes integer types, and it 1884 /// can optionally include pointer types if the ScalarEvolution class 1885 /// has access to target-specific information. 1886 bool ScalarEvolution::isSCEVable(const Type *Ty) const { 1887 // Integers are always SCEVable. 1888 if (Ty->isInteger()) 1889 return true; 1890 1891 // Pointers are SCEVable if TargetData information is available 1892 // to provide pointer size information. 1893 if (isa<PointerType>(Ty)) 1894 return TD != NULL; 1895 1896 // Otherwise it's not SCEVable. 1897 return false; 1898 } 1899 1900 /// getTypeSizeInBits - Return the size in bits of the specified type, 1901 /// for which isSCEVable must return true. 1902 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const { 1903 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 1904 1905 // If we have a TargetData, use it! 1906 if (TD) 1907 return TD->getTypeSizeInBits(Ty); 1908 1909 // Otherwise, we support only integer types. 1910 assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!"); 1911 return Ty->getPrimitiveSizeInBits(); 1912 } 1913 1914 /// getEffectiveSCEVType - Return a type with the same bitwidth as 1915 /// the given type and which represents how SCEV will treat the given 1916 /// type, for which isSCEVable must return true. For pointer types, 1917 /// this is the pointer-sized integer type. 1918 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const { 1919 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 1920 1921 if (Ty->isInteger()) 1922 return Ty; 1923 1924 assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!"); 1925 return TD->getIntPtrType(); 1926 } 1927 1928 const SCEV* ScalarEvolution::getCouldNotCompute() { 1929 return CouldNotCompute; 1930 } 1931 1932 /// hasSCEV - Return true if the SCEV for this value has already been 1933 /// computed. 1934 bool ScalarEvolution::hasSCEV(Value *V) const { 1935 return Scalars.count(V); 1936 } 1937 1938 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the 1939 /// expression and create a new one. 1940 const SCEV* ScalarEvolution::getSCEV(Value *V) { 1941 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 1942 1943 std::map<SCEVCallbackVH, const SCEV*>::iterator I = Scalars.find(V); 1944 if (I != Scalars.end()) return I->second; 1945 const SCEV* S = createSCEV(V); 1946 Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S)); 1947 return S; 1948 } 1949 1950 /// getIntegerSCEV - Given an integer or FP type, create a constant for the 1951 /// specified signed integer value and return a SCEV for the constant. 
1952 const SCEV* ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
1953   Ty = getEffectiveSCEVType(Ty);
1954   Constant *C;
1955   if (Val == 0)
1956     C = Constant::getNullValue(Ty);
1957   else if (Ty->isFloatingPoint())
1958     C = ConstantFP::get(APFloat(Ty==Type::FloatTy ? APFloat::IEEEsingle :
1959                                 APFloat::IEEEdouble, Val));
1960   else
1961     C = ConstantInt::get(Ty, Val);
1962   return getUnknown(C);
1963 }
1964
1965 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
1966 ///
1967 const SCEV* ScalarEvolution::getNegativeSCEV(const SCEV* V) {
1968   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
1969     return getUnknown(ConstantExpr::getNeg(VC->getValue()));
1970
1971   const Type *Ty = V->getType();
1972   Ty = getEffectiveSCEVType(Ty);
1973   return getMulExpr(V, getConstant(ConstantInt::getAllOnesValue(Ty)));
1974 }
1975
1976 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
1977 const SCEV* ScalarEvolution::getNotSCEV(const SCEV* V) {
1978   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
1979     return getUnknown(ConstantExpr::getNot(VC->getValue()));
1980
1981   const Type *Ty = V->getType();
1982   Ty = getEffectiveSCEVType(Ty);
1983   const SCEV* AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty));
1984   return getMinusSCEV(AllOnes, V);
1985 }
1986
1987 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
1988 ///
1989 const SCEV* ScalarEvolution::getMinusSCEV(const SCEV* LHS,
1990                                           const SCEV* RHS) {
1991   // X - Y --> X + -Y
1992   return getAddExpr(LHS, getNegativeSCEV(RHS));
1993 }
1994
1995 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
1996 /// input value to the specified type. If the type must be extended, it is zero
1997 /// extended.
1998 const SCEV*
1999 ScalarEvolution::getTruncateOrZeroExtend(const SCEV* V,
2000                                          const Type *Ty) {
2001   const Type *SrcTy = V->getType();
2002   assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2003          (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2004          "Cannot truncate or zero extend with non-integer arguments!");
2005   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2006     return V; // No conversion
2007   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2008     return getTruncateExpr(V, Ty);
2009   return getZeroExtendExpr(V, Ty);
2010 }
2011
2012 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2013 /// input value to the specified type. If the type must be extended, it is sign
2014 /// extended.
2015 const SCEV*
2016 ScalarEvolution::getTruncateOrSignExtend(const SCEV* V,
2017                                          const Type *Ty) {
2018   const Type *SrcTy = V->getType();
2019   assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2020          (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2021          "Cannot truncate or sign extend with non-integer arguments!");
2022   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2023     return V; // No conversion
2024   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2025     return getTruncateExpr(V, Ty);
2026   return getSignExtendExpr(V, Ty);
2027 }
2028
2029 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2030 /// input value to the specified type. If the type must be extended, it is zero
2031 /// extended. The conversion must not be narrowing.
2032 const SCEV* 2033 ScalarEvolution::getNoopOrZeroExtend(const SCEV* V, const Type *Ty) { 2034 const Type *SrcTy = V->getType(); 2035 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) && 2036 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) && 2037 "Cannot noop or zero extend with non-integer arguments!"); 2038 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2039 "getNoopOrZeroExtend cannot truncate!"); 2040 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2041 return V; // No conversion 2042 return getZeroExtendExpr(V, Ty); 2043 } 2044 2045 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the 2046 /// input value to the specified type. If the type must be extended, it is sign 2047 /// extended. The conversion must not be narrowing. 2048 const SCEV* 2049 ScalarEvolution::getNoopOrSignExtend(const SCEV* V, const Type *Ty) { 2050 const Type *SrcTy = V->getType(); 2051 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) && 2052 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) && 2053 "Cannot noop or sign extend with non-integer arguments!"); 2054 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2055 "getNoopOrSignExtend cannot truncate!"); 2056 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2057 return V; // No conversion 2058 return getSignExtendExpr(V, Ty); 2059 } 2060 2061 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of 2062 /// the input value to the specified type. If the type must be extended, 2063 /// it is extended with unspecified bits. The conversion must not be 2064 /// narrowing. 2065 const SCEV* 2066 ScalarEvolution::getNoopOrAnyExtend(const SCEV* V, const Type *Ty) { 2067 const Type *SrcTy = V->getType(); 2068 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) && 2069 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) && 2070 "Cannot noop or any extend with non-integer arguments!"); 2071 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2072 "getNoopOrAnyExtend cannot truncate!"); 2073 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2074 return V; // No conversion 2075 return getAnyExtendExpr(V, Ty); 2076 } 2077 2078 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the 2079 /// input value to the specified type. The conversion must not be widening. 2080 const SCEV* 2081 ScalarEvolution::getTruncateOrNoop(const SCEV* V, const Type *Ty) { 2082 const Type *SrcTy = V->getType(); 2083 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) && 2084 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) && 2085 "Cannot truncate or noop with non-integer arguments!"); 2086 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 2087 "getTruncateOrNoop cannot extend!"); 2088 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2089 return V; // No conversion 2090 return getTruncateExpr(V, Ty); 2091 } 2092 2093 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of 2094 /// the types using zero-extension, and then perform a umax operation 2095 /// with them. 
2096 const SCEV* ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV* LHS, 2097 const SCEV* RHS) { 2098 const SCEV* PromotedLHS = LHS; 2099 const SCEV* PromotedRHS = RHS; 2100 2101 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 2102 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 2103 else 2104 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 2105 2106 return getUMaxExpr(PromotedLHS, PromotedRHS); 2107 } 2108 2109 /// getUMinFromMismatchedTypes - Promote the operands to the wider of 2110 /// the types using zero-extension, and then perform a umin operation 2111 /// with them. 2112 const SCEV* ScalarEvolution::getUMinFromMismatchedTypes(const SCEV* LHS, 2113 const SCEV* RHS) { 2114 const SCEV* PromotedLHS = LHS; 2115 const SCEV* PromotedRHS = RHS; 2116 2117 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 2118 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 2119 else 2120 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 2121 2122 return getUMinExpr(PromotedLHS, PromotedRHS); 2123 } 2124 2125 /// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value for 2126 /// the specified instruction and replaces any references to the symbolic value 2127 /// SymName with the specified value. This is used during PHI resolution. 2128 void ScalarEvolution:: 2129 ReplaceSymbolicValueWithConcrete(Instruction *I, const SCEV* SymName, 2130 const SCEV* NewVal) { 2131 std::map<SCEVCallbackVH, const SCEV*>::iterator SI = 2132 Scalars.find(SCEVCallbackVH(I, this)); 2133 if (SI == Scalars.end()) return; 2134 2135 const SCEV* NV = 2136 SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this); 2137 if (NV == SI->second) return; // No change. 2138 2139 SI->second = NV; // Update the scalars map! 2140 2141 // Any instruction values that use this instruction might also need to be 2142 // updated! 2143 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); 2144 UI != E; ++UI) 2145 ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal); 2146 } 2147 2148 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in 2149 /// a loop header, making it a potential recurrence, or it doesn't. 2150 /// 2151 const SCEV* ScalarEvolution::createNodeForPHI(PHINode *PN) { 2152 if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized. 2153 if (const Loop *L = LI->getLoopFor(PN->getParent())) 2154 if (L->getHeader() == PN->getParent()) { 2155 // If it lives in the loop header, it has two incoming values, one 2156 // from outside the loop, and one from inside. 2157 unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0)); 2158 unsigned BackEdge = IncomingEdge^1; 2159 2160 // While we are analyzing this PHI node, handle its value symbolically. 2161 const SCEV* SymbolicName = getUnknown(PN); 2162 assert(Scalars.find(PN) == Scalars.end() && 2163 "PHI node already processed?"); 2164 Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName)); 2165 2166 // Using this symbolic name for the PHI, analyze the value coming around 2167 // the back-edge. 2168 const SCEV* BEValue = getSCEV(PN->getIncomingValue(BackEdge)); 2169 2170 // NOTE: If BEValue is loop invariant, we know that the PHI node just 2171 // has a special value for the first iteration of the loop. 2172 2173 // If the value coming around the backedge is an add with the symbolic 2174 // value we just inserted, then we found a simple induction variable! 
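        // For example, for "i = phi(start, i + 4)" the back-edge value is
        // SymbolicName + 4, yielding the recurrence {start,+,4}.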
2175         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2176           // If there is a single occurrence of the symbolic value, replace it
2177           // with a recurrence.
2178           unsigned FoundIndex = Add->getNumOperands();
2179           for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2180             if (Add->getOperand(i) == SymbolicName)
2181               if (FoundIndex == e) {
2182                 FoundIndex = i;
2183                 break;
2184               }
2185
2186           if (FoundIndex != Add->getNumOperands()) {
2187             // Create an add with everything but the specified operand.
2188             SmallVector<const SCEV*, 8> Ops;
2189             for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2190               if (i != FoundIndex)
2191                 Ops.push_back(Add->getOperand(i));
2192             const SCEV* Accum = getAddExpr(Ops);
2193
2194             // This is not a valid addrec if the step amount is varying each
2195             // loop iteration, but is not itself an addrec in this loop.
2196             if (Accum->isLoopInvariant(L) ||
2197                 (isa<SCEVAddRecExpr>(Accum) &&
2198                  cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2199               const SCEV* StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2200               const SCEV* PHISCEV = getAddRecExpr(StartVal, Accum, L);
2201
2202               // Okay, for the entire analysis of this edge we assumed the PHI
2203               // to be symbolic. We now need to go back and update all of the
2204               // entries for the scalars that use the PHI (except for the PHI
2205               // itself) to use the new analyzed value instead of the "symbolic"
2206               // value.
2207               ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2208               return PHISCEV;
2209             }
2210           }
2211         } else if (const SCEVAddRecExpr *AddRec =
2212                      dyn_cast<SCEVAddRecExpr>(BEValue)) {
2213           // Otherwise, this could be a loop like this:
2214           //   i = 0; for (j = 1; ..; ++j) { .... i = j; }
2215           // In this case, j = {1,+,1} and BEValue is j.
2216           // Because the other in-value of i (0) fits the evolution of BEValue,
2217           // i really is an addrec evolution.
2218           if (AddRec->getLoop() == L && AddRec->isAffine()) {
2219             const SCEV* StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2220
2221             // If StartVal = j.start - j.stride, we can use StartVal as the
2222             // start of the addrec evolution.
2223             if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2224                                          AddRec->getOperand(1))) {
2225               const SCEV* PHISCEV =
2226                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2227
2228               // Okay, for the entire analysis of this edge we assumed the PHI
2229               // to be symbolic. We now need to go back and update all of the
2230               // entries for the scalars that use the PHI (except for the PHI
2231               // itself) to use the new analyzed value instead of the "symbolic"
2232               // value.
2233               ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2234               return PHISCEV;
2235             }
2236           }
2237         }
2238
2239         return SymbolicName;
2240       }
2241
2242   // If it's not a loop phi, we can't handle it yet.
2243   return getUnknown(PN);
2244 }
2245
2246 /// createNodeForGEP - Expand GEP instructions into add and multiply
2247 /// operations. This allows them to be analyzed by regular SCEV code.
2248 ///
2249 const SCEV* ScalarEvolution::createNodeForGEP(User *GEP) {
2250
2251   const Type *IntPtrTy = TD->getIntPtrType();
2252   Value *Base = GEP->getOperand(0);
2253   // Don't attempt to analyze GEPs over unsized objects.
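  // (Without a layout for the element type there is no way to scale the
  // indices into byte offsets.)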
2254   if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2255     return getUnknown(GEP);
2256   const SCEV* TotalOffset = getIntegerSCEV(0, IntPtrTy);
2257   gep_type_iterator GTI = gep_type_begin(GEP);
2258   for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2259                                       E = GEP->op_end();
2260        I != E; ++I) {
2261     Value *Index = *I;
2262     // Compute the (potentially symbolic) offset in bytes for this index.
2263     if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2264       // For a struct, add the member offset.
2265       const StructLayout &SL = *TD->getStructLayout(STy);
2266       unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2267       uint64_t Offset = SL.getElementOffset(FieldNo);
2268       TotalOffset = getAddExpr(TotalOffset,
2269                                getIntegerSCEV(Offset, IntPtrTy));
2270     } else {
2271       // For an array, add the element offset, explicitly scaled.
2272       const SCEV* LocalOffset = getSCEV(Index);
2273       if (!isa<PointerType>(LocalOffset->getType()))
2274         // Getelementptr indices are signed.
2275         LocalOffset = getTruncateOrSignExtend(LocalOffset,
2276                                               IntPtrTy);
2277       LocalOffset =
2278         getMulExpr(LocalOffset,
2279                    getIntegerSCEV(TD->getTypeAllocSize(*GTI),
2280                                   IntPtrTy));
2281       TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2282     }
2283   }
2284   return getAddExpr(getSCEV(Base), TotalOffset);
2285 }
2286
2287 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2288 /// guaranteed to end in (at every loop iteration). It is, at the same time,
2289 /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
2290 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
2291 uint32_t
2292 ScalarEvolution::GetMinTrailingZeros(const SCEV* S) {
2293   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2294     return C->getValue()->getValue().countTrailingZeros();
2295
2296   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2297     return std::min(GetMinTrailingZeros(T->getOperand()),
2298                     (uint32_t)getTypeSizeInBits(T->getType()));
2299
2300   if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2301     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2302     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2303              getTypeSizeInBits(E->getType()) : OpRes;
2304   }
2305
2306   if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2307     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2308     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2309              getTypeSizeInBits(E->getType()) : OpRes;
2310   }
2311
2312   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2313     // The result is the min of all operands results.
2314     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2315     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2316       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2317     return MinOpRes;
2318   }
2319
2320   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2321     // The result is the sum of all operands results.
2322     uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2323     uint32_t BitWidth = getTypeSizeInBits(M->getType());
2324     for (unsigned i = 1, e = M->getNumOperands();
2325          SumOpRes != BitWidth && i != e; ++i)
2326       SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2327                           BitWidth);
2328     return SumOpRes;
2329   }
2330
2331   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2332     // The result is the min of all operands results.
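    // (Every value of the recurrence is the start plus a sum of multiples of
    // the steps, so the minimum over all operands is a safe lower bound.)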
2333 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 2334 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 2335 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 2336 return MinOpRes; 2337 } 2338 2339 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 2340 // The result is the min of all operands results. 2341 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 2342 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 2343 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 2344 return MinOpRes; 2345 } 2346 2347 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 2348 // The result is the min of all operands results. 2349 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 2350 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 2351 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 2352 return MinOpRes; 2353 } 2354 2355 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2356 // For a SCEVUnknown, ask ValueTracking. 2357 unsigned BitWidth = getTypeSizeInBits(U->getType()); 2358 APInt Mask = APInt::getAllOnesValue(BitWidth); 2359 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 2360 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones); 2361 return Zeros.countTrailingOnes(); 2362 } 2363 2364 // SCEVUDivExpr 2365 return 0; 2366 } 2367 2368 uint32_t 2369 ScalarEvolution::GetMinLeadingZeros(const SCEV* S) { 2370 // TODO: Handle other SCEV expression types here. 2371 2372 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 2373 return C->getValue()->getValue().countLeadingZeros(); 2374 2375 if (const SCEVZeroExtendExpr *C = dyn_cast<SCEVZeroExtendExpr>(S)) { 2376 // A zero-extension cast adds zero bits. 2377 return GetMinLeadingZeros(C->getOperand()) + 2378 (getTypeSizeInBits(C->getType()) - 2379 getTypeSizeInBits(C->getOperand()->getType())); 2380 } 2381 2382 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2383 // For a SCEVUnknown, ask ValueTracking. 2384 unsigned BitWidth = getTypeSizeInBits(U->getType()); 2385 APInt Mask = APInt::getAllOnesValue(BitWidth); 2386 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 2387 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD); 2388 return Zeros.countLeadingOnes(); 2389 } 2390 2391 return 1; 2392 } 2393 2394 uint32_t 2395 ScalarEvolution::GetMinSignBits(const SCEV* S) { 2396 // TODO: Handle other SCEV expression types here. 2397 2398 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) { 2399 const APInt &A = C->getValue()->getValue(); 2400 return A.isNegative() ? A.countLeadingOnes() : 2401 A.countLeadingZeros(); 2402 } 2403 2404 if (const SCEVSignExtendExpr *C = dyn_cast<SCEVSignExtendExpr>(S)) { 2405 // A sign-extension cast adds sign bits. 2406 return GetMinSignBits(C->getOperand()) + 2407 (getTypeSizeInBits(C->getType()) - 2408 getTypeSizeInBits(C->getOperand()->getType())); 2409 } 2410 2411 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2412 // For a SCEVUnknown, ask ValueTracking. 2413 return ComputeNumSignBits(U->getValue(), TD); 2414 } 2415 2416 return 1; 2417 } 2418 2419 /// createSCEV - We know that there is no SCEV for the specified value. 2420 /// Analyze the expression. 
2421 /// 2422 const SCEV* ScalarEvolution::createSCEV(Value *V) { 2423 if (!isSCEVable(V->getType())) 2424 return getUnknown(V); 2425 2426 unsigned Opcode = Instruction::UserOp1; 2427 if (Instruction *I = dyn_cast<Instruction>(V)) 2428 Opcode = I->getOpcode(); 2429 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) 2430 Opcode = CE->getOpcode(); 2431 else 2432 return getUnknown(V); 2433 2434 User *U = cast<User>(V); 2435 switch (Opcode) { 2436 case Instruction::Add: 2437 return getAddExpr(getSCEV(U->getOperand(0)), 2438 getSCEV(U->getOperand(1))); 2439 case Instruction::Mul: 2440 return getMulExpr(getSCEV(U->getOperand(0)), 2441 getSCEV(U->getOperand(1))); 2442 case Instruction::UDiv: 2443 return getUDivExpr(getSCEV(U->getOperand(0)), 2444 getSCEV(U->getOperand(1))); 2445 case Instruction::Sub: 2446 return getMinusSCEV(getSCEV(U->getOperand(0)), 2447 getSCEV(U->getOperand(1))); 2448 case Instruction::And: 2449 // For an expression like x&255 that merely masks off the high bits, 2450 // use zext(trunc(x)) as the SCEV expression. 2451 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 2452 if (CI->isNullValue()) 2453 return getSCEV(U->getOperand(1)); 2454 if (CI->isAllOnesValue()) 2455 return getSCEV(U->getOperand(0)); 2456 const APInt &A = CI->getValue(); 2457 2458 // Instcombine's ShrinkDemandedConstant may strip bits out of 2459 // constants, obscuring what would otherwise be a low-bits mask. 2460 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant 2461 // knew about to reconstruct a low-bits mask value. 2462 unsigned LZ = A.countLeadingZeros(); 2463 unsigned BitWidth = A.getBitWidth(); 2464 APInt AllOnes = APInt::getAllOnesValue(BitWidth); 2465 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 2466 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD); 2467 2468 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ); 2469 2470 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask)) 2471 return 2472 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)), 2473 IntegerType::get(BitWidth - LZ)), 2474 U->getType()); 2475 } 2476 break; 2477 2478 case Instruction::Or: 2479 // If the RHS of the Or is a constant, we may have something like: 2480 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 2481 // optimizations will transparently handle this case. 2482 // 2483 // In order for this transformation to be safe, the LHS must be of the 2484 // form X*(2^n) and the Or constant must be less than 2^n. 2485 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 2486 const SCEV* LHS = getSCEV(U->getOperand(0)); 2487 const APInt &CIVal = CI->getValue(); 2488 if (GetMinTrailingZeros(LHS) >= 2489 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) 2490 return getAddExpr(LHS, getSCEV(U->getOperand(1))); 2491 } 2492 break; 2493 case Instruction::Xor: 2494 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 2495 // If the RHS of the xor is a signbit, then this is just an add. 2496 // Instcombine turns add of signbit into xor as a strength reduction step. 2497 if (CI->getValue().isSignBit()) 2498 return getAddExpr(getSCEV(U->getOperand(0)), 2499 getSCEV(U->getOperand(1))); 2500 2501 // If the RHS of xor is -1, then this is a not operation. 2502 if (CI->isAllOnesValue()) 2503 return getNotSCEV(getSCEV(U->getOperand(0))); 2504 2505 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 
2506       // This is a variant of the check for xor with -1, and it handles
2507       // the case where instcombine has trimmed non-demanded bits out
2508       // of an xor with -1.
2509       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2510         if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2511           if (BO->getOpcode() == Instruction::And &&
2512               LCI->getValue() == CI->getValue())
2513             if (const SCEVZeroExtendExpr *Z =
2514                   dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2515               const Type *UTy = U->getType();
2516               const SCEV* Z0 = Z->getOperand();
2517               const Type *Z0Ty = Z0->getType();
2518               unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2519
2520               // If C is a low-bits mask, the zero extend is serving to
2521               // mask off the high bits. Complement the operand and
2522               // re-apply the zext.
2523               if (APIntOps::isMask(Z0TySize, CI->getValue()))
2524                 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2525
2526               // If C is a single bit, it may be in the sign-bit position
2527               // before the zero-extend. In this case, represent the xor
2528               // using an add, which is equivalent, and re-apply the zext.
2529               APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2530               if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2531                   Trunc.isSignBit())
2532                 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2533                                          UTy);
2534             }
2535     }
2536     break;
2537
2538   case Instruction::Shl:
2539     // Turn shift left of a constant amount into a multiply.
2540     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2541       uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2542       Constant *X = ConstantInt::get(
2543         APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2544       return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2545     }
2546     break;
2547
2548   case Instruction::LShr:
2549     // Turn logical shift right of a constant into an unsigned divide.
2550     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2551       uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2552       Constant *X = ConstantInt::get(
2553         APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2554       return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2555     }
2556     break;
2557
2558   case Instruction::AShr:
2559     // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
2560     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2561       if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2562         if (L->getOpcode() == Instruction::Shl &&
2563             L->getOperand(1) == U->getOperand(1)) {
2564           unsigned BitWidth = getTypeSizeInBits(U->getType());
2565           uint64_t Amt = BitWidth - CI->getZExtValue();
2566           if (Amt == BitWidth)
2567             return getSCEV(L->getOperand(0)); // shift by zero --> noop
2568           if (Amt > BitWidth)
2569             return getIntegerSCEV(0, U->getType()); // value is undefined
2570           return
2571             getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2572                                               IntegerType::get(Amt)),
2573                               U->getType());
2574         }
2575     break;
2576
2577   case Instruction::Trunc:
2578     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2579
2580   case Instruction::ZExt:
2581     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2582
2583   case Instruction::SExt:
2584     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2585
2586   case Instruction::BitCast:
2587     // BitCasts are no-op casts so we just eliminate the cast.
2588 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 2589 return getSCEV(U->getOperand(0)); 2590 break; 2591 2592 case Instruction::IntToPtr: 2593 if (!TD) break; // Without TD we can't analyze pointers. 2594 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)), 2595 TD->getIntPtrType()); 2596 2597 case Instruction::PtrToInt: 2598 if (!TD) break; // Without TD we can't analyze pointers. 2599 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)), 2600 U->getType()); 2601 2602 case Instruction::GetElementPtr: 2603 if (!TD) break; // Without TD we can't analyze pointers. 2604 return createNodeForGEP(U); 2605 2606 case Instruction::PHI: 2607 return createNodeForPHI(cast<PHINode>(U)); 2608 2609 case Instruction::Select: 2610 // This could be a smax or umax that was lowered earlier. 2611 // Try to recover it. 2612 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) { 2613 Value *LHS = ICI->getOperand(0); 2614 Value *RHS = ICI->getOperand(1); 2615 switch (ICI->getPredicate()) { 2616 case ICmpInst::ICMP_SLT: 2617 case ICmpInst::ICMP_SLE: 2618 std::swap(LHS, RHS); 2619 // fall through 2620 case ICmpInst::ICMP_SGT: 2621 case ICmpInst::ICMP_SGE: 2622 if (LHS == U->getOperand(1) && RHS == U->getOperand(2)) 2623 return getSMaxExpr(getSCEV(LHS), getSCEV(RHS)); 2624 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1)) 2625 return getSMinExpr(getSCEV(LHS), getSCEV(RHS)); 2626 break; 2627 case ICmpInst::ICMP_ULT: 2628 case ICmpInst::ICMP_ULE: 2629 std::swap(LHS, RHS); 2630 // fall through 2631 case ICmpInst::ICMP_UGT: 2632 case ICmpInst::ICMP_UGE: 2633 if (LHS == U->getOperand(1) && RHS == U->getOperand(2)) 2634 return getUMaxExpr(getSCEV(LHS), getSCEV(RHS)); 2635 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1)) 2636 return getUMinExpr(getSCEV(LHS), getSCEV(RHS)); 2637 break; 2638 case ICmpInst::ICMP_NE: 2639 // n != 0 ? n : 1 -> umax(n, 1) 2640 if (LHS == U->getOperand(1) && 2641 isa<ConstantInt>(U->getOperand(2)) && 2642 cast<ConstantInt>(U->getOperand(2))->isOne() && 2643 isa<ConstantInt>(RHS) && 2644 cast<ConstantInt>(RHS)->isZero()) 2645 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2))); 2646 break; 2647 case ICmpInst::ICMP_EQ: 2648 // n == 0 ? 1 : n -> umax(n, 1) 2649 if (LHS == U->getOperand(2) && 2650 isa<ConstantInt>(U->getOperand(1)) && 2651 cast<ConstantInt>(U->getOperand(1))->isOne() && 2652 isa<ConstantInt>(RHS) && 2653 cast<ConstantInt>(RHS)->isZero()) 2654 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1))); 2655 break; 2656 default: 2657 break; 2658 } 2659 } 2660 2661 default: // We cannot analyze this expression. 2662 break; 2663 } 2664 2665 return getUnknown(V); 2666 } 2667 2668 2669 2670 //===----------------------------------------------------------------------===// 2671 // Iteration Count Computation Code 2672 // 2673 2674 /// getBackedgeTakenCount - If the specified loop has a predictable 2675 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute 2676 /// object. The backedge-taken count is the number of times the loop header 2677 /// will be branched to from within the loop. This is one less than the 2678 /// trip count of the loop, since it doesn't count the first iteration, 2679 /// when the header is branched to from outside the loop. 2680 /// 2681 /// Note that it is not valid to call this method on a loop without a 2682 /// loop-invariant backedge-taken count (see 2683 /// hasLoopInvariantBackedgeTakenCount). 
2684 ///
2685 const SCEV* ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
2686   return getBackedgeTakenInfo(L).Exact;
2687 }
2688
2689 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
2690 /// return the least SCEV value that is known never to be less than the
2691 /// actual backedge taken count.
2692 const SCEV* ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
2693   return getBackedgeTakenInfo(L).Max;
2694 }
2695
2696 const ScalarEvolution::BackedgeTakenInfo &
2697 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
2698   // Initially insert a CouldNotCompute for this loop. If the insertion
2699   // succeeds, proceed to actually compute a backedge-taken count and
2700   // update the value. The temporary CouldNotCompute value tells SCEV
2701   // code elsewhere that it shouldn't attempt to request a new
2702   // backedge-taken count, which could result in infinite recursion.
2703   std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
2704     BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
2705   if (Pair.second) {
2706     BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
2707     if (ItCount.Exact != CouldNotCompute) {
2708       assert(ItCount.Exact->isLoopInvariant(L) &&
2709              ItCount.Max->isLoopInvariant(L) &&
2710              "Computed trip count isn't loop invariant for loop!");
2711       ++NumTripCountsComputed;
2712
2713       // Update the value in the map.
2714       Pair.first->second = ItCount;
2715     } else {
2716       if (ItCount.Max != CouldNotCompute)
2717         // Update the value in the map.
2718         Pair.first->second = ItCount;
2719       if (isa<PHINode>(L->getHeader()->begin()))
2720         // Only count loops that have phi nodes as not being computable.
2721         ++NumTripCountsNotComputed;
2722     }
2723
2724     // Now that we know more about the trip count for this loop, forget any
2725     // existing SCEV values for PHI nodes in this loop since they are only
2726     // conservative estimates made without the benefit
2727     // of trip count information.
2728     if (ItCount.hasAnyInfo())
2729       forgetLoopPHIs(L);
2730   }
2731   return Pair.first->second;
2732 }
2733
2734 /// forgetLoopBackedgeTakenCount - This method should be called by the
2735 /// client when it has changed a loop in a way that may affect
2736 /// ScalarEvolution's ability to compute a trip count, or if the loop
2737 /// is deleted.
2738 void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
2739   BackedgeTakenCounts.erase(L);
2740   forgetLoopPHIs(L);
2741 }
2742
2743 /// forgetLoopPHIs - Delete the memoized SCEVs associated with the
2744 /// PHI nodes in the given loop. This is used when the trip count of
2745 /// the loop may have changed.
2746 void ScalarEvolution::forgetLoopPHIs(const Loop *L) {
2747   BasicBlock *Header = L->getHeader();
2748
2749   // Push all Loop-header PHIs onto the Worklist stack, except those
2750   // that are presently represented via a SCEVUnknown. SCEVUnknown for
2751   // a PHI either means that it has an unrecognized structure, or it's
2752   // a PHI that's in the process of being computed by createNodeForPHI.
2753   // In the former case, additional loop trip count information isn't
2754   // going to change anything. In the latter case, createNodeForPHI will
2755   // perform the necessary updates on its own when it gets to that point.
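  // The worklist below also transitively erases the memoized SCEVs of
  // everything that uses a forgotten PHI.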
2756   SmallVector<Instruction *, 16> Worklist;
2757   for (BasicBlock::iterator I = Header->begin();
2758        PHINode *PN = dyn_cast<PHINode>(I); ++I) {
2759     std::map<SCEVCallbackVH, const SCEV*>::iterator It = Scalars.find((Value*)I);
2760     if (It != Scalars.end() && !isa<SCEVUnknown>(It->second))
2761       Worklist.push_back(PN);
2762   }
2763
2764   while (!Worklist.empty()) {
2765     Instruction *I = Worklist.pop_back_val();
2766     if (Scalars.erase(I))
2767       for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2768            UI != UE; ++UI)
2769         Worklist.push_back(cast<Instruction>(UI));
2770   }
2771 }
2772
2773 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
2774 /// of the specified loop will execute.
2775 ScalarEvolution::BackedgeTakenInfo
2776 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
2777   SmallVector<BasicBlock*, 8> ExitingBlocks;
2778   L->getExitingBlocks(ExitingBlocks);
2779
2780   // Examine all exits and pick the most conservative values.
2781   const SCEV* BECount = CouldNotCompute;
2782   const SCEV* MaxBECount = CouldNotCompute;
2783   bool CouldNotComputeBECount = false;
2784   bool CouldNotComputeMaxBECount = false;
2785   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
2786     BackedgeTakenInfo NewBTI =
2787       ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
2788
2789     if (NewBTI.Exact == CouldNotCompute) {
2790       // We couldn't compute an exact value for this exit, so
2791       // we won't be able to compute an exact value for the loop.
2792       CouldNotComputeBECount = true;
2793       BECount = CouldNotCompute;
2794     } else if (!CouldNotComputeBECount) {
2795       if (BECount == CouldNotCompute)
2796         BECount = NewBTI.Exact;
2797       else {
2798         // TODO: More analysis could be done here. For example, a
2799         // loop with a short-circuiting && operator has an exact count
2800         // of the min of both sides.
2801         CouldNotComputeBECount = true;
2802         BECount = CouldNotCompute;
2803       }
2804     }
2805     if (NewBTI.Max == CouldNotCompute) {
2806       // We couldn't compute a maximum value for this exit, so
2807       // we won't be able to compute a maximum value for the loop.
2808       CouldNotComputeMaxBECount = true;
2809       MaxBECount = CouldNotCompute;
2810     } else if (!CouldNotComputeMaxBECount) {
2811       if (MaxBECount == CouldNotCompute)
2812         MaxBECount = NewBTI.Max;
2813       else
2814         MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, NewBTI.Max);
2815     }
2816   }
2817
2818   return BackedgeTakenInfo(BECount, MaxBECount);
2819 }
2820
2821 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
2822 /// of the specified loop will execute if it exits via the specified block.
2823 ScalarEvolution::BackedgeTakenInfo
2824 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
2825                                                    BasicBlock *ExitingBlock) {
2826
2827   // Okay, we've chosen an exiting block. See what condition causes us to
2828   // exit at this block.
2829   //
2830   // FIXME: we should be able to handle switch instructions (with a single exit)
2831   BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2832   if (ExitBr == 0) return CouldNotCompute;
2833   assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
2834
2835   // At this point, we know we have a conditional branch that determines whether
2836   // the loop is exited. However, we don't know if the branch is executed each
2837   // time through the loop. If not, then the execution count of the branch will
2838   // not be equal to the trip count of the loop.
2839   //
2840   // Currently we check for this by checking to see if the Exit branch goes to
2841   // the loop header. If so, we know it will always execute the same number of
2842   // times as the loop. We also handle the case where the exit block *is* the
2843   // loop header. This is common for un-rotated loops.
2844   //
2845   // If both of those tests fail, walk up the unique predecessor chain to the
2846   // header, stopping if there is an edge that doesn't exit the loop. If the
2847   // header is reached, the execution count of the branch will be equal to the
2848   // trip count of the loop.
2849   //
2850   // More extensive analysis could be done to handle more cases here.
2851   //
2852   if (ExitBr->getSuccessor(0) != L->getHeader() &&
2853       ExitBr->getSuccessor(1) != L->getHeader() &&
2854       ExitBr->getParent() != L->getHeader()) {
2855     // The simple checks failed, try climbing the unique predecessor chain
2856     // up to the header.
2857     bool Ok = false;
2858     for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
2859       BasicBlock *Pred = BB->getUniquePredecessor();
2860       if (!Pred)
2861         return CouldNotCompute;
2862       TerminatorInst *PredTerm = Pred->getTerminator();
2863       for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
2864         BasicBlock *PredSucc = PredTerm->getSuccessor(i);
2865         if (PredSucc == BB)
2866           continue;
2867         // If the predecessor has a successor that isn't BB and isn't
2868         // outside the loop, assume the worst.
2869         if (L->contains(PredSucc))
2870           return CouldNotCompute;
2871       }
2872       if (Pred == L->getHeader()) {
2873         Ok = true;
2874         break;
2875       }
2876       BB = Pred;
2877     }
2878     if (!Ok)
2879       return CouldNotCompute;
2880   }
2881
2882   // Proceed to the next level to examine the exit condition expression.
2883   return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
2884                                                ExitBr->getSuccessor(0),
2885                                                ExitBr->getSuccessor(1));
2886 }
2887
2888 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
2889 /// backedge of the specified loop will execute if its exit condition
2890 /// were a conditional branch of ExitCond, TBB, and FBB.
2891 ScalarEvolution::BackedgeTakenInfo
2892 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
2893                                                        Value *ExitCond,
2894                                                        BasicBlock *TBB,
2895                                                        BasicBlock *FBB) {
2896   // Check if the controlling expression for this loop is an 'and' or an 'or'.
2897   // In such cases, an exact backedge-taken count may be infeasible, but a
2898   // maximum count may still be feasible.
2899   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
2900     if (BO->getOpcode() == Instruction::And) {
2901       // Recurse on the operands of the and.
2902       BackedgeTakenInfo BTI0 =
2903         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
2904       BackedgeTakenInfo BTI1 =
2905         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
2906       const SCEV* BECount = CouldNotCompute;
2907       const SCEV* MaxBECount = CouldNotCompute;
2908       if (L->contains(TBB)) {
2909         // Both conditions must be true for the loop to continue executing.
2910         // Choose the less conservative count.
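        // (The loop exits as soon as either subcondition would exit it, so the
        // exact backedge-taken count is the minimum of the two.)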
        if (BTI0.Exact == CouldNotCompute || BTI1.Exact == CouldNotCompute)
          BECount = CouldNotCompute;
        else
          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
        if (BTI0.Max == CouldNotCompute)
          MaxBECount = BTI1.Max;
        else if (BTI1.Max == CouldNotCompute)
          MaxBECount = BTI0.Max;
        else
          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
      } else {
        // Both conditions must be true for the loop to exit.
        assert(L->contains(FBB) && "Loop block has no successor in loop!");
        if (BTI0.Exact != CouldNotCompute && BTI1.Exact != CouldNotCompute)
          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
        if (BTI0.Max != CouldNotCompute && BTI1.Max != CouldNotCompute)
          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
      }

      return BackedgeTakenInfo(BECount, MaxBECount);
    }
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      BackedgeTakenInfo BTI0 =
        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
      BackedgeTakenInfo BTI1 =
        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
      const SCEV* BECount = CouldNotCompute;
      const SCEV* MaxBECount = CouldNotCompute;
      if (L->contains(FBB)) {
        // Both conditions must be false for the loop to continue executing.
        // Choose the less conservative count.
        if (BTI0.Exact == CouldNotCompute || BTI1.Exact == CouldNotCompute)
          BECount = CouldNotCompute;
        else
          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
        if (BTI0.Max == CouldNotCompute)
          MaxBECount = BTI1.Max;
        else if (BTI1.Max == CouldNotCompute)
          MaxBECount = BTI0.Max;
        else
          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
      } else {
        // Both conditions must be false for the loop to exit.
        assert(L->contains(TBB) && "Loop block has no successor in loop!");
        if (BTI0.Exact != CouldNotCompute && BTI1.Exact != CouldNotCompute)
          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
        if (BTI0.Max != CouldNotCompute && BTI1.Max != CouldNotCompute)
          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
      }

      return BackedgeTakenInfo(BECount, MaxBECount);
    }
  }

  // With an icmp, it may be feasible to compute an exact backedge-taken
  // count.  Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);

  // If it's not an integer or pointer comparison then compute it the hard way.
  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
}

/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times
/// the backedge of the specified loop will execute if its exit condition
/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
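/// For example (illustrative IR), given
/// "br i1 (icmp eq i32 %i, %n), label %exit, label %body" the loop exits
/// when the compare is true, so the predicate is inverted to "ne" below and
/// analyzed as the condition under which the loop keeps running.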
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
                                                           ICmpInst *ExitCond,
                                                           BasicBlock *TBB,
                                                           BasicBlock *FBB) {

  // If the condition was exit on true, convert the condition to exit on false.
  ICmpInst::Predicate Cond;
  if (!L->contains(FBB))
    Cond = ExitCond->getPredicate();
  else
    Cond = ExitCond->getInversePredicate();

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      const SCEV* ItCnt =
        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
      if (!isa<SCEVCouldNotCompute>(ItCnt)) {
        unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
        return BackedgeTakenInfo(ItCnt,
                                 isa<SCEVConstant>(ItCnt) ? ItCnt :
                                   getConstant(APInt::getMaxValue(BitWidth)-1));
      }
    }

  const SCEV* LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV* RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
    // If there is a loop-invariant, force it into the RHS.
    std::swap(LHS, RHS);
    Cond = ICmpInst::getSwappedPredicate(Cond);
  }

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
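        // For example, for {0,+,1} with the continue-predicate "slt 10",
        // the range of values satisfying the predicate is [INT_MIN, 10),
        // and the first iteration whose value falls outside it is 10.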
        ConstantRange CompRange(
            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));

        const SCEV* Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  switch (Cond) {
  case ICmpInst::ICMP_NE: {                     // while (X != Y)
    // Convert to: while (X-Y != 0)
    const SCEV* TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
    break;
  }
  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
    // Convert to: while (X-Y == 0)
    const SCEV* TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
    break;
  }
  case ICmpInst::ICMP_SLT: {
    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_SGT: {
    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
                                             getNotSCEV(RHS), L, true);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_ULT: {
    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_UGT: {
    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
                                             getNotSCEV(RHS), L, false);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  default:
#if 0
    errs() << "ComputeBackedgeTakenCount ";
    if (ExitCond->getOperand(0)->getType()->isUnsigned())
      errs() << "[unsigned] ";
    errs() << *LHS << " "
           << Instruction::getOpcodeName(Instruction::ICmp)
           << " " << *RHS << "\n";
#endif
    break;
  }
  return
    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV* InVal = SE.getConstant(C);
  const SCEV* Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}

/// GetAddressedElementFromGlobal - Given a global variable with an initializer
/// and a GEP expression (missing the pointer index) indexing into it, return
/// the addressed element of the initializer or null if the index expression is
/// invalid.
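/// For example (illustrative), given a global "@s" holding the constant
/// [6 x i8] c"hello\00" and the index list {4}, this returns the i8
/// constant 'o'.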
static Constant *
GetAddressedElementFromGlobal(GlobalVariable *GV,
                              const std::vector<ConstantInt*> &Indices) {
  Constant *Init = GV->getInitializer();
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    uint64_t Idx = Indices[i]->getZExtValue();
    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
      assert(Idx < CS->getNumOperands() && "Bad struct index!");
      Init = cast<Constant>(CS->getOperand(Idx));
    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
      Init = cast<Constant>(CA->getOperand(Idx));
    } else if (isa<ConstantAggregateZero>(Init)) {
      // A zeroinitializer aggregate addresses to the null value of the
      // element type; keep descending with that element.
      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
        assert(Idx < STy->getNumElements() && "Bad struct index!");
        Init = Constant::getNullValue(STy->getElementType(Idx));
      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
        Init = Constant::getNullValue(ATy->getElementType());
      } else {
        assert(0 && "Unknown constant aggregate type!");
        return 0;
      }
    } else {
      return 0; // Unknown initializer type
    }
  }
  return Init;
}

/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
/// 'icmp op load X, cst', try to see if we can compute the backedge
/// execution count.
const SCEV* ScalarEvolution::
ComputeLoadConstantCompareBackedgeTakenCount(LoadInst *LI, Constant *RHS,
                                             const Loop *L,
                                             ICmpInst::Predicate predicate) {
  if (LI->isVolatile()) return CouldNotCompute;

  // Check to see if the loaded pointer is a getelementptr of a global.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return CouldNotCompute;

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return CouldNotCompute;

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = 0;
  std::vector<ConstantInt*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return CouldNotCompute;  // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(0);
    }

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV* Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
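  // For example, in "for (X = "hello"; *X; ++X)" the varying index is
  // {0,+,1}; the brute-force evaluation below finds the nul terminator at
  // index 5, giving a backedge-taken count of 5.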
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return CouldNotCompute;

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst =
      ConstantInt::get(cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
    if (Result == 0) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
#if 0
      errs() << "\n***\n*** Computed loop count " << *ItCst
             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
             << "***\n";
#endif
      ++NumArrayLenItCounts;
      return getConstant(ItCst);  // Found terminating iteration!
    }
  }
  return CouldNotCompute;
}


/// CanConstantFold - Return true if we can constant fold an instruction of the
/// specified type, assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(F);
  return false;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along
/// the way, but the operands of an operation must either be constants or a
/// value derived from a constant PHI.  If this expression does not fit with
/// these constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  // If this is not an instruction, or if this is an instruction outside of the
  // loop, it can't be derived from a loop PHI.
  Instruction *I = dyn_cast<Instruction>(V);
  if (I == 0 || !L->contains(I->getParent())) return 0;

  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    if (L->getHeader() == I->getParent())
      return PN;
    else
      // We don't currently keep track of the control flow needed to evaluate
      // PHIs, so we cannot handle PHIs inside of loops.
      return 0;
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, return early.
  if (!CanConstantFold(I)) return 0;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
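  // For example (illustrative IR), with "%i = phi [0, %preheader],
  // [%i3, %latch]", "%i2 = add %i, 1", and "%i3 = mul %i2, 3", both %i2 and
  // %i3 evolve from the single header PHI %i, which the scan below returns.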
  PHINode *PHI = 0;
  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
    if (!(isa<Constant>(I->getOperand(Op)) ||
          isa<GlobalValue>(I->getOperand(Op)))) {
      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
      if (P == 0) return 0;  // Not evolving from PHI
      if (PHI == 0)
        PHI = P;
      else if (PHI != P)
        return 0;  // Evolving from multiple different PHIs.
    }

  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal.  If we can't fold this expression for
/// some reason, return null.
static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
  if (isa<PHINode>(V)) return PHIVal;
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
  Instruction *I = cast<Instruction>(V);

  std::vector<Constant*> Operands;
  Operands.resize(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
    if (Operands[i] == 0) return 0;
  }

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(),
                                           &Operands[0], Operands.size());
  else
    return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                    &Operands[0], Operands.size());
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
Constant *ScalarEvolution::
getConstantEvolutionLoopExitValue(PHINode *PN, const APInt& BEs,
                                  const Loop *L) {
  std::map<PHINode*, Constant*>::iterator I =
    ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(APInt(BEs.getBitWidth(), MaxBruteForceIterations)))
    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  // Since the loop is canonicalized, the PHI node must have two entries.  One
  // entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  Constant *StartCST =
    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
  if (StartCST == 0)
    return RetVal = 0;  // Must be a constant.

  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
  if (PN2 != PN)
    return RetVal = 0;  // Not derived from same PHI.

  // Execute the loop symbolically to determine the exit value.
  if (BEs.getActiveBits() >= 32)
    return RetVal = 0;  // More than 2^32-1 iterations?? Not doing it!

  unsigned NumIterations = BEs.getZExtValue();  // must be in range
  unsigned IterationNum = 0;
  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = PHIVal;  // Got exit value!

    // Compute the value of the PHI node for the next iteration.
    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
    if (NextPHI == PHIVal)
      return RetVal = NextPHI;  // Stopped evolving!
    if (NextPHI == 0)
      return 0;  // Couldn't evaluate!
    PHIVal = NextPHI;
  }
}

/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
/// constant number of times (the condition evolves only from constants),
/// try to evaluate a few iterations of the loop until the exit condition
/// gets a value of ExitWhen (true or false).  If we cannot evaluate the
/// trip count of the loop, return CouldNotCompute.
const SCEV* ScalarEvolution::
ComputeBackedgeTakenCountExhaustively(const Loop *L, Value *Cond,
                                      bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (PN == 0) return CouldNotCompute;

  // Since the loop is canonicalized, the PHI node must have two entries.  One
  // entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  Constant *StartCST =
    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
  if (StartCST == 0) return CouldNotCompute;  // Must be a constant.

  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
  if (PN2 != PN) return CouldNotCompute;  // Not derived from same PHI.

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned IterationNum = 0;
  unsigned MaxIterations = MaxBruteForceIterations;  // Limit analysis.
  for (Constant *PHIVal = StartCST;
       IterationNum != MaxIterations; ++IterationNum) {
    ConstantInt *CondVal =
      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));

    // Couldn't symbolically evaluate.
    if (!CondVal) return CouldNotCompute;

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ConstantEvolutionLoopExitValue[PN] = PHIVal;
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::Int32Ty, IterationNum);
    }

    // Compute the value of the PHI node for the next iteration.
    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
    if (NextPHI == 0 || NextPHI == PHIVal)
      return CouldNotCompute;  // Couldn't evaluate or not making progress...
    PHIVal = NextPHI;
  }

  // Too many iterations were needed to evaluate.
  return CouldNotCompute;
}

/// getSCEVAtScope - Return a SCEV expression handle for the specified value
/// at the specified scope in the program.  The L value specifies a loop nest
/// to evaluate the expression at; null means the top level, and a specified
/// loop means evaluation immediately inside that loop.
///
/// This method can be used to compute the exit value for a variable defined
/// in a loop by querying what the value will hold in the parent loop.
///
/// In the case that a relevant loop exit value cannot be computed, the
/// original value V is returned.
const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  // FIXME: this should be turned into a virtual method on SCEV!
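  // For example, querying {0,+,1}<L> at the scope of L's parent when L's
  // backedge-taken count is the constant 9 folds the recurrence to its
  // final value, the constant 9 (see the SCEVAddRecExpr case below).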

  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *LI = (*this->LI)[I->getParent()];
      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == LI->getHeader()) {
            // Okay, there is no closed form solution for the PHI node.  Check
            // to see if the loop that contains it has a known backedge-taken
            // count.  If so, we may be able to force computation of the exit
            // value.
            const SCEV* BackedgeTakenCount = getBackedgeTakenCount(LI);
            if (const SCEVConstant *BTCC =
                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
              // Okay, we know how many times the containing loop executes.  If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV = getConstantEvolutionLoopExitValue(PN,
                                              BTCC->getValue()->getValue(),
                                              LI);
              if (RV) return getUnknown(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        // Check to see if we've folded this instruction at this loop before.
        std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
        std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
          Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
        if (!Pair.second)
          return Pair.first->second ? &*getUnknown(Pair.first->second) : V;

        std::vector<Constant*> Operands;
        Operands.reserve(I->getNumOperands());
        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
          Value *Op = I->getOperand(i);
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
          } else {
            // If an operand is non-constant and its type is not SCEVable
            // (not integer or pointer), don't even try to analyze it with
            // SCEV techniques.
            if (!isSCEVable(Op->getType()))
              return V;

            const SCEV* OpV = getSCEVAtScope(getSCEV(Op), L);
            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
              Constant *C = SC->getValue();
              if (C->getType() != Op->getType())
                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                                  Op->getType(),
                                                                  false),
                                          C, Op->getType());
              Operands.push_back(C);
            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
                if (C->getType() != Op->getType())
                  C =
                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                                  Op->getType(),
                                                                  false),
                                          C, Op->getType());
                Operands.push_back(C);
              } else
                return V;
            } else {
              return V;
            }
          }
        }

        Constant *C;
        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                              &Operands[0], Operands.size());
        else
          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                       &Operands[0], Operands.size());
        Pair.first->second = C;
        return getUnknown(C);
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV* OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative
        // expression.
        SmallVector<const SCEV*, 8> NewOps(Comm->op_begin(),
                                           Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps);
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps);
        if (isa<SCEVSMaxExpr>(Comm))
          return getSMaxExpr(NewOps);
        if (isa<SCEVUMaxExpr>(Comm))
          return getUMaxExpr(NewOps);
        assert(0 && "Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV* LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV* RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div;  // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV* BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == CouldNotCompute) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }
    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  assert(0 && "Unknown SCEV type!");
  return 0;
}

/// getSCEVAtScope - This is a convenience function which does
/// getSCEVAtScope(getSCEV(V), L).
const SCEV* ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
/// following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B.  The signedness
/// of A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV* SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2.  The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2
  // for B is not less than the multiplicity of this prime factor for D.
  if (B.countTrailingZeros() < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
  // bit width during computations.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.set(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod);

  // 4. Compute the minimum unsigned root of the equation:
  //
  //     I * (B / D) mod (N / D)
  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);

  // The result is guaranteed to be less than 2^BW, so we may truncate it to
  // BW bits.
  return SE.getConstant(Result.trunc(BW));
}

/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots
/// (which might be the same) or two SCEVCouldNotCompute objects.
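///
/// At iteration x the chrec evaluates to L + M*x + N*x*(x-1)/2, i.e. the
/// polynomial (N/2)*x^2 + (M - N/2)*x + L; this is where the A, B, and C
/// coefficients computed below come from.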
///
static std::pair<const SCEV*,const SCEV*>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    const SCEV *CNC = SE.getCouldNotCompute();
    return std::make_pair(CNC, CNC);
  }

  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
  const APInt &L = LC->getValue()->getValue();
  const APInt &M = MC->getValue()->getValue();
  const APInt &N = NC->getValue()->getValue();
  APInt Two(BitWidth, 2);
  APInt Four(BitWidth, 4);

  {
    using namespace APIntOps;
    const APInt& C = L;
    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.
    // The B coefficient is M-N/2.
    APInt B(M);
    B -= sdiv(N,Two);

    // The A coefficient is N/2.
    APInt A(N.sdiv(Two));

    // Compute the B^2-4ac term.
    APInt SqrtTerm(B);
    SqrtTerm *= B;
    SqrtTerm -= Four * (A * C);

    // Compute sqrt(B^2-4ac).  This is guaranteed to be the nearest
    // integer value or else APInt::sqrt() will assert.
    APInt SqrtVal(SqrtTerm.sqrt());

    // Compute the two solutions for the quadratic formula.
    // The divisions must be performed as signed divisions.
    APInt NegB(-B);
    APInt TwoA(A << 1);
    if (TwoA.isMinValue()) {
      const SCEV *CNC = SE.getCouldNotCompute();
      return std::make_pair(CNC, CNC);
    }

    ConstantInt *Solution1 = ConstantInt::get((NegB + SqrtVal).sdiv(TwoA));
    ConstantInt *Solution2 = ConstantInt::get((NegB - SqrtVal).sdiv(TwoA));

    return std::make_pair(SE.getConstant(Solution1),
                          SE.getConstant(Solution2));
  } // end APIntOps namespace
}

/// HowFarToZero - Return the number of times a backedge comparing the
/// specified value to zero will execute.  If not computable, return
/// CouldNotCompute.
const SCEV* ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
  // If the value is a constant...
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return CouldNotCompute;  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  if (!AddRec || AddRec->getLoop() != L)
    return CouldNotCompute;

  if (AddRec->isAffine()) {
    // If this is an affine expression, the execution count of this branch is
    // the minimum unsigned root of the following equation:
    //
    //     Start + Step*N = 0 (mod 2^BW)
    //
    // equivalent to:
    //
    //     Step*N = -Start (mod 2^BW)
    //
    // where BW is the common bit width of Start and Step.

    // Get the initial value for the loop.
    const SCEV* Start = getSCEVAtScope(AddRec->getStart(),
                                       L->getParentLoop());
    const SCEV* Step = getSCEVAtScope(AddRec->getOperand(1),
                                      L->getParentLoop());

    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
      // For now we handle only constant steps.

      // First, handle unitary steps.
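      // For example, {7,+,1} reaches zero after N == -7 steps, i.e.
      // 2^BW - 7 as an unsigned value, while {4,+,-1} reaches zero after
      // N == Start == 4 steps.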
      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
        return getNegativeSCEV(Start);          //   N = -Start (as unsigned)
      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
        return Start;                           //   N = Start (as unsigned)

      // Then, try to solve the above equation provided that Start is constant.
      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
                                            -StartC->getValue()->getValue(),
                                            *this);
    }
  } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it.
    std::pair<const SCEV*,const SCEV*> Roots = SolveQuadraticEquation(AddRec,
                                                                      *this);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1) {
#if 0
      errs() << "HFTZ: " << *V << " - sol#1: " << *R1
             << " sol#2: " << *R2 << "\n";
#endif
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
                                   R1->getValue(), R2->getValue()))) {
        if (CB->getZExtValue() == false)
          std::swap(R1, R2);  // R1 is the minimum root now.

        // We can only use this value if the chrec ends up with an exact zero
        // value at this index.  When solving for "X*X != 5", for example, we
        // should not accept a root of 2.
        const SCEV* Val = AddRec->evaluateAtIteration(R1, *this);
        if (Val->isZero())
          return R1;  // We found a quadratic root!
      }
    }
  }

  return CouldNotCompute;
}

/// HowFarToNonZero - Return the number of times a backedge checking the
/// specified value for nonzero will execute.  If not computable, return
/// CouldNotCompute.
const SCEV* ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed.  We don't
  // handle them yet except for the trivial case.  This could be expanded in
  // the future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.  If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isNullValue())
      return getIntegerSCEV(0, C->getType());
    return CouldNotCompute;  // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return CouldNotCompute;
}

/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it.  Otherwise return null.
///
BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Pred = 0;
  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
       PI != E; ++PI)
    if (!L->contains(*PI)) {
      if (Pred && Pred != *PI) return 0;  // Multiple predecessors.
      Pred = *PI;
    }
  return Pred;
}

/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
/// (which may not be an immediate predecessor) which has exactly one
/// successor from which BB is reachable, or null if no such block is
/// found.
///
BasicBlock *
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return Pred;

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI->getLoopFor(BB))
    return getLoopPredecessor(L);

  return 0;
}

/// HasSameValue - SCEV structural equivalence is usually sufficient for
/// testing whether two expressions are equal, however for the purposes of
/// looking for a condition guarding a loop, it can be useful to be a little
/// more general, since a front-end may have replicated the controlling
/// expression.
///
static bool HasSameValue(const SCEV* A, const SCEV* B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value.  Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (AI->isIdenticalTo(BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

/// isLoopGuardedByCond - Test whether entry to the loop is protected by
/// a conditional between LHS and RHS.  This is used to help avoid max
/// expressions in loop trip counts.
bool ScalarEvolution::isLoopGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  BasicBlock *Predecessor = getLoopPredecessor(L);
  BasicBlock *PredecessorDest = L->getHeader();

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  for (; Predecessor;
       PredecessorDest = Predecessor,
       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {

    BranchInst *LoopEntryPredicate =
      dyn_cast<BranchInst>(Predecessor->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    ICmpInst *ICI = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition());
    if (!ICI) continue;

    // Now that we found a conditional branch that dominates the loop, check to
    // see if it is the comparison we are looking for.
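    // For example, in "if (n > 0) for (i = 0; i < n; ++i)" the guarding
    // branch establishes "0 < n" on the only path into the loop, which can
    // let the caller avoid a umax/smax against the start value.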
    Value *PreCondLHS = ICI->getOperand(0);
    Value *PreCondRHS = ICI->getOperand(1);
    ICmpInst::Predicate Cond;
    if (LoopEntryPredicate->getSuccessor(0) == PredecessorDest)
      Cond = ICI->getPredicate();
    else
      Cond = ICI->getInversePredicate();

    if (Cond == Pred)
      ; // An exact match.
    else if (!ICmpInst::isTrueWhenEqual(Cond) && Pred == ICmpInst::ICMP_NE)
      ; // The actual condition is beyond sufficient.
    else
      // Check a few special cases.
      switch (Cond) {
      case ICmpInst::ICMP_UGT:
        if (Pred == ICmpInst::ICMP_ULT) {
          std::swap(PreCondLHS, PreCondRHS);
          Cond = ICmpInst::ICMP_ULT;
          break;
        }
        continue;
      case ICmpInst::ICMP_SGT:
        if (Pred == ICmpInst::ICMP_SLT) {
          std::swap(PreCondLHS, PreCondRHS);
          Cond = ICmpInst::ICMP_SLT;
          break;
        }
        continue;
      case ICmpInst::ICMP_NE:
        // Expressions like (x >u 0) are often canonicalized to (x != 0),
        // so check for this case by checking if the NE is comparing against
        // a minimum or maximum constant.
        if (!ICmpInst::isTrueWhenEqual(Pred))
          if (ConstantInt *CI = dyn_cast<ConstantInt>(PreCondRHS)) {
            const APInt &A = CI->getValue();
            switch (Pred) {
            case ICmpInst::ICMP_SLT:
              if (A.isMaxSignedValue()) break;
              continue;
            case ICmpInst::ICMP_SGT:
              if (A.isMinSignedValue()) break;
              continue;
            case ICmpInst::ICMP_ULT:
              if (A.isMaxValue()) break;
              continue;
            case ICmpInst::ICMP_UGT:
              if (A.isMinValue()) break;
              continue;
            default:
              continue;
            }
            Cond = ICmpInst::ICMP_NE;
            // NE is symmetric but the original comparison may not be.  Swap
            // the operands if necessary so that they match below.
            if (isa<SCEVConstant>(LHS))
              std::swap(PreCondLHS, PreCondRHS);
            break;
          }
        continue;
      default:
        // We weren't able to reconcile the condition.
        continue;
      }

    if (!PreCondLHS->getType()->isInteger()) continue;

    const SCEV* PreCondLHSSCEV = getSCEV(PreCondLHS);
    const SCEV* PreCondRHSSCEV = getSCEV(PreCondRHS);
    if ((HasSameValue(LHS, PreCondLHSSCEV) &&
         HasSameValue(RHS, PreCondRHSSCEV)) ||
        (HasSameValue(LHS, getNotSCEV(PreCondRHSSCEV)) &&
         HasSameValue(RHS, getNotSCEV(PreCondLHSSCEV))))
      return true;
  }

  return false;
}

/// getBECount - Subtract the end and start values and divide by the step,
/// rounding up, to get the number of times the backedge is executed.  Return
/// CouldNotCompute if an intermediate computation overflows.
const SCEV* ScalarEvolution::getBECount(const SCEV* Start,
                                        const SCEV* End,
                                        const SCEV* Step) {
  const Type *Ty = Start->getType();
  const SCEV* NegOne = getIntegerSCEV(-1, Ty);
  const SCEV* Diff = getMinusSCEV(End, Start);
  const SCEV* RoundUp = getAddExpr(Step, NegOne);

  // Add an adjustment to the difference between End and Start so that
  // the division will effectively round up.
  const SCEV* Add = getAddExpr(Diff, RoundUp);

  // Check Add for unsigned overflow.
  // TODO: More sophisticated things could be done here.
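  // For example, with i8 operands, Diff == 200 and RoundUp == 100 wrap to
  // 44 in 8 bits, but the 9-bit sum is 300; the mismatch between zext(Add)
  // and the sum of the zero-extended operands exposes the overflow.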
  const Type *WideTy = IntegerType::get(getTypeSizeInBits(Ty) + 1);
  const SCEV* OperandExtendedAdd =
    getAddExpr(getZeroExtendExpr(Diff, WideTy),
               getZeroExtendExpr(RoundUp, WideTy));
  if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
    return CouldNotCompute;

  return getUDivExpr(Add, Step);
}

/// HowManyLessThans - Return the number of times a backedge containing the
/// specified less-than comparison will execute.  If not computable, return
/// CouldNotCompute.
ScalarEvolution::BackedgeTakenInfo ScalarEvolution::
HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                 const Loop *L, bool isSigned) {
  // Only handle: "ADDREC < LoopInvariant".
  if (!RHS->isLoopInvariant(L)) return CouldNotCompute;

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRec || AddRec->getLoop() != L)
    return CouldNotCompute;

  if (AddRec->isAffine()) {
    // FORNOW: We only support constant positive strides.
    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
    const SCEV* Step = AddRec->getStepRecurrence(*this);

    // TODO: handle non-constant strides.
    const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
    if (!CStep || CStep->isZero())
      return CouldNotCompute;
    if (CStep->isOne()) {
      // With unit stride, the iteration never steps past the limit value.
    } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
      if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
        // Test whether a positive iteration can step past the limit
        // value and past the maximum value for its type in a single step.
        if (isSigned) {
          APInt Max = APInt::getSignedMaxValue(BitWidth);
          if ((Max - CStep->getValue()->getValue())
                .slt(CLimit->getValue()->getValue()))
            return CouldNotCompute;
        } else {
          APInt Max = APInt::getMaxValue(BitWidth);
          if ((Max - CStep->getValue()->getValue())
                .ult(CLimit->getValue()->getValue()))
            return CouldNotCompute;
        }
      } else
        // TODO: handle non-constant limit values below.
        return CouldNotCompute;
    } else
      // TODO: handle negative strides below.
      return CouldNotCompute;

    // We know the LHS is of the form {n,+,s} and the RHS is some
    // loop-invariant m.  So, we count the number of iterations in which
    // {n,+,s} < m is true.  Note that we cannot simply return max(m-n,0)/s
    // because it's not safe to treat m-n as signed nor unsigned due to
    // overflow possibility.

    // First, we get the value of the LHS in the first iteration: n
    const SCEV* Start = AddRec->getOperand(0);

    // Determine the minimum constant start value.
    const SCEV* MinStart = isa<SCEVConstant>(Start) ? Start :
      getConstant(isSigned ? APInt::getSignedMinValue(BitWidth) :
                             APInt::getMinValue(BitWidth));

    // If we know that the condition is true in order to enter the loop,
    // then we know that it will run exactly (m-n)/s times.  Otherwise, we
    // only know that it will execute (max(m,n)-n)/s times.  In both cases,
    // the division must round up.
    const SCEV* End = RHS;
    if (!isLoopGuardedByCond(L,
                             isSigned ? ICmpInst::ICMP_SLT :
                                        ICmpInst::ICMP_ULT,
                             getMinusSCEV(Start, Step), RHS))
      End = isSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);

    // Determine the maximum constant end value.
    const SCEV* MaxEnd = isa<SCEVConstant>(End) ?
      End :
      getConstant(isSigned ? APInt::getSignedMaxValue(BitWidth)
                               .ashr(GetMinSignBits(End) - 1) :
                             APInt::getMaxValue(BitWidth)
                               .lshr(GetMinLeadingZeros(End)));

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
    const SCEV* BECount = getBECount(Start, End, Step);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    const SCEV* MaxBECount = getBECount(MinStart, MaxEnd, Step);

    return BackedgeTakenInfo(BECount, MaxBECount);
  }

  return CouldNotCompute;
}

/// getNumIterationsInRange - Return the number of iterations of this loop
/// that produce values in the specified constant range.  Another way of
/// looking at this is that it returns the first iteration number where the
/// value is not in the range, thus computing the exit count.  If the
/// iteration count can't be computed, an instance of SCEVCouldNotCompute is
/// returned.
const SCEV* SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV*, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
      const SCEV* Shifted = SE.getAddRecExpr(Operands, getLoop());
      if (const SCEVAddRecExpr *ShiftedAddRec =
            dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
                           Range.subtract(SC->getValue()->getValue()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!isa<SCEVConstant>(getOperand(i)))
      return SE.getCouldNotCompute();


  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getIntegerSCEV(0, getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt One(BitWidth,1);
    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(ExitVal);

    // Evaluate at the exit value.
    // If we really did fall out of the valid range, then we computed our
    // trip count; otherwise wrap around or other things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(ExitVal - One), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it.  To do this, we must frame our
    // problem in terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
    SmallVector<const SCEV*, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV* NewAddRec = SE.getAddRecExpr(NewOps, getLoop());

    // Next, solve the constructed addrec
    std::pair<const SCEV*,const SCEV*> Roots =
      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1) {
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
                                   R1->getValue(), R2->getValue()))) {
        if (CB->getZExtValue() == false)
          std::swap(R1, R2);  // R1 is the minimum root now.

        // Make sure the root is not off by one.  The returned iteration
        // should not be in the range, but the previous one should be.  When
        // solving for "X*X < 5", for example, we should not return a root
        // of 2.
        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
                                                             R1->getValue(),
                                                             SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal = ConstantInt::get(R1->getValue()->getValue()+1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value.  Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal = ConstantInt::get(R1->getValue()->getValue()-1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}



//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
    SE->ValuesAtScopes.erase(I);
  SE->Scalars.erase(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  SmallVector<User *, 16> Worklist;
  Value *Old = getValPtr();
  bool DeleteOld = false;
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE; ++UI)
    Worklist.push_back(*UI);
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle.  Postpone
    // that until everything else is done.
    if (U == Old) {
      DeleteOld = true;
      continue;
    }
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    if (Instruction *I = dyn_cast<Instruction>(U))
      SE->ValuesAtScopes.erase(I);
    if (SE->Scalars.erase(U))
      for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
           UI != UE; ++UI)
        Worklist.push_back(*UI);
  }
  if (DeleteOld) {
    if (PHINode *PN = dyn_cast<PHINode>(Old))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    if (Instruction *I = dyn_cast<Instruction>(Old))
      SE->ValuesAtScopes.erase(I);
    SE->Scalars.erase(Old);
    // this now dangles!
  }
  // this may dangle!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution()
  : FunctionPass(&ID), CouldNotCompute(new SCEVCouldNotCompute()) {
}

bool ScalarEvolution::runOnFunction(Function &F) {
  this->F = &F;
  LI = &getAnalysis<LoopInfo>();
  TD = getAnalysisIfAvailable<TargetData>();
  return false;
}

void ScalarEvolution::releaseMemory() {
  Scalars.clear();
  BackedgeTakenCounts.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValuesAtScopes.clear();

  for (std::map<ConstantInt*, SCEVConstant*>::iterator
       I = SCEVConstants.begin(), E = SCEVConstants.end(); I != E; ++I)
    delete I->second;
  for (std::map<std::pair<const SCEV*, const Type*>,
         SCEVTruncateExpr*>::iterator I = SCEVTruncates.begin(),
         E = SCEVTruncates.end(); I != E; ++I)
    delete I->second;
  for (std::map<std::pair<const SCEV*, const Type*>,
         SCEVZeroExtendExpr*>::iterator I = SCEVZeroExtends.begin(),
         E = SCEVZeroExtends.end(); I != E; ++I)
    delete I->second;
  for (std::map<std::pair<unsigned, std::vector<const SCEV*> >,
         SCEVCommutativeExpr*>::iterator I = SCEVCommExprs.begin(),
         E = SCEVCommExprs.end(); I != E; ++I)
    delete I->second;
  for (std::map<std::pair<const SCEV*, const SCEV*>, SCEVUDivExpr*>::iterator
       I = SCEVUDivs.begin(), E = SCEVUDivs.end(); I != E; ++I)
    delete I->second;
  for (std::map<std::pair<const SCEV*, const Type*>,
         SCEVSignExtendExpr*>::iterator I = SCEVSignExtends.begin(),
         E = SCEVSignExtends.end(); I != E; ++I)
    delete I->second;
  for (std::map<std::pair<const Loop *, std::vector<const SCEV*> >,
         SCEVAddRecExpr*>::iterator I = SCEVAddRecExprs.begin(),
         E = SCEVAddRecExprs.end(); I != E; ++I)
    delete I->second;
  for (std::map<Value*, SCEVUnknown*>::iterator I = SCEVUnknowns.begin(),
       E = SCEVUnknowns.end(); I != E; ++I)
    delete I->second;

  SCEVConstants.clear();
  SCEVTruncates.clear();
  SCEVZeroExtends.clear();
  SCEVCommExprs.clear();
  SCEVUDivs.clear();
  SCEVSignExtends.clear();
  SCEVAddRecExprs.clear();
  SCEVUnknowns.clear();
}

void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    PrintLoopInfo(OS, SE, *I);

  OS << "Loop " << L->getHeader()->getName() << ": ";

  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
"; 4340 } 4341 4342 OS << "\n"; 4343 } 4344 4345 void ScalarEvolution::print(raw_ostream &OS, const Module* ) const { 4346 // ScalarEvolution's implementaiton of the print method is to print 4347 // out SCEV values of all instructions that are interesting. Doing 4348 // this potentially causes it to create new SCEV objects though, 4349 // which technically conflicts with the const qualifier. This isn't 4350 // observable from outside the class though (the hasSCEV function 4351 // notwithstanding), so casting away the const isn't dangerous. 4352 ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this); 4353 4354 OS << "Classifying expressions for: " << F->getName() << "\n"; 4355 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) 4356 if (isSCEVable(I->getType())) { 4357 OS << *I; 4358 OS << " --> "; 4359 const SCEV* SV = SE.getSCEV(&*I); 4360 SV->print(OS); 4361 4362 const Loop *L = LI->getLoopFor((*I).getParent()); 4363 4364 const SCEV* AtUse = SE.getSCEVAtScope(SV, L); 4365 if (AtUse != SV) { 4366 OS << " --> "; 4367 AtUse->print(OS); 4368 } 4369 4370 if (L) { 4371 OS << "\t\t" "Exits: "; 4372 const SCEV* ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 4373 if (!ExitValue->isLoopInvariant(L)) { 4374 OS << "<<Unknown>>"; 4375 } else { 4376 OS << *ExitValue; 4377 } 4378 } 4379 4380 OS << "\n"; 4381 } 4382 4383 OS << "Determining loop execution counts for: " << F->getName() << "\n"; 4384 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) 4385 PrintLoopInfo(OS, &SE, *I); 4386 } 4387 4388 void ScalarEvolution::print(std::ostream &o, const Module *M) const { 4389 raw_os_ostream OS(o); 4390 print(OS, M); 4391 } 4392