//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These objects are uniqued: we only create one SCEV of a
// particular shape, so pointer comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
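
// A brief orientation (an illustrative example, not drawn from the papers
// above): for a loop such as
//
//   for (i = 0; i != n; ++i)
//     A[3*i + 5] = 0;
//
// this analysis represents the value of the expression "3*i + 5" as the
// affine add recurrence {5,+,3}<loop>: a value that starts at 5 and advances
// by 3 on every iteration of the loop.  The folders below build and simplify
// expressions of this form.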

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;
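
// A minimal usage sketch for clients of this analysis (FooPass and its
// members are hypothetical, shown only to illustrate the standard legacy
// pass manager idiom for requesting and querying ScalarEvolution):
//
//   void FooPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<ScalarEvolution>();
//     AU.setPreservesAll();
//   }
//
//   bool FooPass::runOnLoop(Loop *L, LPPassManager &LPM) {
//     ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
//     const SCEV *BECount = SE.getBackedgeTakenCount(L);
//     if (!isa<SCEVCouldNotCompute>(BECount)) {
//       // ... use the symbolic backedge-taken count ...
//     }
//     return false;
//   }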

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//
SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const SCEV* SCEVCouldNotCompute::
replaceSymbolicValuesWithConcrete(const SCEV* Sym,
                                  const SCEV* Conc,
                                  ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}


// SCEVConstants - Only allow the creation of one SCEVConstant for any
// particular value.  Don't use a const SCEV* here, or else the object will
// never be deleted!

const SCEV* ScalarEvolution::getConstant(ConstantInt *V) {
  SCEVConstant *&R = SCEVConstants[V];
  if (R == 0) R = new SCEVConstant(V);
  return R;
}

const SCEV* ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(Val));
}

const SCEV*
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy,
                           const SCEV* op, const Type *ty)
  : SCEV(SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

// SCEVTruncates - Only allow the creation of one SCEVTruncateExpr for any
// particular input.  Don't use a const SCEV* here, or else the object will
// never be deleted!

SCEVTruncateExpr::SCEVTruncateExpr(const SCEV* op, const Type *ty)
  : SCEVCastExpr(scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

// SCEVZeroExtends - Only allow the creation of one SCEVZeroExtendExpr for any
// particular input.  Don't use a const SCEV* here, or else the object will
// never be deleted!

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV* op, const Type *ty)
  : SCEVCastExpr(scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

// SCEVSignExtends - Only allow the creation of one SCEVSignExtendExpr for any
// particular input.  Don't use a const SCEV* here, or else the object will
// never be deleted!

SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV* op, const Type *ty)
  : SCEVCastExpr(scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

// SCEVCommExprs - Only allow the creation of one SCEVCommutativeExpr for any
// particular input.  Don't use a const SCEV* here, or else the object will
// never be deleted!

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

const SCEV* SCEVCommutativeExpr::
replaceSymbolicValuesWithConcrete(const SCEV* Sym,
                                  const SCEV* Conc,
                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV* H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV*, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        assert(0 && "Unknown commutative expr!");
    }
  }
  return this;
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}


// SCEVUDivs - Only allow the creation of one SCEVUDivExpr for any particular
// input.  Don't use a const SCEV* here, or else the object will never be
// deleted!

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer.  ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander.  The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

// SCEVAddRecExprs - Only allow the creation of one SCEVAddRecExpr for any
// particular input.  Don't use a const SCEV* here, or else the object will
// never be deleted!

const SCEV* SCEVAddRecExpr::
replaceSymbolicValuesWithConcrete(const SCEV* Sym,
                                  const SCEV* Conc,
                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV* H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV*, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}


bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // This recurrence is invariant w.r.t. QueryLoop iff QueryLoop doesn't
  // contain L and if the start is invariant.
  // Add recurrences are never invariant in the function-body (null loop).
  return QueryLoop &&
         !QueryLoop->contains(L->getHeader()) &&
         getOperand(0)->isLoopInvariant(QueryLoop);
}


void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() + ">";
}

// SCEVUnknowns - Only allow the creation of one SCEVUnknown for any particular
// value.  Don't use a const SCEV* here, or else the object will never be
// deleted!

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      assert(0 && "Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector
/// are consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV*> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely
  // to be extremely short in practice.  Note that we take this approach
  // because we do not want to depend on the addresses of the objects we are
  // grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
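
// A minimal illustration of why the deterministic ordering matters (SA and
// SB stand for two arbitrary SCEVs a client already has in hand; getAddExpr
// is this file's own API):
//
//   const SCEV *S1 = SE.getAddExpr(SA, SB);
//   const SCEV *S2 = SE.getAddExpr(SB, SA);
//   assert(S1 == S2 && "both orders canonicalize to the same SCEV object");
//
// Because GroupByComplexity sorts operands by structural properties rather
// than by pointer address, both calls build the same canonical operand list
// and therefore return the same uniqued expression.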



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume K > 0.
static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV* Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV* S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV* DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
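
// A worked instance of the scheme above (numbers only, for orientation):
// for K = 3 and W = 32, K! = 6 = 2^1 * 3, so T = 1 and K!/2^T = 3.  The
// product It*(It-1)*(It-2) is formed at W+T = 33 bits, divided by 2^T = 2
// (a safe right shift, since the bottom 33 bits are accurate), truncated
// back to 32 bits, and finally multiplied by 0xAAAAAAAB, the multiplicative
// inverse of 3 modulo 2^32 (0xAAAAAAAB * 3 == 0x200000001 == 1 (mod 2^32)),
// which performs the exact division by 3.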

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
const SCEV* SCEVAddRecExpr::evaluateAtIteration(const SCEV* It,
                                                ScalarEvolution &SE) const {
  const SCEV* Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV* Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
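
// A small worked example of the formula above: the recurrence {0,+,1,+,1}
// evaluated at iteration n is
//
//   0*BC(n,0) + 1*BC(n,1) + 1*BC(n,2) = n + n*(n-1)/2 = n*(n+1)/2,
//
// i.e. the n-th triangular number, the closed form of a sum whose increment
// grows by one on each trip around the loop.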

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV*, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  SCEVTruncateExpr *&Result = SCEVTruncates[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVTruncateExpr(Op, Ty);
  return Result;
}

const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow in the old, smaller type, we can zero extend all of
  // the operands (often constants).  This allows analysis of something
  // like this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.  In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV* Start = AR->getStart();
        const SCEV* Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type.  The count is always unsigned.
        const SCEV* CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV* RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV* ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV* Add = getAddExpr(Start, ZMul);
          const SCEV* OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 AR->getLoop());

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV* SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  SCEVZeroExtendExpr *&Result = SCEVZeroExtends[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVZeroExtendExpr(Op, Ty);
  return Result;
}
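
// To make the overflow check above concrete: for the loop
// for (unsigned char X = 0; X < 100; ++X), X is the i8 recurrence
// {0,+,1}<loop> with a max backedge-taken count of 99.  Start +
// Step*MaxBECount = 99 fits in i8 with no unsigned wrap, and the
// doubled-width comparison confirms it, so zext i8 {0,+,1}<loop> to i32
// folds to the i32 recurrence {0,+,1}<loop> rather than remaining an
// opaque zext.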

const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow in the old, smaller type, we can sign extend all of
  // the operands (often constants).  This allows analysis of something
  // like this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.  In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV* Start = AR->getStart();
        const SCEV* Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type.  The count is always unsigned.
        const SCEV* CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV* RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV* SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV* Add = getAddExpr(Start, SMul);
          const SCEV* OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  SCEVSignExtendExpr *&Result = SCEVSignExtends[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVSignExtendExpr(Op, Ty);
  return Result;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV* NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast.  If the cast is folded, use it.
  const SCEV* ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast.  If the cast is folded, use it.
  const SCEV* SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map.  This is a helper function for getAddExpr.  As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed.  This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV*, APInt> &M,
                             SmallVector<const SCEV*, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV*> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value.  Update
        // the map.
        SmallVector<const SCEV*, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV* Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, APInt()));
        if (Pair.second) {
          Pair.first->second = NewScale;
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand.  Update the map.
      std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], APInt()));
      if (Pair.second) {
        Pair.first->second = Scale;
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV* Two = getIntegerSCEV(2, Ty);
      const SCEV* Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates.  If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded.  e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV*, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV*, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV* Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it.  Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV*, APInt> M;
    SmallVector<const SCEV*, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile
      // to re-generate the operands list.  Group the operands by constant
      // scale, to avoid multiplying by the same constant scale multiple
      // times.
      std::map<APInt, SmallVector<const SCEV*, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV*, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV*, 4>,
                    APIntCompare>::iterator I = MulOpLists.begin(),
           E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getIntegerSCEV(0, Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }
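
  // To illustrate the regrouping above with a small (hypothetical) input:
  // given the operand list (2 * x) + (3 * x) + 5, the collection pass maps
  // x to the accumulated scale 2 + 3 = 5 and moves the constant 5 into
  // AccumulatedConstant, so the list is re-generated as 5 + (5 * x).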

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV* InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV*, 4> MulOps(Mul->op_begin(),
                                               Mul->op_end());
            MulOps.erase(MulOps.begin()+MulOp);
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV* One = getIntegerSCEV(1, Ty);
          const SCEV* AddOne = getAddExpr(InnerMul, One);
          const SCEV* OuterMul = getMulExpr(AddOne, Ops[AddOp]);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV* InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV*, 4> MulOps(Mul->op_begin(),
                                                 Mul->op_end());
              MulOps.erase(MulOps.begin()+MulOp);
              InnerMul1 = getMulExpr(MulOps);
            }
            const SCEV* InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV*, 4> MulOps(OtherMul->op_begin(),
                                                 OtherMul->op_end());
              MulOps.erase(MulOps.begin()+OMulOp);
              InnerMul2 = getMulExpr(MulOps);
            }
            const SCEV* InnerMulSum = getAddExpr(InnerMul1, InnerMul2);
            const SCEV* OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV*, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV*, 4> AddRecOps(AddRec->op_begin(),
                                            AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      const SCEV* NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec by the non-liv parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
          SmallVector<const SCEV*, 4> NewOps(AddRec->op_begin(),
                                             AddRec->op_end());
          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
            if (i >= NewOps.size()) {
              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
                            OtherAddRec->op_end());
              break;
            }
            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
          }
          const SCEV* NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());

          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getAddExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
  SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scAddExpr,
                                                              SCEVOps)];
  if (Result == 0) Result = new SCEVAddExpr(Ops);
  return Result;
}
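
// A brief sketch of the folds above in action (X, L, and the recurrences
// are hypothetical): if X is invariant in loop L, getAddExpr(X, {1,+,2}<L>)
// folds the invariant addend into the recurrence start, yielding
// {X+1,+,2}<L>; adding two recurrences over the same loop, such as
// {1,+,2}<L> and {3,+,4}<L>, combines them pairwise into {4,+,6}<L>.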


/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV*> &Ops) {
  assert(!Ops.empty() && "Cannot get empty mul!");
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));


    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    }
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  if (Ops.size() == 1)
    return Ops[0];

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV*, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV*, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      if (LIOps.size() == 1) {
        const SCEV *Scale = LIOps[0];
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
      } else {
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
          SmallVector<const SCEV*, 4> MulOps(LIOps.begin(), LIOps.end());
          MulOps.push_back(AddRec->getOperand(i));
          NewOps.push_back(getMulExpr(MulOps));
        }
      }

      const SCEV* NewRec = getAddRecExpr(NewOps, AddRec->getLoop());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-liv parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together.  If so, we can fold them.
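    // A worked instance of the rule used below: with F = G = {0,+,1} (so
    // A = C = 0 and B = D = 1), NewStart = 0*0 = 0 and NewStep =
    // F*1 + G*1 + 1*1 = {1,+,2}, giving {0,+,1} * {0,+,1} = {0,+,1,+,2}.
    // Evaluating that chrec at iteration n via the binomial formula yields
    // n + 2*n*(n-1)/2 = n*n, the expected square.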
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); ++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
          const SCEV* NewStart = getMulExpr(F->getStart(),
                                            G->getStart());
          const SCEV* B = F->getStepRecurrence(*this);
          const SCEV* D = G->getStepRecurrence(*this);
          const SCEV* NewStep = getAddExpr(getMulExpr(F, D),
                                           getMulExpr(G, B),
                                           getMulExpr(B, D));
          const SCEV* NewAddRec = getAddRecExpr(NewStart, NewStep,
                                                F->getLoop());
          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getMulExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
  SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scMulExpr,
                                                              SCEVOps)];
  if (Result == 0)
    Result = new SCEVMulExpr(Ops);
  return Result;
}

/// getUDivExpr - Get a canonical unsigned division expression, or something
/// simpler if possible.
const SCEV* ScalarEvolution::getUDivExpr(const SCEV* LHS,
                                         const SCEV* RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->equalsInt(1))
      return LHS;                               // X udiv 1 --> X
    if (RHSC->isZero())
      return getIntegerSCEV(0, LHS->getType()); // value is undefined

    // Determine if the division can be folded into the operands of the
    // dividend.
    // TODO: Generalize this to non-constants by using known-bits information.
    const Type *Ty = LHS->getType();
    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
    // For non-power-of-two values, effectively round the value up to the
    // nearest power of two.
    if (!RHSC->getValue()->getValue().isPowerOf2())
      ++MaxShiftAmt;
    const IntegerType *ExtTy =
      IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
      if (const SCEVConstant *Step =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
        if (!Step->getValue()->getValue()
              .urem(RHSC->getValue()->getValue()) &&
            getZeroExtendExpr(AR, ExtTy) ==
            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                          getZeroExtendExpr(Step, ExtTy),
                          AR->getLoop())) {
          SmallVector<const SCEV*, 4> Operands;
          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
          return getAddRecExpr(Operands, AR->getLoop());
        }
    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
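    // Illustrative example: (8*X) udiv 4 becomes 2*X once the
    // zero-extension comparison below proves the multiply cannot wrap
    // and 8 udiv 4 folds cleanly.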
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
      SmallVector<const SCEV*, 4> Operands;
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
        // Find an operand that's safely divisible.
        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
          const SCEV* Op = M->getOperand(i);
          const SCEV* Div = getUDivExpr(Op, RHSC);
          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
            const SmallVectorImpl<const SCEV*> &MOperands = M->getOperands();
            Operands = SmallVector<const SCEV*, 4>(MOperands.begin(),
                                                   MOperands.end());
            Operands[i] = Div;
            return getMulExpr(Operands);
          }
        }
    }
    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
    if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
      SmallVector<const SCEV*, 4> Operands;
      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
        Operands.clear();
        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
          const SCEV* Op = getUDivExpr(A->getOperand(i), RHS);
          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
            break;
          Operands.push_back(Op);
        }
        if (Operands.size() == A->getNumOperands())
          return getAddExpr(Operands);
      }
    }

    // Fold if both operands are constant.
    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
      Constant *LHSCV = LHSC->getValue();
      Constant *RHSCV = RHSC->getValue();
      return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                 RHSCV)));
    }
  }

  SCEVUDivExpr *&Result = SCEVUDivs[std::make_pair(LHS, RHS)];
  if (Result == 0) Result = new SCEVUDivExpr(LHS, RHS);
  return Result;
}


/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV* ScalarEvolution::getAddRecExpr(const SCEV* Start,
                                           const SCEV* Step, const Loop *L) {
  SmallVector<const SCEV*, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.insert(Operands.end(), StepChrec->op_begin(),
                      StepChrec->op_end());
      return getAddRecExpr(Operands, L);
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L);
}

/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV*
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV*> &Operands,
                               const Loop *L) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
           getEffectiveSCEVType(Operands[0]->getType()) &&
           "SCEVAddRecExpr operand types don't match!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L); // {X,+,0}  -->  X
  }

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
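  // Illustrative example: a request for {{A,+,B}<inner>,+,C}<outer>, whose
  // start is a recurrence in the deeper inner loop, is rewritten below as
  // {{A,+,C}<outer>,+,B}<inner>, so the shallower loop's recurrence becomes
  // the start of the deeper loop's recurrence.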
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop* NestedLoop = NestedAR->getLoop();
    if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
      SmallVector<const SCEV*, 4> NestedOperands(NestedAR->op_begin(),
                                                 NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      NestedOperands[0] = getAddRecExpr(Operands, L);
      return getAddRecExpr(NestedOperands, NestedLoop);
    }
  }

  std::vector<const SCEV*> SCEVOps(Operands.begin(), Operands.end());
  SCEVAddRecExpr *&Result = SCEVAddRecExprs[std::make_pair(L, SCEVOps)];
  if (Result == 0) Result = new SCEVAddRecExpr(Operands, L);
  return Result;
}

const SCEV* ScalarEvolution::getSMaxExpr(const SCEV* LHS,
                                         const SCEV* RHS) {
  SmallVector<const SCEV*, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getSMaxExpr(Ops);
}

const SCEV*
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV*> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
                              APIntOps::smax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant -inf, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Find the first SMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax.  If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.
  // If so, delete one.  Since we sorted the list, these values are required
  // to be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr.  Check to see if we
  // already have one, otherwise create a new one.
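  // Illustrative trace: getSMaxExpr(smax(X, 5), 3) first inlines the inner
  // smax above, then folds the constants 5 and 3, reaching this point with
  // Ops = {5, X} to be uniqued below.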
  std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
  SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scSMaxExpr,
                                                              SCEVOps)];
  if (Result == 0) Result = new SCEVSMaxExpr(Ops);
  return Result;
}

const SCEV* ScalarEvolution::getUMaxExpr(const SCEV* LHS,
                                         const SCEV* RHS) {
  SmallVector<const SCEV*, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getUMaxExpr(Ops);
}

const SCEV*
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV*> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
                              APIntOps::umax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Find the first UMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax.  If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.
  // If so, delete one.  Since we sorted the list, these values are required
  // to be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr.  Check to see if we
  // already have one, otherwise create a new one.
  std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
  SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scUMaxExpr,
                                                              SCEVOps)];
  if (Result == 0) Result = new SCEVUMaxExpr(Ops);
  return Result;
}

const SCEV* ScalarEvolution::getSMinExpr(const SCEV* LHS,
                                         const SCEV* RHS) {
  // ~smax(~x, ~y) == smin(x, y).
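  // (Since ~z = -1 - z reverses the signed order, taking smax over the
  // complemented operands and complementing the result yields the minimum.
  // Illustrative check with x = 1, y = 2: ~smax(-2, -3) = ~(-2) = 1.)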
  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV* ScalarEvolution::getUMinExpr(const SCEV* LHS,
                                         const SCEV* RHS) {
  // ~umax(~x, ~y) == umin(x, y)
  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV* ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here.  createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  SCEVUnknown *&Result = SCEVUnknowns[V];
  if (Result == 0) Result = new SCEVUnknown(V);
  return Result;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// isSCEVable - Test if values of the given type are analyzable within
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
bool ScalarEvolution::isSCEVable(const Type *Ty) const {
  // Integers are always SCEVable.
  if (Ty->isInteger())
    return true;

  // Pointers are SCEVable if TargetData information is available
  // to provide pointer size information.
  if (isa<PointerType>(Ty))
    return TD != NULL;

  // Otherwise it's not SCEVable.
  return false;
}

/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  // If we have a TargetData, use it!
  if (TD)
    return TD->getTypeSizeInBits(Ty);

  // Otherwise, we support only integer types.
  assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
  return Ty->getPrimitiveSizeInBits();
}

/// getEffectiveSCEVType - Return a type with the same bitwidth as
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isInteger())
    return Ty;

  assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
  return TD->getIntPtrType();
}

const SCEV* ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute;
}

/// hasSCEV - Return true if the SCEV for this value has already been
/// computed.
bool ScalarEvolution::hasSCEV(Value *V) const {
  return Scalars.count(V);
}

/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
/// expression and create a new one.
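/// (Lookups are memoized: once a Value has been analyzed, later queries
/// return the cached SCEV from the Scalars map rather than re-running
/// createSCEV.)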
const SCEV* ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  std::map<SCEVCallbackVH, const SCEV*>::iterator I = Scalars.find(V);
  if (I != Scalars.end()) return I->second;
  const SCEV* S = createSCEV(V);
  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
  return S;
}

/// getIntegerSCEV - Given a SCEVable type, create a constant for the
/// specified signed integer value and return a SCEV for the constant.
const SCEV* ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, Val));
}

/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
///
const SCEV* ScalarEvolution::getNegativeSCEV(const SCEV* V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  const Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V, getConstant(ConstantInt::getAllOnesValue(Ty)));
}

/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
const SCEV* ScalarEvolution::getNotSCEV(const SCEV* V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  const Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV* AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty));
  return getMinusSCEV(AllOnes, V);
}

/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
///
const SCEV* ScalarEvolution::getMinusSCEV(const SCEV* LHS,
                                          const SCEV* RHS) {
  // X - Y --> X + -Y
  return getAddExpr(LHS, getNegativeSCEV(RHS));
}

/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type.  If the type must be extended, it
/// is zero extended.
const SCEV*
ScalarEvolution::getTruncateOrZeroExtend(const SCEV* V,
                                         const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type.  If the type must be extended, it
/// is sign extended.
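/// (For example, an i64 value converted to i32 is truncated, while an i16
/// value converted to i32 is sign extended; same-width conversions are
/// returned unchanged.)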
const SCEV*
ScalarEvolution::getTruncateOrSignExtend(const SCEV* V,
                                         const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is
/// zero extended.  The conversion must not be narrowing.
const SCEV*
ScalarEvolution::getNoopOrZeroExtend(const SCEV* V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is
/// sign extended.  The conversion must not be narrowing.
const SCEV*
ScalarEvolution::getNoopOrSignExtend(const SCEV* V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type.  If the type must be extended,
/// it is extended with unspecified bits.  The conversion must not be
/// narrowing.
const SCEV*
ScalarEvolution::getNoopOrAnyExtend(const SCEV* V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  The conversion must not be widening.
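/// (For example, i64 --> i32 truncates and i32 --> i32 is a no-op; a
/// widening request such as i16 --> i32 trips the assert below.)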
const SCEV*
ScalarEvolution::getTruncateOrNoop(const SCEV* V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
const SCEV* ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV* LHS,
                                                        const SCEV* RHS) {
  const SCEV* PromotedLHS = LHS;
  const SCEV* PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

/// getUMinFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umin operation
/// with them.
const SCEV* ScalarEvolution::getUMinFromMismatchedTypes(const SCEV* LHS,
                                                        const SCEV* RHS) {
  const SCEV* PromotedLHS = LHS;
  const SCEV* PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}

/// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value
/// for the specified instruction and replaces any references to the symbolic
/// value SymName with the specified value.  This is used during PHI
/// resolution.
void ScalarEvolution::
ReplaceSymbolicValueWithConcrete(Instruction *I, const SCEV* SymName,
                                 const SCEV* NewVal) {
  std::map<SCEVCallbackVH, const SCEV*>::iterator SI =
    Scalars.find(SCEVCallbackVH(I, this));
  if (SI == Scalars.end()) return;

  const SCEV* NV =
    SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this);
  if (NV == SI->second) return;  // No change.

  SI->second = NV;       // Update the scalars map!

  // Any instruction values that use this instruction might also need to be
  // updated!
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI)
    ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal);
}

/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists
/// in a loop header, making it a potential recurrence, or it doesn't.
///
const SCEV* ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
    if (const Loop *L = LI->getLoopFor(PN->getParent()))
      if (L->getHeader() == PN->getParent()) {
        // If it lives in the loop header, it has two incoming values, one
        // from outside the loop, and one from inside.
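        // (IncomingEdge below is 1 exactly when incoming block 0 lies
        // inside the loop, i.e. when block 0 is the back-edge; BackEdge is
        // then the opposite index.)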
        unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
        unsigned BackEdge     = IncomingEdge^1;

        // While we are analyzing this PHI node, handle its value
        // symbolically.
        const SCEV* SymbolicName = getUnknown(PN);
        assert(Scalars.find(PN) == Scalars.end() &&
               "PHI node already processed?");
        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));

        // Using this symbolic name for the PHI, analyze the value coming
        // around the back-edge.
        const SCEV* BEValue = getSCEV(PN->getIncomingValue(BackEdge));

        // NOTE: If BEValue is loop invariant, we know that the PHI node just
        // has a special value for the first iteration of the loop.

        // If the value coming around the backedge is an add with the
        // symbolic value we just inserted, then we found a simple induction
        // variable!
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
          // If there is a single occurrence of the symbolic value, replace
          // it with a recurrence.
          unsigned FoundIndex = Add->getNumOperands();
          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
            if (Add->getOperand(i) == SymbolicName)
              if (FoundIndex == e) {
                FoundIndex = i;
                break;
              }

          if (FoundIndex != Add->getNumOperands()) {
            // Create an add with everything but the specified operand.
            SmallVector<const SCEV*, 8> Ops;
            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
              if (i != FoundIndex)
                Ops.push_back(Add->getOperand(i));
            const SCEV* Accum = getAddExpr(Ops);

            // This is not a valid addrec if the step amount is varying each
            // loop iteration, but is not itself an addrec in this loop.
            if (Accum->isLoopInvariant(L) ||
                (isa<SCEVAddRecExpr>(Accum) &&
                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
              const SCEV* StartVal =
                getSCEV(PN->getIncomingValue(IncomingEdge));
              const SCEV* PHISCEV  = getAddRecExpr(StartVal, Accum, L);

              // Okay, for the entire analysis of this edge we assumed the
              // PHI to be symbolic.  We now need to go back and update all
              // of the entries for the scalars that use the PHI (except for
              // the PHI itself) to use the new analyzed value instead of
              // the "symbolic" value.
              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
              return PHISCEV;
            }
          }
        } else if (const SCEVAddRecExpr *AddRec =
                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
          // Otherwise, this could be a loop like this:
          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
          // In this case, j = {1,+,1} and BEValue is j.
          // Because the other incoming value of i (0) fits the evolution of
          // BEValue, i really is an addrec evolution.
          if (AddRec->getLoop() == L && AddRec->isAffine()) {
            const SCEV* StartVal =
              getSCEV(PN->getIncomingValue(IncomingEdge));

            // If StartVal = j.start - j.stride, we can use StartVal as the
            // initial value of the addrec evolution.
            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
                                         AddRec->getOperand(1))) {
              const SCEV* PHISCEV =
                getAddRecExpr(StartVal, AddRec->getOperand(1), L);

              // Okay, for the entire analysis of this edge we assumed the
              // PHI to be symbolic.  We now need to go back and update all
              // of the entries for the scalars that use the PHI (except for
              // the PHI itself) to use the new analyzed value instead of
              // the "symbolic" value.
              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
              return PHISCEV;
            }
          }
        }

        return SymbolicName;
      }

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

/// createNodeForGEP - Expand GEP instructions into add and multiply
/// operations.  This allows them to be analyzed by regular SCEV code.
///
const SCEV* ScalarEvolution::createNodeForGEP(User *GEP) {

  const Type *IntPtrTy = TD->getIntPtrType();
  Value *Base = GEP->getOperand(0);
  // Don't attempt to analyze GEPs over unsized objects.
  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
    return getUnknown(GEP);
  const SCEV* TotalOffset = getIntegerSCEV(0, IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
                                      E = GEP->op_end();
       I != E; ++I) {
    Value *Index = *I;
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
      // For a struct, add the member offset.
      const StructLayout &SL = *TD->getStructLayout(STy);
      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
      uint64_t Offset = SL.getElementOffset(FieldNo);
      TotalOffset = getAddExpr(TotalOffset,
                               getIntegerSCEV(Offset, IntPtrTy));
    } else {
      // For an array, add the element offset, explicitly scaled.
      const SCEV* LocalOffset = getSCEV(Index);
      if (!isa<PointerType>(LocalOffset->getType()))
        // Getelementptr indices are signed.
        LocalOffset = getTruncateOrSignExtend(LocalOffset,
                                              IntPtrTy);
      LocalOffset =
        getMulExpr(LocalOffset,
                   getIntegerSCEV(TD->getTypeAllocSize(*GTI),
                                  IntPtrTy));
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }
  return getAddExpr(getSCEV(Base), TotalOffset);
}

/// GetMinTrailingZeros - Determine the minimum number of zero bits that S
/// is guaranteed to end in (at every loop iteration).  It is, equivalently,
/// the base-two logarithm of the largest power of two that is guaranteed to
/// divide S.  For example, given {4,+,8} it returns 2.  If S is guaranteed
/// to be 0, it returns the bitwidth of S.
uint32_t
ScalarEvolution::GetMinTrailingZeros(const SCEV* S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getValue()->getValue().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands' results.
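    // (e.g. X + 4 with X known divisible by 8: min(3, 2) = 2 trailing
    // zeros, so the sum is guaranteed divisible by 4.)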
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands' results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
                          BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    APInt Mask = APInt::getAllOnesValue(BitWidth);
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
    return Zeros.countTrailingOnes();
  }

  // SCEVUDivExpr
  return 0;
}

uint32_t
ScalarEvolution::GetMinLeadingZeros(const SCEV* S) {
  // TODO: Handle other SCEV expression types here.

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getValue()->getValue().countLeadingZeros();

  if (const SCEVZeroExtendExpr *C = dyn_cast<SCEVZeroExtendExpr>(S)) {
    // A zero-extension cast adds zero bits.
    return GetMinLeadingZeros(C->getOperand()) +
           (getTypeSizeInBits(C->getType()) -
            getTypeSizeInBits(C->getOperand()->getType()));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    APInt Mask = APInt::getAllOnesValue(BitWidth);
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
    return Zeros.countLeadingOnes();
  }

  // Conservatively assume nothing is known; claiming even one leading zero
  // here would be unsound for values that may be negative.
  return 0;
}

uint32_t
ScalarEvolution::GetMinSignBits(const SCEV* S) {
  // TODO: Handle other SCEV expression types here.

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    const APInt &A = C->getValue()->getValue();
    return A.isNegative() ? A.countLeadingOnes()
                          : A.countLeadingZeros();
  }

  if (const SCEVSignExtendExpr *C = dyn_cast<SCEVSignExtendExpr>(S)) {
    // A sign-extension cast adds sign bits.
    return GetMinSignBits(C->getOperand()) +
           (getTypeSizeInBits(C->getType()) -
            getTypeSizeInBits(C->getOperand()->getType()));
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    unsigned BitWidth = getTypeSizeInBits(A->getType());

    // Special case decrementing a value (ADD X, -1):
    if (const SCEVConstant *CRHS = dyn_cast<SCEVConstant>(A->getOperand(0)))
      if (CRHS->isAllOnesValue()) {
        SmallVector<const SCEV *, 4> OtherOps(A->op_begin() + 1, A->op_end());
        const SCEV *OtherOpsAdd = getAddExpr(OtherOps);
        unsigned LZ = GetMinLeadingZeros(OtherOpsAdd);

        // If the input is known to be 0 or 1, the output is 0/-1, which is
        // all sign bits set.
        if (LZ == BitWidth - 1)
          return BitWidth;

        // If we are subtracting one from a positive number, there is no
        // carry out of the result.
        if (LZ > 0)
          return GetMinSignBits(OtherOpsAdd);
      }

    // Add can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    unsigned Min = BitWidth;
    for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
      unsigned N = GetMinSignBits(A->getOperand(i));
      Min = std::min(Min, N) - 1;
      if (Min == 0) return 1;
    }
    return Min;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    return ComputeNumSignBits(U->getValue(), TD);
  }

  return 1;
}

/// createSCEV - We know that there is no SCEV for the specified value.
/// Analyze the expression.
///
const SCEV* ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  unsigned Opcode = Instruction::UserOp1;
  if (Instruction *I = dyn_cast<Instruction>(V))
    Opcode = I->getOpcode();
  else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    Opcode = CE->getOpcode();
  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (isa<ConstantPointerNull>(V))
    return getIntegerSCEV(0, V->getType());
  else if (isa<UndefValue>(V))
    return getIntegerSCEV(0, V->getType());
  else
    return getUnknown(V);

  User *U = cast<User>(V);
  switch (Opcode) {
  case Instruction::Add:
    return getAddExpr(getSCEV(U->getOperand(0)),
                      getSCEV(U->getOperand(1)));
  case Instruction::Mul:
    return getMulExpr(getSCEV(U->getOperand(0)),
                      getSCEV(U->getOperand(1)));
  case Instruction::UDiv:
    return getUDivExpr(getSCEV(U->getOperand(0)),
                       getSCEV(U->getOperand(1)));
  case Instruction::Sub:
    return getMinusSCEV(getSCEV(U->getOperand(0)),
                        getSCEV(U->getOperand(1)));
  case Instruction::And:
    // For an expression like x&255 that merely masks off the high bits,
    // use zext(trunc(x)) as the SCEV expression.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      if (CI->isNullValue())
        return getSCEV(U->getOperand(1));
      if (CI->isAllOnesValue())
        return getSCEV(U->getOperand(0));
      const APInt &A = CI->getValue();

      // Instcombine's ShrinkDemandedConstant may strip bits out of
      // constants, obscuring what would otherwise be a low-bits mask.
      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
      // knew about to reconstruct a low-bits mask value.
      unsigned LZ = A.countLeadingZeros();
      unsigned BitWidth = A.getBitWidth();
      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);

      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);

      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
        return
          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
                                            IntegerType::get(BitWidth - LZ)),
                            U->getType());
    }
    break;

  case Instruction::Or:
    // If the RHS of the Or is a constant, we may have something like:
    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
    // optimizations will transparently handle this case.
    //
    // In order for this transformation to be safe, the LHS must be of the
    // form X*(2^n) and the Or constant must be less than 2^n.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      const SCEV* LHS = getSCEV(U->getOperand(0));
      const APInt &CIVal = CI->getValue();
      if (GetMinTrailingZeros(LHS) >=
          (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
        return getAddExpr(LHS, getSCEV(U->getOperand(1)));
    }
    break;
  case Instruction::Xor:
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      // If the RHS of the xor is a signbit, then this is just an add.
      // Instcombine turns add of signbit into xor as a strength reduction
      // step.
      if (CI->getValue().isSignBit())
        return getAddExpr(getSCEV(U->getOperand(0)),
                          getSCEV(U->getOperand(1)));

      // If the RHS of xor is -1, then this is a not operation.
      if (CI->isAllOnesValue())
        return getNotSCEV(getSCEV(U->getOperand(0)));

      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
      // This is a variant of the check for xor with -1, and it handles
      // the case where instcombine has trimmed non-demanded bits out
      // of an xor with -1.
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
          if (BO->getOpcode() == Instruction::And &&
              LCI->getValue() == CI->getValue())
            if (const SCEVZeroExtendExpr *Z =
                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
              const Type *UTy = U->getType();
              const SCEV* Z0 = Z->getOperand();
              const Type *Z0Ty = Z0->getType();
              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

              // If C is a low-bits mask, the zero extend is serving to
              // mask off the high bits.  Complement the operand and
              // re-apply the zext.
              if (APIntOps::isMask(Z0TySize, CI->getValue()))
                return getZeroExtendExpr(getNotSCEV(Z0), UTy);

              // If C is a single bit, it may be in the sign-bit position
              // before the zero-extend.  In this case, represent the xor
              // using an add, which is equivalent, and re-apply the zext.
              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                  Trunc.isSignBit())
                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                         UTy);
            }
    }
    break;

  case Instruction::Shl:
    // Turn shift left of a constant amount into a multiply.
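    // (e.g. X << 3 becomes X * 8; the shift amount is capped at the bit
    // width via getLimitedValue so the APInt shl below stays in range.)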
    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
      Constant *X = ConstantInt::get(
        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
    }
    break;

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
      Constant *X = ConstantInt::get(
        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
    }
    break;

  case Instruction::AShr:
    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
      if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
        if (L->getOpcode() == Instruction::Shl &&
            L->getOperand(1) == U->getOperand(1)) {
          unsigned BitWidth = getTypeSizeInBits(U->getType());
          uint64_t Amt = BitWidth - CI->getZExtValue();
          if (Amt == BitWidth)
            return getSCEV(L->getOperand(0));       // shift by zero --> noop
          if (Amt > BitWidth)
            return getIntegerSCEV(0, U->getType()); // value is undefined
          return
            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
                                              IntegerType::get(Amt)),
                              U->getType());
        }
    break;

  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  case Instruction::IntToPtr:
    if (!TD) break; // Without TD we can't analyze pointers.
    return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
                                   TD->getIntPtrType());

  case Instruction::PtrToInt:
    if (!TD) break; // Without TD we can't analyze pointers.
    return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
                                   U->getType());

  case Instruction::GetElementPtr:
    if (!TD) break; // Without TD we can't analyze pointers.
    return createNodeForGEP(U);

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // This could be a smax or umax that was lowered earlier.
    // Try to recover it.
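    // (e.g. "%m = select (icmp sgt %x, %y), %x, %y" is recovered as
    // smax(%x, %y) below; the slt/sle cases swap the compared operands
    // first so one pattern match handles both orientations.)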
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
      Value *LHS = ICI->getOperand(0);
      Value *RHS = ICI->getOperand(1);
      switch (ICI->getPredicate()) {
      case ICmpInst::ICMP_SLT:
      case ICmpInst::ICMP_SLE:
        std::swap(LHS, RHS);
        // fall through
      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_SGE:
        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
          return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
          return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
        break;
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_ULE:
        std::swap(LHS, RHS);
        // fall through
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_UGE:
        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
          return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
          return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
        break;
      case ICmpInst::ICMP_NE:
        // n != 0 ? n : 1  ->  umax(n, 1)
        if (LHS == U->getOperand(1) &&
            isa<ConstantInt>(U->getOperand(2)) &&
            cast<ConstantInt>(U->getOperand(2))->isOne() &&
            isa<ConstantInt>(RHS) &&
            cast<ConstantInt>(RHS)->isZero())
          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
        break;
      case ICmpInst::ICMP_EQ:
        // n == 0 ? 1 : n  ->  umax(n, 1)
        if (LHS == U->getOperand(2) &&
            isa<ConstantInt>(U->getOperand(1)) &&
            cast<ConstantInt>(U->getOperand(1))->isOne() &&
            isa<ConstantInt>(RHS) &&
            cast<ConstantInt>(RHS)->isZero())
          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
        break;
      default:
        break;
      }
    }

  default: // We cannot analyze this expression.
    break;
  }

  return getUnknown(V);
}



//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

/// getBackedgeTakenCount - If the specified loop has a predictable
/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
/// object.  The backedge-taken count is the number of times the loop header
/// will be branched to from within the loop.  This is one less than the
/// trip count of the loop, since it doesn't count the first iteration,
/// when the header is branched to from outside the loop.
///
/// Note that it is not valid to call this method on a loop without a
/// loop-invariant backedge-taken count (see
/// hasLoopInvariantBackedgeTakenCount).
///
const SCEV* ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).Exact;
}

/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
/// return the least SCEV value that is known never to be less than the
/// actual backedge taken count.
const SCEV* ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).Max;
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert a CouldNotCompute for this loop.  If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value.
  // The temporary CouldNotCompute value tells SCEV code elsewhere that it
  // shouldn't attempt to request a new backedge-taken count, which could
  // result in infinite recursion.
  std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
  if (Pair.second) {
    BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
    if (ItCount.Exact != CouldNotCompute) {
      assert(ItCount.Exact->isLoopInvariant(L) &&
             ItCount.Max->isLoopInvariant(L) &&
             "Computed trip count isn't loop invariant for loop!");
      ++NumTripCountsComputed;

      // Update the value in the map.
      Pair.first->second = ItCount;
    } else {
      if (ItCount.Max != CouldNotCompute)
        // Update the value in the map.
        Pair.first->second = ItCount;
      if (isa<PHINode>(L->getHeader()->begin()))
        // Only count loops that have phi nodes as not being computable.
        ++NumTripCountsNotComputed;
    }

    // Now that we know more about the trip count for this loop, forget any
    // existing SCEV values for PHI nodes in this loop since they are only
    // conservative estimates made without the benefit
    // of trip count information.
    if (ItCount.hasAnyInfo())
      forgetLoopPHIs(L);
  }
  return Pair.first->second;
}

/// forgetLoopBackedgeTakenCount - This method should be called by the
/// client when it has changed a loop in a way that may affect
/// ScalarEvolution's ability to compute a trip count, or if the loop
/// is deleted.
void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
  BackedgeTakenCounts.erase(L);
  forgetLoopPHIs(L);
}

/// forgetLoopPHIs - Delete the memoized SCEVs associated with the
/// PHI nodes in the given loop.  This is used when the trip count of
/// the loop may have changed.
void ScalarEvolution::forgetLoopPHIs(const Loop *L) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack, except those
  // that are presently represented via a SCEVUnknown.  SCEVUnknown for
  // a PHI either means that it has an unrecognized structure, or it's
  // a PHI that's in the process of being computed by createNodeForPHI.
  // In the former case, additional loop trip count information isn't
  // going to change anything.  In the latter case, createNodeForPHI will
  // perform the necessary updates on its own when it gets to that point.
  SmallVector<Instruction *, 16> Worklist;
  for (BasicBlock::iterator I = Header->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I) {
    std::map<SCEVCallbackVH, const SCEV*>::iterator It =
      Scalars.find((Value*)I);
    if (It != Scalars.end() && !isa<SCEVUnknown>(It->second))
      Worklist.push_back(PN);
  }

  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (Scalars.erase(I))
      for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
           UI != UE; ++UI)
        Worklist.push_back(cast<Instruction>(UI));
  }
}

/// ComputeBackedgeTakenCount - Compute the number of times the backedge
/// of the specified loop will execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Examine all exits and pick the most conservative values.
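  // (Exact and maximum counts are tracked independently below: even when
  // no single exact count can be chosen, a conservative maximum, the umax
  // of the per-exit maxima, may still be available.)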
  const SCEV* BECount = CouldNotCompute;
  const SCEV* MaxBECount = CouldNotCompute;
  bool CouldNotComputeBECount = false;
  bool CouldNotComputeMaxBECount = false;
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BackedgeTakenInfo NewBTI =
      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);

    if (NewBTI.Exact == CouldNotCompute) {
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldNotComputeBECount = true;
      BECount = CouldNotCompute;
    } else if (!CouldNotComputeBECount) {
      if (BECount == CouldNotCompute)
        BECount = NewBTI.Exact;
      else {
        // TODO: More analysis could be done here.  For example, a
        // loop with a short-circuiting && operator has an exact count
        // of the min of both sides.
        CouldNotComputeBECount = true;
        BECount = CouldNotCompute;
      }
    }
    if (NewBTI.Max == CouldNotCompute) {
      // We couldn't compute a maximum value for this exit, so
      // we won't be able to compute a maximum value for the loop.
      CouldNotComputeMaxBECount = true;
      MaxBECount = CouldNotCompute;
    } else if (!CouldNotComputeMaxBECount) {
      if (MaxBECount == CouldNotCompute)
        MaxBECount = NewBTI.Max;
      else
        MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, NewBTI.Max);
    }
  }

  return BackedgeTakenInfo(BECount, MaxBECount);
}

/// ComputeBackedgeTakenCountFromExit - Compute the number of times the
/// backedge of the specified loop will execute if it exits via the
/// specified block.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
                                                   BasicBlock *ExitingBlock) {

  // Okay, we've chosen an exiting block.  See what condition causes us to
  // exit at this block.
  //
  // FIXME: we should be able to handle switch instructions (with a single
  // exit).
  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
  if (ExitBr == 0) return CouldNotCompute;
  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");

  // At this point, we know we have a conditional branch that determines
  // whether the loop is exited.  However, we don't know if the branch is
  // executed each time through the loop.  If not, then the execution count
  // of the branch will not be equal to the trip count of the loop.
  //
  // Currently we check for this by checking to see if the Exit branch goes
  // to the loop header.  If so, we know it will always execute the same
  // number of times as the loop.  We also handle the case where the exit
  // block *is* the loop header.  This is common for un-rotated loops.
  //
  // If both of those tests fail, walk up the unique predecessor chain to
  // the header, stopping if there is an edge that doesn't exit the loop.
  // If the header is reached, the execution count of the branch will be
  // equal to the trip count of the loop.
  //
  // More extensive analysis could be done to handle more cases here.
  //
  if (ExitBr->getSuccessor(0) != L->getHeader() &&
      ExitBr->getSuccessor(1) != L->getHeader() &&
      ExitBr->getParent() != L->getHeader()) {
    // The simple checks failed, try climbing the unique predecessor chain
    // up to the header.
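    // (Each block on the chain must have a unique predecessor whose only
    // in-loop successor is the block itself; otherwise the exit branch
    // might not execute exactly once per iteration and we give up.)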
    bool Ok = false;
    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
      BasicBlock *Pred = BB->getUniquePredecessor();
      if (!Pred)
        return CouldNotCompute;
      TerminatorInst *PredTerm = Pred->getTerminator();
      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
        if (PredSucc == BB)
          continue;
        // If the predecessor has a successor that isn't BB and isn't
        // outside the loop, assume the worst.
        if (L->contains(PredSucc))
          return CouldNotCompute;
      }
      if (Pred == L->getHeader()) {
        Ok = true;
        break;
      }
      BB = Pred;
    }
    if (!Ok)
      return CouldNotCompute;
  }

  // Proceed to the next level to examine the exit condition expression.
  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
                                               ExitBr->getSuccessor(0),
                                               ExitBr->getSuccessor(1));
}

/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition
/// were a conditional branch of ExitCond, TBB, and FBB.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
                                                       Value *ExitCond,
                                                       BasicBlock *TBB,
                                                       BasicBlock *FBB) {
  // Check if the controlling expression for this loop is an and or or.  In
  // such cases, an exact backedge-taken count may be infeasible, but a
  // maximum count may still be feasible.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
    if (BO->getOpcode() == Instruction::And) {
      // Recurse on the operands of the and.
      BackedgeTakenInfo BTI0 =
        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
      BackedgeTakenInfo BTI1 =
        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
      const SCEV* BECount = CouldNotCompute;
      const SCEV* MaxBECount = CouldNotCompute;
      if (L->contains(TBB)) {
        // Both conditions must be true for the loop to continue executing.
        // Choose the less conservative count.
        if (BTI0.Exact == CouldNotCompute || BTI1.Exact == CouldNotCompute)
          BECount = CouldNotCompute;
        else
          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
        if (BTI0.Max == CouldNotCompute)
          MaxBECount = BTI1.Max;
        else if (BTI1.Max == CouldNotCompute)
          MaxBECount = BTI0.Max;
        else
          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
      } else {
        // Both conditions must be true for the loop to exit.
        assert(L->contains(FBB) && "Loop block has no successor in loop!");
        if (BTI0.Exact != CouldNotCompute && BTI1.Exact != CouldNotCompute)
          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
        if (BTI0.Max != CouldNotCompute && BTI1.Max != CouldNotCompute)
          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
      }

      return BackedgeTakenInfo(BECount, MaxBECount);
    }
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      BackedgeTakenInfo BTI0 =
        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
      BackedgeTakenInfo BTI1 =
        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
      const SCEV* BECount = CouldNotCompute;
      const SCEV* MaxBECount = CouldNotCompute;
      if (L->contains(FBB)) {
        // Both conditions must be false for the loop to continue executing.
2974 // Choose the less conservative count.
2975 if (BTI0.Exact == CouldNotCompute || BTI1.Exact == CouldNotCompute)
2976 BECount = CouldNotCompute;
2977 else
2978 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
2979 if (BTI0.Max == CouldNotCompute)
2980 MaxBECount = BTI1.Max;
2981 else if (BTI1.Max == CouldNotCompute)
2982 MaxBECount = BTI0.Max;
2983 else
2984 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
2985 } else {
2986 // Both conditions must be false for the loop to exit.
2987 assert(L->contains(TBB) && "Loop block has no successor in loop!");
2988 if (BTI0.Exact != CouldNotCompute && BTI1.Exact != CouldNotCompute)
2989 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
2990 if (BTI0.Max != CouldNotCompute && BTI1.Max != CouldNotCompute)
2991 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
2992 }
2993
2994 return BackedgeTakenInfo(BECount, MaxBECount);
2995 }
2996 }
2997
2998 // With an icmp, it may be feasible to compute an exact backedge-taken count.
2999 // Proceed to the next level to examine the icmp.
3000 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3001 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3002
3003 // If it's not an integer or pointer comparison then compute it the hard way.
3004 return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3005 }
3006
3007 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3008 /// backedge of the specified loop will execute if its exit condition
3009 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
3010 ScalarEvolution::BackedgeTakenInfo
3011 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3012 ICmpInst *ExitCond,
3013 BasicBlock *TBB,
3014 BasicBlock *FBB) {
3015
3016 // If the condition was exit on true, convert the condition to exit on false
3017 ICmpInst::Predicate Cond;
3018 if (!L->contains(FBB))
3019 Cond = ExitCond->getPredicate();
3020 else
3021 Cond = ExitCond->getInversePredicate();
3022
3023 // Handle common loops like: for (X = "string"; *X; ++X)
3024 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3025 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3026 const SCEV* ItCnt =
3027 ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3028 if (!isa<SCEVCouldNotCompute>(ItCnt)) {
3029 unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
3030 return BackedgeTakenInfo(ItCnt,
3031 isa<SCEVConstant>(ItCnt) ? ItCnt :
3032 getConstant(APInt::getMaxValue(BitWidth)-1));
3033 }
3034 }
3035
3036 const SCEV* LHS = getSCEV(ExitCond->getOperand(0));
3037 const SCEV* RHS = getSCEV(ExitCond->getOperand(1));
3038
3039 // Try to evaluate any dependencies out of the loop.
3040 LHS = getSCEVAtScope(LHS, L);
3041 RHS = getSCEVAtScope(RHS, L);
3042
3043 // At this point, we would like to compute how many iterations of the
3044 // loop the predicate will return true for these inputs.
3045 if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3046 // If there is a loop-invariant, force it into the RHS.
3047 std::swap(LHS, RHS);
3048 Cond = ICmpInst::getSwappedPredicate(Cond);
3049 }
3050
3051 // If we have a comparison of a chrec against a constant, try to use value
3052 // ranges to answer this query.
3053 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3054 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3055 if (AddRec->getLoop() == L) {
3056 // Form the constant range.
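// (Illustrative, assumed example: for an AddRec {0,+,1} compared with
// "i ult 10", Cond is ICMP_ULT and makeConstantRange yields [0,10); the
// trip count is then the first iteration whose value leaves that range.)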
3057 ConstantRange CompRange(
3058 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3059
3060 const SCEV* Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3061 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3062 }
3063
3064 switch (Cond) {
3065 case ICmpInst::ICMP_NE: { // while (X != Y)
3066 // Convert to: while (X-Y != 0)
3067 const SCEV* TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3068 if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3069 break;
3070 }
3071 case ICmpInst::ICMP_EQ: { // while (X == Y)
3072 // Convert to: while (X-Y == 0)
3073 const SCEV* TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3074 if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3075 break;
3076 }
3077 case ICmpInst::ICMP_SLT: {
3078 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3079 if (BTI.hasAnyInfo()) return BTI;
3080 break;
3081 }
3082 case ICmpInst::ICMP_SGT: {
3083 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3084 getNotSCEV(RHS), L, true);
3085 if (BTI.hasAnyInfo()) return BTI;
3086 break;
3087 }
3088 case ICmpInst::ICMP_ULT: {
3089 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3090 if (BTI.hasAnyInfo()) return BTI;
3091 break;
3092 }
3093 case ICmpInst::ICMP_UGT: {
3094 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3095 getNotSCEV(RHS), L, false);
3096 if (BTI.hasAnyInfo()) return BTI;
3097 break;
3098 }
3099 default:
3100 #if 0
3101 errs() << "ComputeBackedgeTakenCount ";
3102 if (ExitCond->getOperand(0)->getType()->isUnsigned())
3103 errs() << "[unsigned] ";
3104 errs() << *LHS << " "
3105 << Instruction::getOpcodeName(Instruction::ICmp)
3106 << " " << *RHS << "\n";
3107 #endif
3108 break;
3109 }
3110 return
3111 ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3112 }
3113
3114 static ConstantInt *
3115 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3116 ScalarEvolution &SE) {
3117 const SCEV* InVal = SE.getConstant(C);
3118 const SCEV* Val = AddRec->evaluateAtIteration(InVal, SE);
3119 assert(isa<SCEVConstant>(Val) &&
3120 "Evaluation of SCEV at constant didn't fold correctly?");
3121 return cast<SCEVConstant>(Val)->getValue();
3122 }
3123
3124 /// GetAddressedElementFromGlobal - Given a global variable with an initializer
3125 /// and a GEP expression (missing the pointer index) indexing into it, return
3126 /// the addressed element of the initializer or null if the index expression is
3127 /// invalid.
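/// For example (an assumed illustration): for a global of type
/// [10 x [20 x i32]] and Indices {3, 5}, this walks the initializer and
/// returns the constant stored at element [3][5].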
3128 static Constant * 3129 GetAddressedElementFromGlobal(GlobalVariable *GV, 3130 const std::vector<ConstantInt*> &Indices) { 3131 Constant *Init = GV->getInitializer(); 3132 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 3133 uint64_t Idx = Indices[i]->getZExtValue(); 3134 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) { 3135 assert(Idx < CS->getNumOperands() && "Bad struct index!"); 3136 Init = cast<Constant>(CS->getOperand(Idx)); 3137 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) { 3138 if (Idx >= CA->getNumOperands()) return 0; // Bogus program 3139 Init = cast<Constant>(CA->getOperand(Idx)); 3140 } else if (isa<ConstantAggregateZero>(Init)) { 3141 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) { 3142 assert(Idx < STy->getNumElements() && "Bad struct index!"); 3143 Init = Constant::getNullValue(STy->getElementType(Idx)); 3144 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) { 3145 if (Idx >= ATy->getNumElements()) return 0; // Bogus program 3146 Init = Constant::getNullValue(ATy->getElementType()); 3147 } else { 3148 assert(0 && "Unknown constant aggregate type!"); 3149 } 3150 return 0; 3151 } else { 3152 return 0; // Unknown initializer type 3153 } 3154 } 3155 return Init; 3156 } 3157 3158 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of 3159 /// 'icmp op load X, cst', try to see if we can compute the backedge 3160 /// execution count. 3161 const SCEV* ScalarEvolution:: 3162 ComputeLoadConstantCompareBackedgeTakenCount(LoadInst *LI, Constant *RHS, 3163 const Loop *L, 3164 ICmpInst::Predicate predicate) { 3165 if (LI->isVolatile()) return CouldNotCompute; 3166 3167 // Check to see if the loaded pointer is a getelementptr of a global. 3168 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 3169 if (!GEP) return CouldNotCompute; 3170 3171 // Make sure that it is really a constant global we are gepping, with an 3172 // initializer, and make sure the first IDX is really 0. 3173 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 3174 if (!GV || !GV->isConstant() || !GV->hasInitializer() || 3175 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 3176 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 3177 return CouldNotCompute; 3178 3179 // Okay, we allow one non-constant index into the GEP instruction. 3180 Value *VarIdx = 0; 3181 std::vector<ConstantInt*> Indexes; 3182 unsigned VarIdxNum = 0; 3183 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 3184 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 3185 Indexes.push_back(CI); 3186 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 3187 if (VarIdx) return CouldNotCompute; // Multiple non-constant idx's. 3188 VarIdx = GEP->getOperand(i); 3189 VarIdxNum = i-2; 3190 Indexes.push_back(0); 3191 } 3192 3193 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 3194 // Check to see if X is a loop variant variable value now. 3195 const SCEV* Idx = getSCEV(VarIdx); 3196 Idx = getSCEVAtScope(Idx, L); 3197 3198 // We can only recognize very limited forms of loop index expressions, in 3199 // particular, only affine AddRec's like {C1,+,C2}. 
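// (e.g., in the "for (X = "string"; *X; ++X)" idiom handled here, the
// index expression is {0,+,1}: a constant start stepping by a constant.)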
3200 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 3201 if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) || 3202 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 3203 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 3204 return CouldNotCompute; 3205 3206 unsigned MaxSteps = MaxBruteForceIterations; 3207 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 3208 ConstantInt *ItCst = 3209 ConstantInt::get(cast<IntegerType>(IdxExpr->getType()), IterationNum); 3210 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 3211 3212 // Form the GEP offset. 3213 Indexes[VarIdxNum] = Val; 3214 3215 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes); 3216 if (Result == 0) break; // Cannot compute! 3217 3218 // Evaluate the condition for this iteration. 3219 Result = ConstantExpr::getICmp(predicate, Result, RHS); 3220 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 3221 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 3222 #if 0 3223 errs() << "\n***\n*** Computed loop count " << *ItCst 3224 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader() 3225 << "***\n"; 3226 #endif 3227 ++NumArrayLenItCounts; 3228 return getConstant(ItCst); // Found terminating iteration! 3229 } 3230 } 3231 return CouldNotCompute; 3232 } 3233 3234 3235 /// CanConstantFold - Return true if we can constant fold an instruction of the 3236 /// specified type, assuming that all operands were constants. 3237 static bool CanConstantFold(const Instruction *I) { 3238 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 3239 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I)) 3240 return true; 3241 3242 if (const CallInst *CI = dyn_cast<CallInst>(I)) 3243 if (const Function *F = CI->getCalledFunction()) 3244 return canConstantFoldCallTo(F); 3245 return false; 3246 } 3247 3248 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 3249 /// in the loop that V is derived from. We allow arbitrary operations along the 3250 /// way, but the operands of an operation must either be constants or a value 3251 /// derived from a constant PHI. If this expression does not fit with these 3252 /// constraints, return null. 3253 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 3254 // If this is not an instruction, or if this is an instruction outside of the 3255 // loop, it can't be derived from a loop PHI. 3256 Instruction *I = dyn_cast<Instruction>(V); 3257 if (I == 0 || !L->contains(I->getParent())) return 0; 3258 3259 if (PHINode *PN = dyn_cast<PHINode>(I)) { 3260 if (L->getHeader() == I->getParent()) 3261 return PN; 3262 else 3263 // We don't currently keep track of the control flow needed to evaluate 3264 // PHIs, so we cannot handle PHIs inside of loops. 3265 return 0; 3266 } 3267 3268 // If we won't be able to constant fold this expression even if the operands 3269 // are constants, return early. 3270 if (!CanConstantFold(I)) return 0; 3271 3272 // Otherwise, we can evaluate this instruction if all of its operands are 3273 // constant or derived from a PHI node themselves. 
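// Illustrative (assumed) example of an acceptable expression:
//   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//   %i.next = add i32 %i, 1
// Here getConstantEvolvingPHI(%i.next, L) returns %i: the add's only
// non-constant operand chains back to that single header PHI.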
3274 PHINode *PHI = 0;
3275 for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3276 if (!(isa<Constant>(I->getOperand(Op)) ||
3277 isa<GlobalValue>(I->getOperand(Op)))) {
3278 PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3279 if (P == 0) return 0; // Not evolving from PHI
3280 if (PHI == 0)
3281 PHI = P;
3282 else if (PHI != P)
3283 return 0; // Evolving from multiple different PHIs.
3284 }
3285
3286 // This is an expression evolving from a constant PHI!
3287 return PHI;
3288 }
3289
3290 /// EvaluateExpression - Given an expression that passes the
3291 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3292 /// in the loop has the value PHIVal. If we can't fold this expression for some
3293 /// reason, return null.
3294 static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3295 if (isa<PHINode>(V)) return PHIVal;
3296 if (Constant *C = dyn_cast<Constant>(V)) return C;
3297 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3298 Instruction *I = cast<Instruction>(V);
3299
3300 std::vector<Constant*> Operands;
3301 Operands.resize(I->getNumOperands());
3302
3303 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3304 Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3305 if (Operands[i] == 0) return 0;
3306 }
3307
3308 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3309 return ConstantFoldCompareInstOperands(CI->getPredicate(),
3310 &Operands[0], Operands.size());
3311 else
3312 return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3313 &Operands[0], Operands.size());
3314 }
3315
3316 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
3317 /// in the header of its containing loop, we know the loop executes a
3318 /// constant number of times, and the PHI node is just a recurrence
3319 /// involving constants, fold it.
3320 Constant *ScalarEvolution::
3321 getConstantEvolutionLoopExitValue(PHINode *PN, const APInt& BEs, const Loop *L){
3322 std::map<PHINode*, Constant*>::iterator I =
3323 ConstantEvolutionLoopExitValue.find(PN);
3324 if (I != ConstantEvolutionLoopExitValue.end())
3325 return I->second;
3326
3327 if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3328 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
3329
3330 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3331
3332 // Since the loop is canonicalized, the PHI node must have two entries. One
3333 // entry must be a constant (coming in from outside of the loop), and the
3334 // second must be derived from the same PHI.
3335 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3336 Constant *StartCST =
3337 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3338 if (StartCST == 0)
3339 return RetVal = 0; // Must be a constant.
3340
3341 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3342 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3343 if (PN2 != PN)
3344 return RetVal = 0; // Not derived from same PHI.
3345
3346 // Execute the loop symbolically to determine the exit value.
3347 if (BEs.getActiveBits() >= 32)
3348 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3349
3350 unsigned NumIterations = BEs.getZExtValue(); // must be in range
3351 unsigned IterationNum = 0;
3352 for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3353 if (IterationNum == NumIterations)
3354 return RetVal = PHIVal; // Got exit value!
3355
3356 // Compute the value of the PHI node for the next iteration.
3357 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3358 if (NextPHI == PHIVal)
3359 return RetVal = NextPHI; // Stopped evolving!
3360 if (NextPHI == 0)
3361 return 0; // Couldn't evaluate!
3362 PHIVal = NextPHI;
3363 }
3364 }
3365
3366 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
3367 /// constant number of times (the condition evolves only from constants),
3368 /// try to evaluate a few iterations of the loop until the exit
3369 /// condition gets a value of ExitWhen (true or false). If we cannot
3370 /// evaluate the trip count of the loop, return CouldNotCompute.
3371 const SCEV* ScalarEvolution::
3372 ComputeBackedgeTakenCountExhaustively(const Loop *L, Value *Cond, bool ExitWhen) {
3373 PHINode *PN = getConstantEvolvingPHI(Cond, L);
3374 if (PN == 0) return CouldNotCompute;
3375
3376 // Since the loop is canonicalized, the PHI node must have two entries. One
3377 // entry must be a constant (coming in from outside of the loop), and the
3378 // second must be derived from the same PHI.
3379 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3380 Constant *StartCST =
3381 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3382 if (StartCST == 0) return CouldNotCompute; // Must be a constant.
3383
3384 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3385 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3386 if (PN2 != PN) return CouldNotCompute; // Not derived from same PHI.
3387
3388 // Okay, we found a PHI node that defines the trip count of this loop. Execute
3389 // the loop symbolically to determine when the condition gets a value of
3390 // "ExitWhen".
3391 unsigned IterationNum = 0;
3392 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
3393 for (Constant *PHIVal = StartCST;
3394 IterationNum != MaxIterations; ++IterationNum) {
3395 ConstantInt *CondVal =
3396 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3397
3398 // Couldn't symbolically evaluate.
3399 if (!CondVal) return CouldNotCompute;
3400
3401 if (CondVal->getValue() == uint64_t(ExitWhen)) {
3402 ConstantEvolutionLoopExitValue[PN] = PHIVal;
3403 ++NumBruteForceTripCountsComputed;
3404 return getConstant(Type::Int32Ty, IterationNum);
3405 }
3406
3407 // Compute the value of the PHI node for the next iteration.
3408 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3409 if (NextPHI == 0 || NextPHI == PHIVal)
3410 return CouldNotCompute; // Couldn't evaluate or not making progress...
3411 PHIVal = NextPHI;
3412 }
3413
3414 // Too many iterations were needed to evaluate.
3415 return CouldNotCompute;
3416 }
3417
3418 /// getSCEVAtScope - Return a SCEV expression handle for the specified value
3419 /// at the specified scope in the program. The L value specifies the loop
3420 /// nest to evaluate the expression in: null means the top-level scope, and
3421 /// a non-null loop means the scope immediately inside that loop.
3422 ///
3423 /// This method can be used to compute the exit value for a variable defined
3424 /// in a loop by querying what the value will hold in the parent loop.
3425 ///
3426 /// In the case that a relevant loop exit value cannot be computed, the
3427 /// original value V is returned.
3428 const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3429 // FIXME: this should be turned into a virtual method on SCEV!
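// Illustrative (assumed) example: querying an inner-loop AddRec
// {0,+,1}<L2> at the scope of its parent loop evaluates the AddRec at
// L2's backedge-taken count, yielding the value the induction variable
// holds once L2 has exited (see the SCEVAddRecExpr case below).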
3430 3431 if (isa<SCEVConstant>(V)) return V; 3432 3433 // If this instruction is evolved from a constant-evolving PHI, compute the 3434 // exit value from the loop without using SCEVs. 3435 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 3436 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 3437 const Loop *LI = (*this->LI)[I->getParent()]; 3438 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 3439 if (PHINode *PN = dyn_cast<PHINode>(I)) 3440 if (PN->getParent() == LI->getHeader()) { 3441 // Okay, there is no closed form solution for the PHI node. Check 3442 // to see if the loop that contains it has a known backedge-taken 3443 // count. If so, we may be able to force computation of the exit 3444 // value. 3445 const SCEV* BackedgeTakenCount = getBackedgeTakenCount(LI); 3446 if (const SCEVConstant *BTCC = 3447 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 3448 // Okay, we know how many times the containing loop executes. If 3449 // this is a constant evolving PHI node, get the final value at 3450 // the specified iteration number. 3451 Constant *RV = getConstantEvolutionLoopExitValue(PN, 3452 BTCC->getValue()->getValue(), 3453 LI); 3454 if (RV) return getUnknown(RV); 3455 } 3456 } 3457 3458 // Okay, this is an expression that we cannot symbolically evaluate 3459 // into a SCEV. Check to see if it's possible to symbolically evaluate 3460 // the arguments into constants, and if so, try to constant propagate the 3461 // result. This is particularly useful for computing loop exit values. 3462 if (CanConstantFold(I)) { 3463 // Check to see if we've folded this instruction at this loop before. 3464 std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I]; 3465 std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair = 3466 Values.insert(std::make_pair(L, static_cast<Constant *>(0))); 3467 if (!Pair.second) 3468 return Pair.first->second ? &*getUnknown(Pair.first->second) : V; 3469 3470 std::vector<Constant*> Operands; 3471 Operands.reserve(I->getNumOperands()); 3472 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 3473 Value *Op = I->getOperand(i); 3474 if (Constant *C = dyn_cast<Constant>(Op)) { 3475 Operands.push_back(C); 3476 } else { 3477 // If any of the operands is non-constant and if they are 3478 // non-integer and non-pointer, don't even try to analyze them 3479 // with scev techniques. 
3480 if (!isSCEVable(Op->getType())) 3481 return V; 3482 3483 const SCEV* OpV = getSCEVAtScope(getSCEV(Op), L); 3484 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) { 3485 Constant *C = SC->getValue(); 3486 if (C->getType() != Op->getType()) 3487 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 3488 Op->getType(), 3489 false), 3490 C, Op->getType()); 3491 Operands.push_back(C); 3492 } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) { 3493 if (Constant *C = dyn_cast<Constant>(SU->getValue())) { 3494 if (C->getType() != Op->getType()) 3495 C = 3496 ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 3497 Op->getType(), 3498 false), 3499 C, Op->getType()); 3500 Operands.push_back(C); 3501 } else 3502 return V; 3503 } else { 3504 return V; 3505 } 3506 } 3507 } 3508 3509 Constant *C; 3510 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 3511 C = ConstantFoldCompareInstOperands(CI->getPredicate(), 3512 &Operands[0], Operands.size()); 3513 else 3514 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), 3515 &Operands[0], Operands.size()); 3516 Pair.first->second = C; 3517 return getUnknown(C); 3518 } 3519 } 3520 3521 // This is some other type of SCEVUnknown, just return it. 3522 return V; 3523 } 3524 3525 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 3526 // Avoid performing the look-up in the common case where the specified 3527 // expression has no loop-variant portions. 3528 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 3529 const SCEV* OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 3530 if (OpAtScope != Comm->getOperand(i)) { 3531 // Okay, at least one of these operands is loop variant but might be 3532 // foldable. Build a new instance of the folded commutative expression. 3533 SmallVector<const SCEV*, 8> NewOps(Comm->op_begin(), Comm->op_begin()+i); 3534 NewOps.push_back(OpAtScope); 3535 3536 for (++i; i != e; ++i) { 3537 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 3538 NewOps.push_back(OpAtScope); 3539 } 3540 if (isa<SCEVAddExpr>(Comm)) 3541 return getAddExpr(NewOps); 3542 if (isa<SCEVMulExpr>(Comm)) 3543 return getMulExpr(NewOps); 3544 if (isa<SCEVSMaxExpr>(Comm)) 3545 return getSMaxExpr(NewOps); 3546 if (isa<SCEVUMaxExpr>(Comm)) 3547 return getUMaxExpr(NewOps); 3548 assert(0 && "Unknown commutative SCEV type!"); 3549 } 3550 } 3551 // If we got here, all operands are loop invariant. 3552 return Comm; 3553 } 3554 3555 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 3556 const SCEV* LHS = getSCEVAtScope(Div->getLHS(), L); 3557 const SCEV* RHS = getSCEVAtScope(Div->getRHS(), L); 3558 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 3559 return Div; // must be loop invariant 3560 return getUDivExpr(LHS, RHS); 3561 } 3562 3563 // If this is a loop recurrence for a loop that does not contain L, then we 3564 // are dealing with the final value computed by the loop. 3565 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 3566 if (!L || !AddRec->getLoop()->contains(L->getHeader())) { 3567 // To evaluate this recurrence, we need to know how many times the AddRec 3568 // loop iterates. Compute this now. 3569 const SCEV* BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 3570 if (BackedgeTakenCount == CouldNotCompute) return AddRec; 3571 3572 // Then, evaluate the AddRec. 
3573 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 3574 } 3575 return AddRec; 3576 } 3577 3578 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 3579 const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); 3580 if (Op == Cast->getOperand()) 3581 return Cast; // must be loop invariant 3582 return getZeroExtendExpr(Op, Cast->getType()); 3583 } 3584 3585 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 3586 const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); 3587 if (Op == Cast->getOperand()) 3588 return Cast; // must be loop invariant 3589 return getSignExtendExpr(Op, Cast->getType()); 3590 } 3591 3592 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 3593 const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); 3594 if (Op == Cast->getOperand()) 3595 return Cast; // must be loop invariant 3596 return getTruncateExpr(Op, Cast->getType()); 3597 } 3598 3599 assert(0 && "Unknown SCEV type!"); 3600 return 0; 3601 } 3602 3603 /// getSCEVAtScope - This is a convenience function which does 3604 /// getSCEVAtScope(getSCEV(V), L). 3605 const SCEV* ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 3606 return getSCEVAtScope(getSCEV(V), L); 3607 } 3608 3609 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the 3610 /// following equation: 3611 /// 3612 /// A * X = B (mod N) 3613 /// 3614 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 3615 /// A and B isn't important. 3616 /// 3617 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 3618 static const SCEV* SolveLinEquationWithOverflow(const APInt &A, const APInt &B, 3619 ScalarEvolution &SE) { 3620 uint32_t BW = A.getBitWidth(); 3621 assert(BW == B.getBitWidth() && "Bit widths must be the same."); 3622 assert(A != 0 && "A must be non-zero."); 3623 3624 // 1. D = gcd(A, N) 3625 // 3626 // The gcd of A and N may have only one prime factor: 2. The number of 3627 // trailing zeros in A is its multiplicity 3628 uint32_t Mult2 = A.countTrailingZeros(); 3629 // D = 2^Mult2 3630 3631 // 2. Check if B is divisible by D. 3632 // 3633 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 3634 // is not less than multiplicity of this prime factor for D. 3635 if (B.countTrailingZeros() < Mult2) 3636 return SE.getCouldNotCompute(); 3637 3638 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 3639 // modulo (N / D). 3640 // 3641 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this 3642 // bit width during computations. 3643 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 3644 APInt Mod(BW + 1, 0); 3645 Mod.set(BW - Mult2); // Mod = N / D 3646 APInt I = AD.multiplicativeInverse(Mod); 3647 3648 // 4. Compute the minimum unsigned root of the equation: 3649 // I * (B / D) mod (N / D) 3650 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod); 3651 3652 // The result is guaranteed to be less than 2^BW so we may truncate it to BW 3653 // bits. 3654 return SE.getConstant(Result.trunc(BW)); 3655 } 3656 3657 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the 3658 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which 3659 /// might be the same) or two SCEVCouldNotCompute objects. 
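/// A chrec {L,+,M,+,N} evaluates at iteration x to
///   L + M*x + N*(x*(x-1)/2) = (N/2)*x^2 + (M - N/2)*x + L,
/// which is where the A = N/2, B = M - N/2, C = L coefficients used in
/// the quadratic formula below come from.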
3660 /// 3661 static std::pair<const SCEV*,const SCEV*> 3662 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 3663 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 3664 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 3665 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 3666 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 3667 3668 // We currently can only solve this if the coefficients are constants. 3669 if (!LC || !MC || !NC) { 3670 const SCEV *CNC = SE.getCouldNotCompute(); 3671 return std::make_pair(CNC, CNC); 3672 } 3673 3674 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth(); 3675 const APInt &L = LC->getValue()->getValue(); 3676 const APInt &M = MC->getValue()->getValue(); 3677 const APInt &N = NC->getValue()->getValue(); 3678 APInt Two(BitWidth, 2); 3679 APInt Four(BitWidth, 4); 3680 3681 { 3682 using namespace APIntOps; 3683 const APInt& C = L; 3684 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C 3685 // The B coefficient is M-N/2 3686 APInt B(M); 3687 B -= sdiv(N,Two); 3688 3689 // The A coefficient is N/2 3690 APInt A(N.sdiv(Two)); 3691 3692 // Compute the B^2-4ac term. 3693 APInt SqrtTerm(B); 3694 SqrtTerm *= B; 3695 SqrtTerm -= Four * (A * C); 3696 3697 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest 3698 // integer value or else APInt::sqrt() will assert. 3699 APInt SqrtVal(SqrtTerm.sqrt()); 3700 3701 // Compute the two solutions for the quadratic formula. 3702 // The divisions must be performed as signed divisions. 3703 APInt NegB(-B); 3704 APInt TwoA( A << 1 ); 3705 if (TwoA.isMinValue()) { 3706 const SCEV *CNC = SE.getCouldNotCompute(); 3707 return std::make_pair(CNC, CNC); 3708 } 3709 3710 ConstantInt *Solution1 = ConstantInt::get((NegB + SqrtVal).sdiv(TwoA)); 3711 ConstantInt *Solution2 = ConstantInt::get((NegB - SqrtVal).sdiv(TwoA)); 3712 3713 return std::make_pair(SE.getConstant(Solution1), 3714 SE.getConstant(Solution2)); 3715 } // end APIntOps namespace 3716 } 3717 3718 /// HowFarToZero - Return the number of times a backedge comparing the specified 3719 /// value to zero will execute. If not computable, return CouldNotCompute. 3720 const SCEV* ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { 3721 // If the value is a constant 3722 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 3723 // If the value is already zero, the branch will execute zero times. 3724 if (C->getValue()->isZero()) return C; 3725 return CouldNotCompute; // Otherwise it will loop infinitely. 3726 } 3727 3728 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); 3729 if (!AddRec || AddRec->getLoop() != L) 3730 return CouldNotCompute; 3731 3732 if (AddRec->isAffine()) { 3733 // If this is an affine expression, the execution count of this branch is 3734 // the minimum unsigned root of the following equation: 3735 // 3736 // Start + Step*N = 0 (mod 2^BW) 3737 // 3738 // equivalent to: 3739 // 3740 // Step*N = -Start (mod 2^BW) 3741 // 3742 // where BW is the common bit width of Start and Step. 3743 3744 // Get the initial value for the loop. 3745 const SCEV* Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 3746 const SCEV* Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 3747 3748 if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) { 3749 // For now we handle only constant steps. 3750 3751 // First, handle unitary steps. 
3752 if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so: 3753 return getNegativeSCEV(Start); // N = -Start (as unsigned) 3754 if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so: 3755 return Start; // N = Start (as unsigned) 3756 3757 // Then, try to solve the above equation provided that Start is constant. 3758 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) 3759 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(), 3760 -StartC->getValue()->getValue(), 3761 *this); 3762 } 3763 } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) { 3764 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 3765 // the quadratic equation to solve it. 3766 std::pair<const SCEV*,const SCEV*> Roots = SolveQuadraticEquation(AddRec, 3767 *this); 3768 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 3769 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 3770 if (R1) { 3771 #if 0 3772 errs() << "HFTZ: " << *V << " - sol#1: " << *R1 3773 << " sol#2: " << *R2 << "\n"; 3774 #endif 3775 // Pick the smallest positive root value. 3776 if (ConstantInt *CB = 3777 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, 3778 R1->getValue(), R2->getValue()))) { 3779 if (CB->getZExtValue() == false) 3780 std::swap(R1, R2); // R1 is the minimum root now. 3781 3782 // We can only use this value if the chrec ends up with an exact zero 3783 // value at this index. When solving for "X*X != 5", for example, we 3784 // should not accept a root of 2. 3785 const SCEV* Val = AddRec->evaluateAtIteration(R1, *this); 3786 if (Val->isZero()) 3787 return R1; // We found a quadratic root! 3788 } 3789 } 3790 } 3791 3792 return CouldNotCompute; 3793 } 3794 3795 /// HowFarToNonZero - Return the number of times a backedge checking the 3796 /// specified value for nonzero will execute. If not computable, return 3797 /// CouldNotCompute 3798 const SCEV* ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { 3799 // Loops that look like: while (X == 0) are very strange indeed. We don't 3800 // handle them yet except for the trivial case. This could be expanded in the 3801 // future as needed. 3802 3803 // If the value is a constant, check to see if it is known to be non-zero 3804 // already. If so, the backedge will execute zero times. 3805 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 3806 if (!C->getValue()->isNullValue()) 3807 return getIntegerSCEV(0, C->getType()); 3808 return CouldNotCompute; // Otherwise it will loop infinitely. 3809 } 3810 3811 // We could implement others, but I really doubt anyone writes loops like 3812 // this, and if they did, they would already be constant folded. 3813 return CouldNotCompute; 3814 } 3815 3816 /// getLoopPredecessor - If the given loop's header has exactly one unique 3817 /// predecessor outside the loop, return it. Otherwise return null. 3818 /// 3819 BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) { 3820 BasicBlock *Header = L->getHeader(); 3821 BasicBlock *Pred = 0; 3822 for (pred_iterator PI = pred_begin(Header), E = pred_end(Header); 3823 PI != E; ++PI) 3824 if (!L->contains(*PI)) { 3825 if (Pred && Pred != *PI) return 0; // Multiple predecessors. 
3826 Pred = *PI; 3827 } 3828 return Pred; 3829 } 3830 3831 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB 3832 /// (which may not be an immediate predecessor) which has exactly one 3833 /// successor from which BB is reachable, or null if no such block is 3834 /// found. 3835 /// 3836 BasicBlock * 3837 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 3838 // If the block has a unique predecessor, then there is no path from the 3839 // predecessor to the block that does not go through the direct edge 3840 // from the predecessor to the block. 3841 if (BasicBlock *Pred = BB->getSinglePredecessor()) 3842 return Pred; 3843 3844 // A loop's header is defined to be a block that dominates the loop. 3845 // If the header has a unique predecessor outside the loop, it must be 3846 // a block that has exactly one successor that can reach the loop. 3847 if (Loop *L = LI->getLoopFor(BB)) 3848 return getLoopPredecessor(L); 3849 3850 return 0; 3851 } 3852 3853 /// HasSameValue - SCEV structural equivalence is usually sufficient for 3854 /// testing whether two expressions are equal, however for the purposes of 3855 /// looking for a condition guarding a loop, it can be useful to be a little 3856 /// more general, since a front-end may have replicated the controlling 3857 /// expression. 3858 /// 3859 static bool HasSameValue(const SCEV* A, const SCEV* B) { 3860 // Quick check to see if they are the same SCEV. 3861 if (A == B) return true; 3862 3863 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 3864 // two different instructions with the same value. Check for this case. 3865 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 3866 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 3867 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 3868 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 3869 if (AI->isIdenticalTo(BI)) 3870 return true; 3871 3872 // Otherwise assume they may have a different value. 3873 return false; 3874 } 3875 3876 /// isLoopGuardedByCond - Test whether entry to the loop is protected by 3877 /// a conditional between LHS and RHS. This is used to help avoid max 3878 /// expressions in loop trip counts. 3879 bool ScalarEvolution::isLoopGuardedByCond(const Loop *L, 3880 ICmpInst::Predicate Pred, 3881 const SCEV *LHS, const SCEV *RHS) { 3882 // Interpret a null as meaning no loop, where there is obviously no guard 3883 // (interprocedural conditions notwithstanding). 3884 if (!L) return false; 3885 3886 BasicBlock *Predecessor = getLoopPredecessor(L); 3887 BasicBlock *PredecessorDest = L->getHeader(); 3888 3889 // Starting at the loop predecessor, climb up the predecessor chain, as long 3890 // as there are predecessors that can be found that have unique successors 3891 // leading to the original header. 3892 for (; Predecessor; 3893 PredecessorDest = Predecessor, 3894 Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) { 3895 3896 BranchInst *LoopEntryPredicate = 3897 dyn_cast<BranchInst>(Predecessor->getTerminator()); 3898 if (!LoopEntryPredicate || 3899 LoopEntryPredicate->isUnconditional()) 3900 continue; 3901 3902 ICmpInst *ICI = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition()); 3903 if (!ICI) continue; 3904 3905 // Now that we found a conditional branch that dominates the loop, check to 3906 // see if it is the comparison we are looking for. 
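// For example (illustrative): a rotated loop commonly looks like
//   if (n > 0) { do { ... } while (++i < n); }
// and the "< n" guard on entry lets the trip count be computed directly
// rather than as a umax/smax expression.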
3907 Value *PreCondLHS = ICI->getOperand(0); 3908 Value *PreCondRHS = ICI->getOperand(1); 3909 ICmpInst::Predicate Cond; 3910 if (LoopEntryPredicate->getSuccessor(0) == PredecessorDest) 3911 Cond = ICI->getPredicate(); 3912 else 3913 Cond = ICI->getInversePredicate(); 3914 3915 if (Cond == Pred) 3916 ; // An exact match. 3917 else if (!ICmpInst::isTrueWhenEqual(Cond) && Pred == ICmpInst::ICMP_NE) 3918 ; // The actual condition is beyond sufficient. 3919 else 3920 // Check a few special cases. 3921 switch (Cond) { 3922 case ICmpInst::ICMP_UGT: 3923 if (Pred == ICmpInst::ICMP_ULT) { 3924 std::swap(PreCondLHS, PreCondRHS); 3925 Cond = ICmpInst::ICMP_ULT; 3926 break; 3927 } 3928 continue; 3929 case ICmpInst::ICMP_SGT: 3930 if (Pred == ICmpInst::ICMP_SLT) { 3931 std::swap(PreCondLHS, PreCondRHS); 3932 Cond = ICmpInst::ICMP_SLT; 3933 break; 3934 } 3935 continue; 3936 case ICmpInst::ICMP_NE: 3937 // Expressions like (x >u 0) are often canonicalized to (x != 0), 3938 // so check for this case by checking if the NE is comparing against 3939 // a minimum or maximum constant. 3940 if (!ICmpInst::isTrueWhenEqual(Pred)) 3941 if (ConstantInt *CI = dyn_cast<ConstantInt>(PreCondRHS)) { 3942 const APInt &A = CI->getValue(); 3943 switch (Pred) { 3944 case ICmpInst::ICMP_SLT: 3945 if (A.isMaxSignedValue()) break; 3946 continue; 3947 case ICmpInst::ICMP_SGT: 3948 if (A.isMinSignedValue()) break; 3949 continue; 3950 case ICmpInst::ICMP_ULT: 3951 if (A.isMaxValue()) break; 3952 continue; 3953 case ICmpInst::ICMP_UGT: 3954 if (A.isMinValue()) break; 3955 continue; 3956 default: 3957 continue; 3958 } 3959 Cond = ICmpInst::ICMP_NE; 3960 // NE is symmetric but the original comparison may not be. Swap 3961 // the operands if necessary so that they match below. 3962 if (isa<SCEVConstant>(LHS)) 3963 std::swap(PreCondLHS, PreCondRHS); 3964 break; 3965 } 3966 continue; 3967 default: 3968 // We weren't able to reconcile the condition. 3969 continue; 3970 } 3971 3972 if (!PreCondLHS->getType()->isInteger()) continue; 3973 3974 const SCEV* PreCondLHSSCEV = getSCEV(PreCondLHS); 3975 const SCEV* PreCondRHSSCEV = getSCEV(PreCondRHS); 3976 if ((HasSameValue(LHS, PreCondLHSSCEV) && 3977 HasSameValue(RHS, PreCondRHSSCEV)) || 3978 (HasSameValue(LHS, getNotSCEV(PreCondRHSSCEV)) && 3979 HasSameValue(RHS, getNotSCEV(PreCondLHSSCEV)))) 3980 return true; 3981 } 3982 3983 return false; 3984 } 3985 3986 /// getBECount - Subtract the end and start values and divide by the step, 3987 /// rounding up, to get the number of times the backedge is executed. Return 3988 /// CouldNotCompute if an intermediate computation overflows. 3989 const SCEV* ScalarEvolution::getBECount(const SCEV* Start, 3990 const SCEV* End, 3991 const SCEV* Step) { 3992 const Type *Ty = Start->getType(); 3993 const SCEV* NegOne = getIntegerSCEV(-1, Ty); 3994 const SCEV* Diff = getMinusSCEV(End, Start); 3995 const SCEV* RoundUp = getAddExpr(Step, NegOne); 3996 3997 // Add an adjustment to the difference between End and Start so that 3998 // the division will effectively round up. 3999 const SCEV* Add = getAddExpr(Diff, RoundUp); 4000 4001 // Check Add for unsigned overflow. 4002 // TODO: More sophisticated things could be done here. 
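// The check below (a sketch of the reasoning, not new logic): recompute
// the sum in a type one bit wider; if zero-extending the narrow sum gives
// a different value than adding the zero-extended operands, the narrow
// addition must have wrapped, so we refuse to divide.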
4003 const Type *WideTy = IntegerType::get(getTypeSizeInBits(Ty) + 1);
4004 const SCEV* OperandExtendedAdd =
4005 getAddExpr(getZeroExtendExpr(Diff, WideTy),
4006 getZeroExtendExpr(RoundUp, WideTy));
4007 if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
4008 return CouldNotCompute;
4009
4010 return getUDivExpr(Add, Step);
4011 }
4012
4013 /// HowManyLessThans - Return the number of times a backedge containing the
4014 /// specified less-than comparison will execute. If not computable, return
4015 /// CouldNotCompute.
4016 ScalarEvolution::BackedgeTakenInfo ScalarEvolution::
4017 HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
4018 const Loop *L, bool isSigned) {
4019 // Only handle: "ADDREC < LoopInvariant".
4020 if (!RHS->isLoopInvariant(L)) return CouldNotCompute;
4021
4022 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
4023 if (!AddRec || AddRec->getLoop() != L)
4024 return CouldNotCompute;
4025
4026 if (AddRec->isAffine()) {
4027 // FORNOW: We only support unit strides.
4028 unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
4029 const SCEV* Step = AddRec->getStepRecurrence(*this);
4030
4031 // TODO: handle non-constant strides.
4032 const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
4033 if (!CStep || CStep->isZero())
4034 return CouldNotCompute;
4035 if (CStep->isOne()) {
4036 // With unit stride, the iteration never steps past the limit value.
4037 } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
4038 if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
4039 // Test whether a positive iteration can step past the limit
4040 // value and past the maximum value for its type in a single step.
4041 if (isSigned) {
4042 APInt Max = APInt::getSignedMaxValue(BitWidth);
4043 if ((Max - CStep->getValue()->getValue())
4044 .slt(CLimit->getValue()->getValue()))
4045 return CouldNotCompute;
4046 } else {
4047 APInt Max = APInt::getMaxValue(BitWidth);
4048 if ((Max - CStep->getValue()->getValue())
4049 .ult(CLimit->getValue()->getValue()))
4050 return CouldNotCompute;
4051 }
4052 } else
4053 // TODO: handle non-constant limit values below.
4054 return CouldNotCompute;
4055 } else
4056 // TODO: handle negative strides below.
4057 return CouldNotCompute;
4058
4059 // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
4060 // m. So, we count the number of iterations in which {n,+,s} < m is true.
4061 // Note that we cannot simply return max(m-n,0)/s because it's not safe to
4062 // treat m-n as signed nor unsigned due to overflow possibility.
4063
4064 // First, we get the value of the LHS in the first iteration: n
4065 const SCEV* Start = AddRec->getOperand(0);
4066
4067 // Determine the minimum constant start value.
4068 const SCEV* MinStart = isa<SCEVConstant>(Start) ? Start :
4069 getConstant(isSigned ? APInt::getSignedMinValue(BitWidth) :
4070 APInt::getMinValue(BitWidth));
4071
4072 // If we know that the condition is true in order to enter the loop,
4073 // then we know that it will run exactly (m-n)/s times. Otherwise, we
4074 // only know that it will execute (max(m,n)-n)/s times. In both cases,
4075 // the division must round up.
4076 const SCEV* End = RHS;
4077 if (!isLoopGuardedByCond(L,
4078 isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
4079 getMinusSCEV(Start, Step), RHS))
4080 End = isSigned ? getSMaxExpr(RHS, Start)
4081 : getUMaxExpr(RHS, Start);
4082
4083 // Determine the maximum constant end value.
4084 const SCEV* MaxEnd =
4085 isa<SCEVConstant>(End) ?
End :
4086 getConstant(isSigned ? APInt::getSignedMaxValue(BitWidth)
4087 .ashr(GetMinSignBits(End) - 1) :
4088 APInt::getMaxValue(BitWidth)
4089 .lshr(GetMinLeadingZeros(End)));
4090
4091 // Finally, we subtract these two values and divide, rounding up, to get
4092 // the number of times the backedge is executed.
4093 const SCEV* BECount = getBECount(Start, End, Step);
4094
4095 // The maximum backedge count is similar, except using the minimum start
4096 // value and the maximum end value.
4097 const SCEV* MaxBECount = getBECount(MinStart, MaxEnd, Step);
4098
4099 return BackedgeTakenInfo(BECount, MaxBECount);
4100 }
4101
4102 return CouldNotCompute;
4103 }
4104
4105 /// getNumIterationsInRange - Return the number of iterations of this loop that
4106 /// produce values in the specified constant range. Another way of looking at
4107 /// this is that it returns the first iteration number where the value is not in
4108 /// the range, thus computing the exit count. If the iteration count can't
4109 /// be computed, an instance of SCEVCouldNotCompute is returned.
4110 const SCEV* SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
4111 ScalarEvolution &SE) const {
4112 if (Range.isFullSet()) // Infinite loop.
4113 return SE.getCouldNotCompute();
4114
4115 // If the start is a non-zero constant, shift the range to simplify things.
4116 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
4117 if (!SC->getValue()->isZero()) {
4118 SmallVector<const SCEV*, 4> Operands(op_begin(), op_end());
4119 Operands[0] = SE.getIntegerSCEV(0, SC->getType());
4120 const SCEV* Shifted = SE.getAddRecExpr(Operands, getLoop());
4121 if (const SCEVAddRecExpr *ShiftedAddRec =
4122 dyn_cast<SCEVAddRecExpr>(Shifted))
4123 return ShiftedAddRec->getNumIterationsInRange(
4124 Range.subtract(SC->getValue()->getValue()), SE);
4125 // This is strange and shouldn't happen.
4126 return SE.getCouldNotCompute();
4127 }
4128
4129 // The only time we can solve this is when we have all constant indices.
4130 // Otherwise, we cannot determine the overflow conditions.
4131 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
4132 if (!isa<SCEVConstant>(getOperand(i)))
4133 return SE.getCouldNotCompute();
4134
4135
4136 // Okay at this point we know that all elements of the chrec are constants and
4137 // that the start element is zero.
4138
4139 // First check to see if the range contains zero. If not, the first
4140 // iteration exits.
4141 unsigned BitWidth = SE.getTypeSizeInBits(getType());
4142 if (!Range.contains(APInt(BitWidth, 0)))
4143 return SE.getIntegerSCEV(0, getType());
4144
4145 if (isAffine()) {
4146 // If this is an affine expression then we have this situation:
4147 // Solve {0,+,A} in Range === Ax in Range
4148
4149 // We know that zero is in the range. If A is positive then we know that
4150 // the upper value of the range must be the first possible exit value.
4151 // If A is negative then the lower of the range is the last possible loop
4152 // value. Also note that we already checked for a full range.
4153 APInt One(BitWidth,1);
4154 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
4155 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
4156
4157 // The exit value should be (End+A)/A.
4158 APInt ExitVal = (End + A).udiv(A);
4159 ConstantInt *ExitValue = ConstantInt::get(ExitVal);
4160
4161 // Evaluate at the exit value.
If we really did fall out of the valid 4162 // range, then we computed our trip count, otherwise wrap around or other 4163 // things must have happened. 4164 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 4165 if (Range.contains(Val->getValue())) 4166 return SE.getCouldNotCompute(); // Something strange happened 4167 4168 // Ensure that the previous value is in the range. This is a sanity check. 4169 assert(Range.contains( 4170 EvaluateConstantChrecAtConstant(this, 4171 ConstantInt::get(ExitVal - One), SE)->getValue()) && 4172 "Linear scev computation is off in a bad way!"); 4173 return SE.getConstant(ExitValue); 4174 } else if (isQuadratic()) { 4175 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the 4176 // quadratic equation to solve it. To do this, we must frame our problem in 4177 // terms of figuring out when zero is crossed, instead of when 4178 // Range.getUpper() is crossed. 4179 SmallVector<const SCEV*, 4> NewOps(op_begin(), op_end()); 4180 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); 4181 const SCEV* NewAddRec = SE.getAddRecExpr(NewOps, getLoop()); 4182 4183 // Next, solve the constructed addrec 4184 std::pair<const SCEV*,const SCEV*> Roots = 4185 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE); 4186 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 4187 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 4188 if (R1) { 4189 // Pick the smallest positive root value. 4190 if (ConstantInt *CB = 4191 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, 4192 R1->getValue(), R2->getValue()))) { 4193 if (CB->getZExtValue() == false) 4194 std::swap(R1, R2); // R1 is the minimum root now. 4195 4196 // Make sure the root is not off by one. The returned iteration should 4197 // not be in the range, but the previous one should be. When solving 4198 // for "X*X < 5", for example, we should not return a root of 2. 4199 ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this, 4200 R1->getValue(), 4201 SE); 4202 if (Range.contains(R1Val->getValue())) { 4203 // The next iteration must be out of the range... 4204 ConstantInt *NextVal = ConstantInt::get(R1->getValue()->getValue()+1); 4205 4206 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 4207 if (!Range.contains(R1Val->getValue())) 4208 return SE.getConstant(NextVal); 4209 return SE.getCouldNotCompute(); // Something strange happened 4210 } 4211 4212 // If R1 was not in the range, then it is a good return value. Make 4213 // sure that R1-1 WAS in the range though, just in case. 
4214 ConstantInt *NextVal = ConstantInt::get(R1->getValue()->getValue()-1);
4215 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4216 if (Range.contains(R1Val->getValue()))
4217 return R1;
4218 return SE.getCouldNotCompute(); // Something strange happened
4219 }
4220 }
4221 }
4222
4223 return SE.getCouldNotCompute();
4224 }
4225
4226
4227
4228 //===----------------------------------------------------------------------===//
4229 // SCEVCallbackVH Class Implementation
4230 //===----------------------------------------------------------------------===//
4231
4232 void ScalarEvolution::SCEVCallbackVH::deleted() {
4233 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4234 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
4235 SE->ConstantEvolutionLoopExitValue.erase(PN);
4236 if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
4237 SE->ValuesAtScopes.erase(I);
4238 SE->Scalars.erase(getValPtr());
4239 // this now dangles!
4240 }
4241
4242 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
4243 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4244
4245 // Forget all the expressions associated with users of the old value,
4246 // so that future queries will recompute the expressions using the new
4247 // value.
4248 SmallVector<User *, 16> Worklist;
4249 Value *Old = getValPtr();
4250 bool DeleteOld = false;
4251 for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
4252 UI != UE; ++UI)
4253 Worklist.push_back(*UI);
4254 while (!Worklist.empty()) {
4255 User *U = Worklist.pop_back_val();
4256 // Deleting the Old value will cause this to dangle. Postpone
4257 // that until everything else is done.
4258 if (U == Old) {
4259 DeleteOld = true;
4260 continue;
4261 }
4262 if (PHINode *PN = dyn_cast<PHINode>(U))
4263 SE->ConstantEvolutionLoopExitValue.erase(PN);
4264 if (Instruction *I = dyn_cast<Instruction>(U))
4265 SE->ValuesAtScopes.erase(I);
4266 if (SE->Scalars.erase(U))
4267 for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
4268 UI != UE; ++UI)
4269 Worklist.push_back(*UI);
4270 }
4271 if (DeleteOld) {
4272 if (PHINode *PN = dyn_cast<PHINode>(Old))
4273 SE->ConstantEvolutionLoopExitValue.erase(PN);
4274 if (Instruction *I = dyn_cast<Instruction>(Old))
4275 SE->ValuesAtScopes.erase(I);
4276 SE->Scalars.erase(Old);
4277 // this now dangles!
4278 }
4279 // this may dangle!
4280 } 4281 4282 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 4283 : CallbackVH(V), SE(se) {} 4284 4285 //===----------------------------------------------------------------------===// 4286 // ScalarEvolution Class Implementation 4287 //===----------------------------------------------------------------------===// 4288 4289 ScalarEvolution::ScalarEvolution() 4290 : FunctionPass(&ID), CouldNotCompute(new SCEVCouldNotCompute()) { 4291 } 4292 4293 bool ScalarEvolution::runOnFunction(Function &F) { 4294 this->F = &F; 4295 LI = &getAnalysis<LoopInfo>(); 4296 TD = getAnalysisIfAvailable<TargetData>(); 4297 return false; 4298 } 4299 4300 void ScalarEvolution::releaseMemory() { 4301 Scalars.clear(); 4302 BackedgeTakenCounts.clear(); 4303 ConstantEvolutionLoopExitValue.clear(); 4304 ValuesAtScopes.clear(); 4305 4306 for (std::map<ConstantInt*, SCEVConstant*>::iterator 4307 I = SCEVConstants.begin(), E = SCEVConstants.end(); I != E; ++I) 4308 delete I->second; 4309 for (std::map<std::pair<const SCEV*, const Type*>, 4310 SCEVTruncateExpr*>::iterator I = SCEVTruncates.begin(), 4311 E = SCEVTruncates.end(); I != E; ++I) 4312 delete I->second; 4313 for (std::map<std::pair<const SCEV*, const Type*>, 4314 SCEVZeroExtendExpr*>::iterator I = SCEVZeroExtends.begin(), 4315 E = SCEVZeroExtends.end(); I != E; ++I) 4316 delete I->second; 4317 for (std::map<std::pair<unsigned, std::vector<const SCEV*> >, 4318 SCEVCommutativeExpr*>::iterator I = SCEVCommExprs.begin(), 4319 E = SCEVCommExprs.end(); I != E; ++I) 4320 delete I->second; 4321 for (std::map<std::pair<const SCEV*, const SCEV*>, SCEVUDivExpr*>::iterator 4322 I = SCEVUDivs.begin(), E = SCEVUDivs.end(); I != E; ++I) 4323 delete I->second; 4324 for (std::map<std::pair<const SCEV*, const Type*>, 4325 SCEVSignExtendExpr*>::iterator I = SCEVSignExtends.begin(), 4326 E = SCEVSignExtends.end(); I != E; ++I) 4327 delete I->second; 4328 for (std::map<std::pair<const Loop *, std::vector<const SCEV*> >, 4329 SCEVAddRecExpr*>::iterator I = SCEVAddRecExprs.begin(), 4330 E = SCEVAddRecExprs.end(); I != E; ++I) 4331 delete I->second; 4332 for (std::map<Value*, SCEVUnknown*>::iterator I = SCEVUnknowns.begin(), 4333 E = SCEVUnknowns.end(); I != E; ++I) 4334 delete I->second; 4335 4336 SCEVConstants.clear(); 4337 SCEVTruncates.clear(); 4338 SCEVZeroExtends.clear(); 4339 SCEVCommExprs.clear(); 4340 SCEVUDivs.clear(); 4341 SCEVSignExtends.clear(); 4342 SCEVAddRecExprs.clear(); 4343 SCEVUnknowns.clear(); 4344 } 4345 4346 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const { 4347 AU.setPreservesAll(); 4348 AU.addRequiredTransitive<LoopInfo>(); 4349 } 4350 4351 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 4352 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 4353 } 4354 4355 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 4356 const Loop *L) { 4357 // Print all inner loops first 4358 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) 4359 PrintLoopInfo(OS, SE, *I); 4360 4361 OS << "Loop " << L->getHeader()->getName() << ": "; 4362 4363 SmallVector<BasicBlock*, 8> ExitBlocks; 4364 L->getExitBlocks(ExitBlocks); 4365 if (ExitBlocks.size() != 1) 4366 OS << "<multiple exits> "; 4367 4368 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 4369 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 4370 } else { 4371 OS << "Unpredictable backedge-taken count. 
"; 4372 } 4373 4374 OS << "\n"; 4375 OS << "Loop " << L->getHeader()->getName() << ": "; 4376 4377 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 4378 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 4379 } else { 4380 OS << "Unpredictable max backedge-taken count. "; 4381 } 4382 4383 OS << "\n"; 4384 } 4385 4386 void ScalarEvolution::print(raw_ostream &OS, const Module* ) const { 4387 // ScalarEvolution's implementaiton of the print method is to print 4388 // out SCEV values of all instructions that are interesting. Doing 4389 // this potentially causes it to create new SCEV objects though, 4390 // which technically conflicts with the const qualifier. This isn't 4391 // observable from outside the class though (the hasSCEV function 4392 // notwithstanding), so casting away the const isn't dangerous. 4393 ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this); 4394 4395 OS << "Classifying expressions for: " << F->getName() << "\n"; 4396 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) 4397 if (isSCEVable(I->getType())) { 4398 OS << *I; 4399 OS << " --> "; 4400 const SCEV* SV = SE.getSCEV(&*I); 4401 SV->print(OS); 4402 4403 const Loop *L = LI->getLoopFor((*I).getParent()); 4404 4405 const SCEV* AtUse = SE.getSCEVAtScope(SV, L); 4406 if (AtUse != SV) { 4407 OS << " --> "; 4408 AtUse->print(OS); 4409 } 4410 4411 if (L) { 4412 OS << "\t\t" "Exits: "; 4413 const SCEV* ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 4414 if (!ExitValue->isLoopInvariant(L)) { 4415 OS << "<<Unknown>>"; 4416 } else { 4417 OS << *ExitValue; 4418 } 4419 } 4420 4421 OS << "\n"; 4422 } 4423 4424 OS << "Determining loop execution counts for: " << F->getName() << "\n"; 4425 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) 4426 PrintLoopInfo(OS, &SE, *I); 4427 } 4428 4429 void ScalarEvolution::print(std::ostream &o, const Module *M) const { 4430 raw_os_ostream OS(o); 4431 print(OS, M); 4432 } 4433