//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These classes are reference counted, managed by the const SCEV*
// class.  We only create one SCEV of a particular shape, so pointer comparisons
// for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//
SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const SCEV *
SCEVCouldNotCompute::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV* ScalarEvolution::getConstant(ConstantInt *V) {
  SCEVConstant *&R = SCEVConstants[V];
  if (R == 0) R = new SCEVConstant(V);
  return R;
}

const SCEV* ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(Val));
}

const SCEV*
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy,
                           const SCEV* op, const Type *ty)
  : SCEV(SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const SCEV* op, const Type *ty)
  : SCEVCastExpr(scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV* op, const Type *ty)
  : SCEVCastExpr(scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV* op, const Type *ty)
  : SCEVCastExpr(scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

const SCEV *
SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV* H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV*, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        assert(0 && "Unknown commutative expr!");
    }
  }
  return this;
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

const SCEV *
SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                  const SCEV *Conc,
                                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV* H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV*, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}


bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L->getHeader()))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}


void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() + ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      assert(0 && "Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV*> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume K > 0.
static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
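  //
  // A small worked example (illustrative only; W = 32 is just a sample
  // width): for K = 3, K! = 6 = 2^1 * 3, so T = 1 and the odd part of K!
  // is 3.  We compute It*(It-1)*(It-2) at width W+T = 33 bits, divide by
  // 2^T = 2 (equivalent to a right shift by 1), truncate back to 32 bits,
  // and then multiply by the multiplicative inverse of 3 modulo 2^32
  // (0xAAAAAAAB), which performs the exact division by the odd part of K!.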
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1).  However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV* Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV* S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV* DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
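///
/// As an illustrative example, the recurrence {0,+,1,+,1} evaluates at
/// iteration It to 0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2)
/// = It + It*(It-1)/2 = It*(It+1)/2, i.e. the sum 1 + 2 + ... + It.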
///
const SCEV* SCEVAddRecExpr::evaluateAtIteration(const SCEV* It,
                                                ScalarEvolution &SE) const {
  const SCEV* Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV* Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV*, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  SCEVTruncateExpr *&Result = SCEVTruncates[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVTruncateExpr(Op, Ty);
  return Result;
}

const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow in the old, smaller type, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.  In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV* Start = AR->getStart();
        const SCEV* Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type.  The count is always unsigned.
        const SCEV* CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV* RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV* ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV* Add = getAddExpr(Start, ZMul);
          const SCEV* OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 AR->getLoop());

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV* SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  SCEVZeroExtendExpr *&Result = SCEVZeroExtends[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVZeroExtendExpr(Op, Ty);
  return Result;
}

const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow in the old, smaller type, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.  In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV* Start = AR->getStart();
        const SCEV* Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type.  The count is always unsigned.
        const SCEV* CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV* RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV* SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV* Add = getAddExpr(Start, SMul);
          const SCEV* OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  SCEVSignExtendExpr *&Result = SCEVSignExtends[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVSignExtendExpr(Op, Ty);
  return Result;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV* NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV* ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV* SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV*, APInt> &M,
                             SmallVector<const SCEV*, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV*> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV*, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV* Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, APInt()));
        if (Pair.second) {
          Pair.first->second = NewScale;
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], APInt()));
      if (Pair.second) {
        Pair.first->second = Scale;
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV* Two = getIntegerSCEV(2, Ty);
      const SCEV* Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates.  If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded.  e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV*, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV*, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV* Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV*, APInt> M;
    SmallVector<const SCEV*, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list.  Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV*, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV*, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getIntegerSCEV(0, Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV* InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV*, 4> MulOps(Mul->op_begin(), Mul->op_end());
            MulOps.erase(MulOps.begin()+MulOp);
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV* One = getIntegerSCEV(1, Ty);
          const SCEV* AddOne = getAddExpr(InnerMul, One);
          const SCEV* OuterMul = getMulExpr(AddOne, Ops[AddOp]);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV* InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_end());
              MulOps.erase(MulOps.begin()+MulOp);
              InnerMul1 = getMulExpr(MulOps);
            }
            const SCEV* InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_end());
              MulOps.erase(MulOps.begin()+OMulOp);
              InnerMul2 = getMulExpr(MulOps);
            }
            const SCEV* InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
            const SCEV* OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV*, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV*, 4> AddRecOps(AddRec->op_begin(),
                                            AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      const SCEV* NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the remaining non-loop-invariant
      // parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
                                              AddRec->op_end());
          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
            if (i >= NewOps.size()) {
              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
                            OtherAddRec->op_end());
              break;
            }
            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
          }
          const SCEV* NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());

          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getAddExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
  SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scAddExpr,
                                                              SCEVOps)];
  if (Result == 0) Result = new SCEVAddExpr(Ops);
  return Result;
}


/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV*> &Ops) {
  assert(!Ops.empty() && "Cannot get empty mul!");
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));


    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    }
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  if (Ops.size() == 1)
    return Ops[0];

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // multiplied values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV*, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV*, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      if (LIOps.size() == 1) {
        const SCEV *Scale = LIOps[0];
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
      } else {
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
          SmallVector<const SCEV*, 4> MulOps(LIOps.begin(), LIOps.end());
          MulOps.push_back(AddRec->getOperand(i));
          NewOps.push_back(getMulExpr(MulOps));
        }
      }

      const SCEV* NewRec = getAddRecExpr(NewOps, AddRec->getLoop());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together.  If so, we can fold them.
1480 for (unsigned OtherIdx = Idx+1; 1481 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx) 1482 if (OtherIdx != Idx) { 1483 const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 1484 if (AddRec->getLoop() == OtherAddRec->getLoop()) { 1485 // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D} 1486 const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec; 1487 const SCEV* NewStart = getMulExpr(F->getStart(), 1488 G->getStart()); 1489 const SCEV* B = F->getStepRecurrence(*this); 1490 const SCEV* D = G->getStepRecurrence(*this); 1491 const SCEV* NewStep = getAddExpr(getMulExpr(F, D), 1492 getMulExpr(G, B), 1493 getMulExpr(B, D)); 1494 const SCEV* NewAddRec = getAddRecExpr(NewStart, NewStep, 1495 F->getLoop()); 1496 if (Ops.size() == 2) return NewAddRec; 1497 1498 Ops.erase(Ops.begin()+Idx); 1499 Ops.erase(Ops.begin()+OtherIdx-1); 1500 Ops.push_back(NewAddRec); 1501 return getMulExpr(Ops); 1502 } 1503 } 1504 1505 // Otherwise couldn't fold anything into this recurrence. Move on to the 1506 // next one. 1507 } 1508 1509 // Okay, it looks like we really DO need a mul expr. Check to see if we 1510 // already have one, otherwise create a new one. 1511 std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end()); 1512 SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scMulExpr, 1513 SCEVOps)]; 1514 if (Result == 0) 1515 Result = new SCEVMulExpr(Ops); 1516 return Result; 1517 } 1518 1519 /// getUDivExpr - Get a canonical unsigned division expression, or something 1520 /// simpler if possible. 1521 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 1522 const SCEV *RHS) { 1523 assert(getEffectiveSCEVType(LHS->getType()) == 1524 getEffectiveSCEVType(RHS->getType()) && 1525 "SCEVUDivExpr operand types don't match!"); 1526 1527 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 1528 if (RHSC->getValue()->equalsInt(1)) 1529 return LHS; // X udiv 1 --> X 1530 if (RHSC->isZero()) 1531 return getIntegerSCEV(0, LHS->getType()); // value is undefined 1532 1533 // Determine if the division can be folded into the operands of 1534 // its operands. 1535 // TODO: Generalize this to non-constants by using known-bits information. 1536 const Type *Ty = LHS->getType(); 1537 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros(); 1538 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ; 1539 // For non-power-of-two values, effectively round the value up to the 1540 // nearest power of two. 1541 if (!RHSC->getValue()->getValue().isPowerOf2()) 1542 ++MaxShiftAmt; 1543 const IntegerType *ExtTy = 1544 IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt); 1545 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 1546 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 1547 if (const SCEVConstant *Step = 1548 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) 1549 if (!Step->getValue()->getValue() 1550 .urem(RHSC->getValue()->getValue()) && 1551 getZeroExtendExpr(AR, ExtTy) == 1552 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 1553 getZeroExtendExpr(Step, ExtTy), 1554 AR->getLoop())) { 1555 SmallVector<const SCEV*, 4> Operands; 1556 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i) 1557 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS)); 1558 return getAddRecExpr(Operands, AR->getLoop()); 1559 } 1560 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
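// Illustrative instance of that rule (values are made up): (%x * 8) /u 4 can become %x * 2, but only once the zero-extension comparison below shows the multiply cannot wrap and 8 /u 4 folds exactly.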
1561 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 1562 SmallVector<const SCEV*, 4> Operands; 1563 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) 1564 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy)); 1565 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 1566 // Find an operand that's safely divisible. 1567 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 1568 const SCEV* Op = M->getOperand(i); 1569 const SCEV* Div = getUDivExpr(Op, RHSC); 1570 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 1571 const SmallVectorImpl<const SCEV*> &MOperands = M->getOperands(); 1572 Operands = SmallVector<const SCEV*, 4>(MOperands.begin(), 1573 MOperands.end()); 1574 Operands[i] = Div; 1575 return getMulExpr(Operands); 1576 } 1577 } 1578 } 1579 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 1580 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) { 1581 SmallVector<const SCEV*, 4> Operands; 1582 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) 1583 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy)); 1584 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 1585 Operands.clear(); 1586 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 1587 const SCEV* Op = getUDivExpr(A->getOperand(i), RHS); 1588 if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i)) 1589 break; 1590 Operands.push_back(Op); 1591 } 1592 if (Operands.size() == A->getNumOperands()) 1593 return getAddExpr(Operands); 1594 } 1595 } 1596 1597 // Fold if both operands are constant. 1598 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 1599 Constant *LHSCV = LHSC->getValue(); 1600 Constant *RHSCV = RHSC->getValue(); 1601 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 1602 RHSCV))); 1603 } 1604 } 1605 1606 SCEVUDivExpr *&Result = SCEVUDivs[std::make_pair(LHS, RHS)]; 1607 if (Result == 0) Result = new SCEVUDivExpr(LHS, RHS); 1608 return Result; 1609 } 1610 1611 1612 /// getAddRecExpr - Get an add recurrence expression for the specified loop. 1613 /// Simplify the expression as much as possible. 1614 const SCEV* ScalarEvolution::getAddRecExpr(const SCEV* Start, 1615 const SCEV* Step, const Loop *L) { 1616 SmallVector<const SCEV*, 4> Operands; 1617 Operands.push_back(Start); 1618 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 1619 if (StepChrec->getLoop() == L) { 1620 Operands.insert(Operands.end(), StepChrec->op_begin(), 1621 StepChrec->op_end()); 1622 return getAddRecExpr(Operands, L); 1623 } 1624 1625 Operands.push_back(Step); 1626 return getAddRecExpr(Operands, L); 1627 } 1628 1629 /// getAddRecExpr - Get an add recurrence expression for the specified loop. 1630 /// Simplify the expression as much as possible. 1631 const SCEV * 1632 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV*> &Operands, 1633 const Loop *L) { 1634 if (Operands.size() == 1) return Operands[0]; 1635 #ifndef NDEBUG 1636 for (unsigned i = 1, e = Operands.size(); i != e; ++i) 1637 assert(getEffectiveSCEVType(Operands[i]->getType()) == 1638 getEffectiveSCEVType(Operands[0]->getType()) && 1639 "SCEVAddRecExpr operand types don't match!"); 1640 #endif 1641 1642 if (Operands.back()->isZero()) { 1643 Operands.pop_back(); 1644 return getAddRecExpr(Operands, L); // {X,+,0} --> X 1645 } 1646 1647 // Canonicalize nested AddRecs in by nesting them in order of loop depth. 
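// Illustrative instance of this canonicalization (names are made up): a request for {{X,+,Y}<Inner>,+,Step}<Outer> is rebuilt as {{X,+,Step}<Outer>,+,Y}<Inner>, provided each operand is invariant in the loop it ends up attached to.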
1648 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 1649 const Loop* NestedLoop = NestedAR->getLoop(); 1650 if (L->getLoopDepth() < NestedLoop->getLoopDepth()) { 1651 SmallVector<const SCEV*, 4> NestedOperands(NestedAR->op_begin(), 1652 NestedAR->op_end()); 1653 Operands[0] = NestedAR->getStart(); 1654 // AddRecs require their operands be loop-invariant with respect to their 1655 // loops. Don't perform this transformation if it would break this 1656 // requirement. 1657 bool AllInvariant = true; 1658 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 1659 if (!Operands[i]->isLoopInvariant(L)) { 1660 AllInvariant = false; 1661 break; 1662 } 1663 if (AllInvariant) { 1664 NestedOperands[0] = getAddRecExpr(Operands, L); 1665 AllInvariant = true; 1666 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i) 1667 if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) { 1668 AllInvariant = false; 1669 break; 1670 } 1671 if (AllInvariant) 1672 // Ok, both add recurrences are valid after the transformation. 1673 return getAddRecExpr(NestedOperands, NestedLoop); 1674 } 1675 // Reset Operands to its original state. 1676 Operands[0] = NestedAR; 1677 } 1678 } 1679 1680 std::vector<const SCEV*> SCEVOps(Operands.begin(), Operands.end()); 1681 SCEVAddRecExpr *&Result = SCEVAddRecExprs[std::make_pair(L, SCEVOps)]; 1682 if (Result == 0) Result = new SCEVAddRecExpr(Operands, L); 1683 return Result; 1684 } 1685 1686 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 1687 const SCEV *RHS) { 1688 SmallVector<const SCEV*, 2> Ops; 1689 Ops.push_back(LHS); 1690 Ops.push_back(RHS); 1691 return getSMaxExpr(Ops); 1692 } 1693 1694 const SCEV* 1695 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV*> &Ops) { 1696 assert(!Ops.empty() && "Cannot get empty smax!"); 1697 if (Ops.size() == 1) return Ops[0]; 1698 #ifndef NDEBUG 1699 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 1700 assert(getEffectiveSCEVType(Ops[i]->getType()) == 1701 getEffectiveSCEVType(Ops[0]->getType()) && 1702 "SCEVSMaxExpr operand types don't match!"); 1703 #endif 1704 1705 // Sort by complexity, this groups all similar expression types together. 1706 GroupByComplexity(Ops, LI); 1707 1708 // If there are any constants, fold them together. 1709 unsigned Idx = 0; 1710 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 1711 ++Idx; 1712 assert(Idx < Ops.size()); 1713 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 1714 // We found two constants, fold them together! 1715 ConstantInt *Fold = ConstantInt::get( 1716 APIntOps::smax(LHSC->getValue()->getValue(), 1717 RHSC->getValue()->getValue())); 1718 Ops[0] = getConstant(Fold); 1719 Ops.erase(Ops.begin()+1); // Erase the folded element 1720 if (Ops.size() == 1) return Ops[0]; 1721 LHSC = cast<SCEVConstant>(Ops[0]); 1722 } 1723 1724 // If we are left with a constant minimum-int, strip it off. 1725 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 1726 Ops.erase(Ops.begin()); 1727 --Idx; 1728 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 1729 // If we have an smax with a constant maximum-int, it will always be 1730 // maximum-int. 1731 return Ops[0]; 1732 } 1733 } 1734 1735 if (Ops.size() == 1) return Ops[0]; 1736 1737 // Find the first SMax 1738 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 1739 ++Idx; 1740 1741 // Check to see if one of the operands is an SMax. If so, expand its operands 1742 // onto our operand list, and recurse to simplify. 
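// For example (illustrative operands), smax(%a, smax(%b, %c)) is flattened into smax(%a, %b, %c) and then re-simplified.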
1743 if (Idx < Ops.size()) { 1744 bool DeletedSMax = false; 1745 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 1746 Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end()); 1747 Ops.erase(Ops.begin()+Idx); 1748 DeletedSMax = true; 1749 } 1750 1751 if (DeletedSMax) 1752 return getSMaxExpr(Ops); 1753 } 1754 1755 // Okay, check to see if the same value occurs in the operand list twice. If 1756 // so, delete one. Since we sorted the list, these values are required to 1757 // be adjacent. 1758 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 1759 if (Ops[i] == Ops[i+1]) { // X smax Y smax Y --> X smax Y 1760 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 1761 --i; --e; 1762 } 1763 1764 if (Ops.size() == 1) return Ops[0]; 1765 1766 assert(!Ops.empty() && "Reduced smax down to nothing!"); 1767 1768 // Okay, it looks like we really DO need an smax expr. Check to see if we 1769 // already have one, otherwise create a new one. 1770 std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end()); 1771 SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scSMaxExpr, 1772 SCEVOps)]; 1773 if (Result == 0) Result = new SCEVSMaxExpr(Ops); 1774 return Result; 1775 } 1776 1777 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 1778 const SCEV *RHS) { 1779 SmallVector<const SCEV*, 2> Ops; 1780 Ops.push_back(LHS); 1781 Ops.push_back(RHS); 1782 return getUMaxExpr(Ops); 1783 } 1784 1785 const SCEV* 1786 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV*> &Ops) { 1787 assert(!Ops.empty() && "Cannot get empty umax!"); 1788 if (Ops.size() == 1) return Ops[0]; 1789 #ifndef NDEBUG 1790 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 1791 assert(getEffectiveSCEVType(Ops[i]->getType()) == 1792 getEffectiveSCEVType(Ops[0]->getType()) && 1793 "SCEVUMaxExpr operand types don't match!"); 1794 #endif 1795 1796 // Sort by complexity, this groups all similar expression types together. 1797 GroupByComplexity(Ops, LI); 1798 1799 // If there are any constants, fold them together. 1800 unsigned Idx = 0; 1801 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 1802 ++Idx; 1803 assert(Idx < Ops.size()); 1804 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 1805 // We found two constants, fold them together! 1806 ConstantInt *Fold = ConstantInt::get( 1807 APIntOps::umax(LHSC->getValue()->getValue(), 1808 RHSC->getValue()->getValue())); 1809 Ops[0] = getConstant(Fold); 1810 Ops.erase(Ops.begin()+1); // Erase the folded element 1811 if (Ops.size() == 1) return Ops[0]; 1812 LHSC = cast<SCEVConstant>(Ops[0]); 1813 } 1814 1815 // If we are left with a constant minimum-int, strip it off. 1816 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 1817 Ops.erase(Ops.begin()); 1818 --Idx; 1819 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 1820 // If we have an umax with a constant maximum-int, it will always be 1821 // maximum-int. 1822 return Ops[0]; 1823 } 1824 } 1825 1826 if (Ops.size() == 1) return Ops[0]; 1827 1828 // Find the first UMax 1829 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 1830 ++Idx; 1831 1832 // Check to see if one of the operands is a UMax. If so, expand its operands 1833 // onto our operand list, and recurse to simplify. 
1834 if (Idx < Ops.size()) { 1835 bool DeletedUMax = false; 1836 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 1837 Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end()); 1838 Ops.erase(Ops.begin()+Idx); 1839 DeletedUMax = true; 1840 } 1841 1842 if (DeletedUMax) 1843 return getUMaxExpr(Ops); 1844 } 1845 1846 // Okay, check to see if the same value occurs in the operand list twice. If 1847 // so, delete one. Since we sorted the list, these values are required to 1848 // be adjacent. 1849 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 1850 if (Ops[i] == Ops[i+1]) { // X umax Y umax Y --> X umax Y 1851 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 1852 --i; --e; 1853 } 1854 1855 if (Ops.size() == 1) return Ops[0]; 1856 1857 assert(!Ops.empty() && "Reduced umax down to nothing!"); 1858 1859 // Okay, it looks like we really DO need a umax expr. Check to see if we 1860 // already have one, otherwise create a new one. 1861 std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end()); 1862 SCEVCommutativeExpr *&Result = SCEVCommExprs[std::make_pair(scUMaxExpr, 1863 SCEVOps)]; 1864 if (Result == 0) Result = new SCEVUMaxExpr(Ops); 1865 return Result; 1866 } 1867 1868 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 1869 const SCEV *RHS) { 1870 // ~smax(~x, ~y) == smin(x, y). 1871 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 1872 } 1873 1874 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 1875 const SCEV *RHS) { 1876 // ~umax(~x, ~y) == umin(x, y) 1877 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 1878 } 1879 1880 const SCEV* ScalarEvolution::getUnknown(Value *V) { 1881 // Don't attempt to do anything other than create a SCEVUnknown object 1882 // here. createSCEV only calls getUnknown after checking for all other 1883 // interesting possibilities, and any other code that calls getUnknown 1884 // is doing so in order to hide a value from SCEV canonicalization. 1885 1886 SCEVUnknown *&Result = SCEVUnknowns[V]; 1887 if (Result == 0) Result = new SCEVUnknown(V); 1888 return Result; 1889 } 1890 1891 //===----------------------------------------------------------------------===// 1892 // Basic SCEV Analysis and PHI Idiom Recognition Code 1893 // 1894 1895 /// isSCEVable - Test if values of the given type are analyzable within 1896 /// the SCEV framework. This primarily includes integer types, and it 1897 /// can optionally include pointer types if the ScalarEvolution class 1898 /// has access to target-specific information. 1899 bool ScalarEvolution::isSCEVable(const Type *Ty) const { 1900 // Integers are always SCEVable. 1901 if (Ty->isInteger()) 1902 return true; 1903 1904 // Pointers are SCEVable if TargetData information is available 1905 // to provide pointer size information. 1906 if (isa<PointerType>(Ty)) 1907 return TD != NULL; 1908 1909 // Otherwise it's not SCEVable. 1910 return false; 1911 } 1912 1913 /// getTypeSizeInBits - Return the size in bits of the specified type, 1914 /// for which isSCEVable must return true. 1915 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const { 1916 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 1917 1918 // If we have a TargetData, use it! 1919 if (TD) 1920 return TD->getTypeSizeInBits(Ty); 1921 1922 // Otherwise, we support only integer types. 
1923 assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!"); 1924 return Ty->getPrimitiveSizeInBits(); 1925 } 1926 1927 /// getEffectiveSCEVType - Return a type with the same bitwidth as 1928 /// the given type and which represents how SCEV will treat the given 1929 /// type, for which isSCEVable must return true. For pointer types, 1930 /// this is the pointer-sized integer type. 1931 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const { 1932 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 1933 1934 if (Ty->isInteger()) 1935 return Ty; 1936 1937 assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!"); 1938 return TD->getIntPtrType(); 1939 } 1940 1941 const SCEV* ScalarEvolution::getCouldNotCompute() { 1942 return CouldNotCompute; 1943 } 1944 1945 /// hasSCEV - Return true if the SCEV for this value has already been 1946 /// computed. 1947 bool ScalarEvolution::hasSCEV(Value *V) const { 1948 return Scalars.count(V); 1949 } 1950 1951 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the 1952 /// expression and create a new one. 1953 const SCEV* ScalarEvolution::getSCEV(Value *V) { 1954 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 1955 1956 std::map<SCEVCallbackVH, const SCEV*>::iterator I = Scalars.find(V); 1957 if (I != Scalars.end()) return I->second; 1958 const SCEV* S = createSCEV(V); 1959 Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S)); 1960 return S; 1961 } 1962 1963 /// getIntegerSCEV - Given a SCEVable type, create a constant for the 1964 /// specified signed integer value and return a SCEV for the constant. 1965 const SCEV* ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) { 1966 const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); 1967 return getConstant(ConstantInt::get(ITy, Val)); 1968 } 1969 1970 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V 1971 /// 1972 const SCEV* ScalarEvolution::getNegativeSCEV(const SCEV* V) { 1973 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 1974 return getConstant(cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 1975 1976 const Type *Ty = V->getType(); 1977 Ty = getEffectiveSCEVType(Ty); 1978 return getMulExpr(V, getConstant(ConstantInt::getAllOnesValue(Ty))); 1979 } 1980 1981 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V 1982 const SCEV* ScalarEvolution::getNotSCEV(const SCEV* V) { 1983 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 1984 return getConstant(cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 1985 1986 const Type *Ty = V->getType(); 1987 Ty = getEffectiveSCEVType(Ty); 1988 const SCEV* AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty)); 1989 return getMinusSCEV(AllOnes, V); 1990 } 1991 1992 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS. 1993 /// 1994 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, 1995 const SCEV *RHS) { 1996 // X - Y --> X + -Y 1997 return getAddExpr(LHS, getNegativeSCEV(RHS)); 1998 } 1999 2000 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the 2001 /// input value to the specified type. If the type must be extended, it is zero 2002 /// extended. 
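/// For example (illustrative), converting an i64 value to i32 yields a truncate, converting it to i128 yields a zero extend, and converting it to another 64-bit type is a no-op.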
2003 const SCEV* 2004 ScalarEvolution::getTruncateOrZeroExtend(const SCEV* V, 2005 const Type *Ty) { 2006 const Type *SrcTy = V->getType(); 2007 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) && 2008 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) && 2009 "Cannot truncate or zero extend with non-integer arguments!"); 2010 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2011 return V; // No conversion 2012 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 2013 return getTruncateExpr(V, Ty); 2014 return getZeroExtendExpr(V, Ty); 2015 } 2016 2017 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the 2018 /// input value to the specified type. If the type must be extended, it is sign 2019 /// extended. 2020 const SCEV* 2021 ScalarEvolution::getTruncateOrSignExtend(const SCEV* V, 2022 const Type *Ty) { 2023 const Type *SrcTy = V->getType(); 2024 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) && 2025 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) && 2026 "Cannot truncate or zero extend with non-integer arguments!"); 2027 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2028 return V; // No conversion 2029 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 2030 return getTruncateExpr(V, Ty); 2031 return getSignExtendExpr(V, Ty); 2032 } 2033 2034 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the 2035 /// input value to the specified type. If the type must be extended, it is zero 2036 /// extended. The conversion must not be narrowing. 2037 const SCEV* 2038 ScalarEvolution::getNoopOrZeroExtend(const SCEV* V, const Type *Ty) { 2039 const Type *SrcTy = V->getType(); 2040 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) && 2041 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) && 2042 "Cannot noop or zero extend with non-integer arguments!"); 2043 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2044 "getNoopOrZeroExtend cannot truncate!"); 2045 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2046 return V; // No conversion 2047 return getZeroExtendExpr(V, Ty); 2048 } 2049 2050 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the 2051 /// input value to the specified type. If the type must be extended, it is sign 2052 /// extended. The conversion must not be narrowing. 2053 const SCEV* 2054 ScalarEvolution::getNoopOrSignExtend(const SCEV* V, const Type *Ty) { 2055 const Type *SrcTy = V->getType(); 2056 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) && 2057 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) && 2058 "Cannot noop or sign extend with non-integer arguments!"); 2059 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2060 "getNoopOrSignExtend cannot truncate!"); 2061 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2062 return V; // No conversion 2063 return getSignExtendExpr(V, Ty); 2064 } 2065 2066 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of 2067 /// the input value to the specified type. If the type must be extended, 2068 /// it is extended with unspecified bits. The conversion must not be 2069 /// narrowing. 
2070 const SCEV* 2071 ScalarEvolution::getNoopOrAnyExtend(const SCEV* V, const Type *Ty) { 2072 const Type *SrcTy = V->getType(); 2073 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) && 2074 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) && 2075 "Cannot noop or any extend with non-integer arguments!"); 2076 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2077 "getNoopOrAnyExtend cannot truncate!"); 2078 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2079 return V; // No conversion 2080 return getAnyExtendExpr(V, Ty); 2081 } 2082 2083 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the 2084 /// input value to the specified type. The conversion must not be widening. 2085 const SCEV* 2086 ScalarEvolution::getTruncateOrNoop(const SCEV* V, const Type *Ty) { 2087 const Type *SrcTy = V->getType(); 2088 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) && 2089 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) && 2090 "Cannot truncate or noop with non-integer arguments!"); 2091 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 2092 "getTruncateOrNoop cannot extend!"); 2093 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2094 return V; // No conversion 2095 return getTruncateExpr(V, Ty); 2096 } 2097 2098 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of 2099 /// the types using zero-extension, and then perform a umax operation 2100 /// with them. 2101 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 2102 const SCEV *RHS) { 2103 const SCEV* PromotedLHS = LHS; 2104 const SCEV* PromotedRHS = RHS; 2105 2106 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 2107 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 2108 else 2109 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 2110 2111 return getUMaxExpr(PromotedLHS, PromotedRHS); 2112 } 2113 2114 /// getUMinFromMismatchedTypes - Promote the operands to the wider of 2115 /// the types using zero-extension, and then perform a umin operation 2116 /// with them. 2117 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 2118 const SCEV *RHS) { 2119 const SCEV* PromotedLHS = LHS; 2120 const SCEV* PromotedRHS = RHS; 2121 2122 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 2123 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 2124 else 2125 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 2126 2127 return getUMinExpr(PromotedLHS, PromotedRHS); 2128 } 2129 2130 /// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value for 2131 /// the specified instruction and replaces any references to the symbolic value 2132 /// SymName with the specified value. This is used during PHI resolution. 2133 void 2134 ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I, 2135 const SCEV *SymName, 2136 const SCEV *NewVal) { 2137 std::map<SCEVCallbackVH, const SCEV*>::iterator SI = 2138 Scalars.find(SCEVCallbackVH(I, this)); 2139 if (SI == Scalars.end()) return; 2140 2141 const SCEV* NV = 2142 SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this); 2143 if (NV == SI->second) return; // No change. 2144 2145 SI->second = NV; // Update the scalars map! 2146 2147 // Any instruction values that use this instruction might also need to be 2148 // updated! 
2149 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); 2150 UI != E; ++UI) 2151 ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal); 2152 } 2153 2154 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in 2155 /// a loop header, making it a potential recurrence, or it doesn't. 2156 /// 2157 const SCEV* ScalarEvolution::createNodeForPHI(PHINode *PN) { 2158 if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized. 2159 if (const Loop *L = LI->getLoopFor(PN->getParent())) 2160 if (L->getHeader() == PN->getParent()) { 2161 // If it lives in the loop header, it has two incoming values, one 2162 // from outside the loop, and one from inside. 2163 unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0)); 2164 unsigned BackEdge = IncomingEdge^1; 2165 2166 // While we are analyzing this PHI node, handle its value symbolically. 2167 const SCEV* SymbolicName = getUnknown(PN); 2168 assert(Scalars.find(PN) == Scalars.end() && 2169 "PHI node already processed?"); 2170 Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName)); 2171 2172 // Using this symbolic name for the PHI, analyze the value coming around 2173 // the back-edge. 2174 const SCEV* BEValue = getSCEV(PN->getIncomingValue(BackEdge)); 2175 2176 // NOTE: If BEValue is loop invariant, we know that the PHI node just 2177 // has a special value for the first iteration of the loop. 2178 2179 // If the value coming around the backedge is an add with the symbolic 2180 // value we just inserted, then we found a simple induction variable! 2181 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 2182 // If there is a single occurrence of the symbolic value, replace it 2183 // with a recurrence. 2184 unsigned FoundIndex = Add->getNumOperands(); 2185 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 2186 if (Add->getOperand(i) == SymbolicName) 2187 if (FoundIndex == e) { 2188 FoundIndex = i; 2189 break; 2190 } 2191 2192 if (FoundIndex != Add->getNumOperands()) { 2193 // Create an add with everything but the specified operand. 2194 SmallVector<const SCEV*, 8> Ops; 2195 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 2196 if (i != FoundIndex) 2197 Ops.push_back(Add->getOperand(i)); 2198 const SCEV* Accum = getAddExpr(Ops); 2199 2200 // This is not a valid addrec if the step amount is varying each 2201 // loop iteration, but is not itself an addrec in this loop. 2202 if (Accum->isLoopInvariant(L) || 2203 (isa<SCEVAddRecExpr>(Accum) && 2204 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 2205 const SCEV *StartVal = 2206 getSCEV(PN->getIncomingValue(IncomingEdge)); 2207 const SCEV *PHISCEV = 2208 getAddRecExpr(StartVal, Accum, L); 2209 2210 // Okay, for the entire analysis of this edge we assumed the PHI 2211 // to be symbolic. We now need to go back and update all of the 2212 // entries for the scalars that use the PHI (except for the PHI 2213 // itself) to use the new analyzed value instead of the "symbolic" 2214 // value. 2215 ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV); 2216 return PHISCEV; 2217 } 2218 } 2219 } else if (const SCEVAddRecExpr *AddRec = 2220 dyn_cast<SCEVAddRecExpr>(BEValue)) { 2221 // Otherwise, this could be a loop like this: 2222 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 2223 // In this case, j = {1,+,1} and BEValue is j. 2224 // Because the other in-value of i (0) fits the evolution of BEValue 2225 // i really is an addrec evolution. 
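// (In the example above, StartVal is 0 = 1 - 1, so the check below succeeds and i is represented as {0,+,1} in that loop.)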
2226 if (AddRec->getLoop() == L && AddRec->isAffine()) { 2227 const SCEV* StartVal = getSCEV(PN->getIncomingValue(IncomingEdge)); 2228 2229 // If StartVal = j.start - j.stride, we can use StartVal as the 2230 // initial step of the addrec evolution. 2231 if (StartVal == getMinusSCEV(AddRec->getOperand(0), 2232 AddRec->getOperand(1))) { 2233 const SCEV* PHISCEV = 2234 getAddRecExpr(StartVal, AddRec->getOperand(1), L); 2235 2236 // Okay, for the entire analysis of this edge we assumed the PHI 2237 // to be symbolic. We now need to go back and update all of the 2238 // entries for the scalars that use the PHI (except for the PHI 2239 // itself) to use the new analyzed value instead of the "symbolic" 2240 // value. 2241 ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV); 2242 return PHISCEV; 2243 } 2244 } 2245 } 2246 2247 return SymbolicName; 2248 } 2249 2250 // If it's not a loop phi, we can't handle it yet. 2251 return getUnknown(PN); 2252 } 2253 2254 /// createNodeForGEP - Expand GEP instructions into add and multiply 2255 /// operations. This allows them to be analyzed by regular SCEV code. 2256 /// 2257 const SCEV* ScalarEvolution::createNodeForGEP(User *GEP) { 2258 2259 const Type *IntPtrTy = TD->getIntPtrType(); 2260 Value *Base = GEP->getOperand(0); 2261 // Don't attempt to analyze GEPs over unsized objects. 2262 if (!cast<PointerType>(Base->getType())->getElementType()->isSized()) 2263 return getUnknown(GEP); 2264 const SCEV* TotalOffset = getIntegerSCEV(0, IntPtrTy); 2265 gep_type_iterator GTI = gep_type_begin(GEP); 2266 for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()), 2267 E = GEP->op_end(); 2268 I != E; ++I) { 2269 Value *Index = *I; 2270 // Compute the (potentially symbolic) offset in bytes for this index. 2271 if (const StructType *STy = dyn_cast<StructType>(*GTI++)) { 2272 // For a struct, add the member offset. 2273 const StructLayout &SL = *TD->getStructLayout(STy); 2274 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue(); 2275 uint64_t Offset = SL.getElementOffset(FieldNo); 2276 TotalOffset = getAddExpr(TotalOffset, 2277 getIntegerSCEV(Offset, IntPtrTy)); 2278 } else { 2279 // For an array, add the element offset, explicitly scaled. 2280 const SCEV* LocalOffset = getSCEV(Index); 2281 if (!isa<PointerType>(LocalOffset->getType())) 2282 // Getelementptr indicies are signed. 2283 LocalOffset = getTruncateOrSignExtend(LocalOffset, 2284 IntPtrTy); 2285 LocalOffset = 2286 getMulExpr(LocalOffset, 2287 getIntegerSCEV(TD->getTypeAllocSize(*GTI), 2288 IntPtrTy)); 2289 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 2290 } 2291 } 2292 return getAddExpr(getSCEV(Base), TotalOffset); 2293 } 2294 2295 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is 2296 /// guaranteed to end in (at every loop iteration). It is, at the same time, 2297 /// the minimum number of times S is divisible by 2. For example, given {4,+,8} 2298 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S. 
2299 uint32_t 2300 ScalarEvolution::GetMinTrailingZeros(const SCEV* S) { 2301 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 2302 return C->getValue()->getValue().countTrailingZeros(); 2303 2304 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 2305 return std::min(GetMinTrailingZeros(T->getOperand()), 2306 (uint32_t)getTypeSizeInBits(T->getType())); 2307 2308 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 2309 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 2310 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? 2311 getTypeSizeInBits(E->getType()) : OpRes; 2312 } 2313 2314 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 2315 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 2316 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? 2317 getTypeSizeInBits(E->getType()) : OpRes; 2318 } 2319 2320 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 2321 // The result is the min of all operands results. 2322 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 2323 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 2324 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 2325 return MinOpRes; 2326 } 2327 2328 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 2329 // The result is the sum of all operands results. 2330 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 2331 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 2332 for (unsigned i = 1, e = M->getNumOperands(); 2333 SumOpRes != BitWidth && i != e; ++i) 2334 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), 2335 BitWidth); 2336 return SumOpRes; 2337 } 2338 2339 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 2340 // The result is the min of all operands results. 2341 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 2342 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 2343 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 2344 return MinOpRes; 2345 } 2346 2347 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 2348 // The result is the min of all operands results. 2349 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 2350 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 2351 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 2352 return MinOpRes; 2353 } 2354 2355 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 2356 // The result is the min of all operands results. 2357 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 2358 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 2359 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 2360 return MinOpRes; 2361 } 2362 2363 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2364 // For a SCEVUnknown, ask ValueTracking. 2365 unsigned BitWidth = getTypeSizeInBits(U->getType()); 2366 APInt Mask = APInt::getAllOnesValue(BitWidth); 2367 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 2368 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones); 2369 return Zeros.countTrailingOnes(); 2370 } 2371 2372 // SCEVUDivExpr 2373 return 0; 2374 } 2375 2376 uint32_t 2377 ScalarEvolution::GetMinLeadingZeros(const SCEV* S) { 2378 // TODO: Handle other SCEV expression types here. 
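// For example (illustrative), a constant i32 12 yields 28 leading zeros, and a zero extend from i8 to i32 adds 24 to whatever its operand guarantees.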
2379 2380 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 2381 return C->getValue()->getValue().countLeadingZeros(); 2382 2383 if (const SCEVZeroExtendExpr *C = dyn_cast<SCEVZeroExtendExpr>(S)) { 2384 // A zero-extension cast adds zero bits. 2385 return GetMinLeadingZeros(C->getOperand()) + 2386 (getTypeSizeInBits(C->getType()) - 2387 getTypeSizeInBits(C->getOperand()->getType())); 2388 } 2389 2390 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2391 // For a SCEVUnknown, ask ValueTracking. 2392 unsigned BitWidth = getTypeSizeInBits(U->getType()); 2393 APInt Mask = APInt::getAllOnesValue(BitWidth); 2394 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 2395 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD); 2396 return Zeros.countLeadingOnes(); 2397 } 2398 2399 return 1; 2400 } 2401 2402 uint32_t 2403 ScalarEvolution::GetMinSignBits(const SCEV* S) { 2404 // TODO: Handle other SCEV expression types here. 2405 2406 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) { 2407 const APInt &A = C->getValue()->getValue(); 2408 return A.isNegative() ? A.countLeadingOnes() : 2409 A.countLeadingZeros(); 2410 } 2411 2412 if (const SCEVSignExtendExpr *C = dyn_cast<SCEVSignExtendExpr>(S)) { 2413 // A sign-extension cast adds sign bits. 2414 return GetMinSignBits(C->getOperand()) + 2415 (getTypeSizeInBits(C->getType()) - 2416 getTypeSizeInBits(C->getOperand()->getType())); 2417 } 2418 2419 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 2420 unsigned BitWidth = getTypeSizeInBits(A->getType()); 2421 2422 // Special case decrementing a value (ADD X, -1): 2423 if (const SCEVConstant *CRHS = dyn_cast<SCEVConstant>(A->getOperand(0))) 2424 if (CRHS->isAllOnesValue()) { 2425 SmallVector<const SCEV *, 4> OtherOps(A->op_begin() + 1, A->op_end()); 2426 const SCEV *OtherOpsAdd = getAddExpr(OtherOps); 2427 unsigned LZ = GetMinLeadingZeros(OtherOpsAdd); 2428 2429 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2430 // sign bits set. 2431 if (LZ == BitWidth - 1) 2432 return BitWidth; 2433 2434 // If we are subtracting one from a positive number, there is no carry 2435 // out of the result. 2436 if (LZ > 0) 2437 return GetMinSignBits(OtherOpsAdd); 2438 } 2439 2440 // Add can have at most one carry bit. Thus we know that the output 2441 // is, at worst, one more bit than the inputs. 2442 unsigned Min = BitWidth; 2443 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 2444 unsigned N = GetMinSignBits(A->getOperand(i)); 2445 Min = std::min(Min, N) - 1; 2446 if (Min == 0) return 1; 2447 } 2448 return 1; 2449 } 2450 2451 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2452 // For a SCEVUnknown, ask ValueTracking. 2453 return ComputeNumSignBits(U->getValue(), TD); 2454 } 2455 2456 return 1; 2457 } 2458 2459 /// createSCEV - We know that there is no SCEV for the specified value. 2460 /// Analyze the expression. 
2461 /// 2462 const SCEV* ScalarEvolution::createSCEV(Value *V) { 2463 if (!isSCEVable(V->getType())) 2464 return getUnknown(V); 2465 2466 unsigned Opcode = Instruction::UserOp1; 2467 if (Instruction *I = dyn_cast<Instruction>(V)) 2468 Opcode = I->getOpcode(); 2469 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) 2470 Opcode = CE->getOpcode(); 2471 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 2472 return getConstant(CI); 2473 else if (isa<ConstantPointerNull>(V)) 2474 return getIntegerSCEV(0, V->getType()); 2475 else if (isa<UndefValue>(V)) 2476 return getIntegerSCEV(0, V->getType()); 2477 else 2478 return getUnknown(V); 2479 2480 User *U = cast<User>(V); 2481 switch (Opcode) { 2482 case Instruction::Add: 2483 return getAddExpr(getSCEV(U->getOperand(0)), 2484 getSCEV(U->getOperand(1))); 2485 case Instruction::Mul: 2486 return getMulExpr(getSCEV(U->getOperand(0)), 2487 getSCEV(U->getOperand(1))); 2488 case Instruction::UDiv: 2489 return getUDivExpr(getSCEV(U->getOperand(0)), 2490 getSCEV(U->getOperand(1))); 2491 case Instruction::Sub: 2492 return getMinusSCEV(getSCEV(U->getOperand(0)), 2493 getSCEV(U->getOperand(1))); 2494 case Instruction::And: 2495 // For an expression like x&255 that merely masks off the high bits, 2496 // use zext(trunc(x)) as the SCEV expression. 2497 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 2498 if (CI->isNullValue()) 2499 return getSCEV(U->getOperand(1)); 2500 if (CI->isAllOnesValue()) 2501 return getSCEV(U->getOperand(0)); 2502 const APInt &A = CI->getValue(); 2503 2504 // Instcombine's ShrinkDemandedConstant may strip bits out of 2505 // constants, obscuring what would otherwise be a low-bits mask. 2506 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant 2507 // knew about to reconstruct a low-bits mask value. 2508 unsigned LZ = A.countLeadingZeros(); 2509 unsigned BitWidth = A.getBitWidth(); 2510 APInt AllOnes = APInt::getAllOnesValue(BitWidth); 2511 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 2512 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD); 2513 2514 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ); 2515 2516 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask)) 2517 return 2518 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)), 2519 IntegerType::get(BitWidth - LZ)), 2520 U->getType()); 2521 } 2522 break; 2523 2524 case Instruction::Or: 2525 // If the RHS of the Or is a constant, we may have something like: 2526 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 2527 // optimizations will transparently handle this case. 2528 // 2529 // In order for this transformation to be safe, the LHS must be of the 2530 // form X*(2^n) and the Or constant must be less than 2^n. 2531 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 2532 const SCEV* LHS = getSCEV(U->getOperand(0)); 2533 const APInt &CIVal = CI->getValue(); 2534 if (GetMinTrailingZeros(LHS) >= 2535 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) 2536 return getAddExpr(LHS, getSCEV(U->getOperand(1))); 2537 } 2538 break; 2539 case Instruction::Xor: 2540 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 2541 // If the RHS of the xor is a signbit, then this is just an add. 2542 // Instcombine turns add of signbit into xor as a strength reduction step. 2543 if (CI->getValue().isSignBit()) 2544 return getAddExpr(getSCEV(U->getOperand(0)), 2545 getSCEV(U->getOperand(1))); 2546 2547 // If the RHS of xor is -1, then this is a not operation. 
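// For example, xor %x, -1 is modeled as getNotSCEV(%x), i.e. (-1 - %x).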
2548 if (CI->isAllOnesValue()) 2549 return getNotSCEV(getSCEV(U->getOperand(0))); 2550 2551 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 2552 // This is a variant of the check for xor with -1, and it handles 2553 // the case where instcombine has trimmed non-demanded bits out 2554 // of an xor with -1. 2555 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) 2556 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1))) 2557 if (BO->getOpcode() == Instruction::And && 2558 LCI->getValue() == CI->getValue()) 2559 if (const SCEVZeroExtendExpr *Z = 2560 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) { 2561 const Type *UTy = U->getType(); 2562 const SCEV* Z0 = Z->getOperand(); 2563 const Type *Z0Ty = Z0->getType(); 2564 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 2565 2566 // If C is a low-bits mask, the zero extend is serving to 2567 // mask off the high bits. Complement the operand and 2568 // re-apply the zext. 2569 if (APIntOps::isMask(Z0TySize, CI->getValue())) 2570 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 2571 2572 // If C is a single bit, it may be in the sign-bit position 2573 // before the zero-extend. In this case, represent the xor 2574 // using an add, which is equivalent, and re-apply the zext. 2575 APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize); 2576 if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() && 2577 Trunc.isSignBit()) 2578 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 2579 UTy); 2580 } 2581 } 2582 break; 2583 2584 case Instruction::Shl: 2585 // Turn shift left of a constant amount into a multiply. 2586 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) { 2587 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth(); 2588 Constant *X = ConstantInt::get( 2589 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth))); 2590 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X)); 2591 } 2592 break; 2593 2594 case Instruction::LShr: 2595 // Turn logical shift right of a constant into an unsigned divide. 2596 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) { 2597 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth(); 2598 Constant *X = ConstantInt::get( 2599 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth))); 2600 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X)); 2601 } 2602 break; 2603 2604 case Instruction::AShr: 2605 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
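// For example (illustrative), on i32 the pair (%x << 24) followed by an arithmetic shift right by 24 is modeled as sext(trunc %x to i8) back to i32.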
2606 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) 2607 if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0))) 2608 if (L->getOpcode() == Instruction::Shl && 2609 L->getOperand(1) == U->getOperand(1)) { 2610 unsigned BitWidth = getTypeSizeInBits(U->getType()); 2611 uint64_t Amt = BitWidth - CI->getZExtValue(); 2612 if (Amt == BitWidth) 2613 return getSCEV(L->getOperand(0)); // shift by zero --> noop 2614 if (Amt > BitWidth) 2615 return getIntegerSCEV(0, U->getType()); // value is undefined 2616 return 2617 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)), 2618 IntegerType::get(Amt)), 2619 U->getType()); 2620 } 2621 break; 2622 2623 case Instruction::Trunc: 2624 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 2625 2626 case Instruction::ZExt: 2627 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 2628 2629 case Instruction::SExt: 2630 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 2631 2632 case Instruction::BitCast: 2633 // BitCasts are no-op casts so we just eliminate the cast. 2634 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 2635 return getSCEV(U->getOperand(0)); 2636 break; 2637 2638 case Instruction::IntToPtr: 2639 if (!TD) break; // Without TD we can't analyze pointers. 2640 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)), 2641 TD->getIntPtrType()); 2642 2643 case Instruction::PtrToInt: 2644 if (!TD) break; // Without TD we can't analyze pointers. 2645 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)), 2646 U->getType()); 2647 2648 case Instruction::GetElementPtr: 2649 if (!TD) break; // Without TD we can't analyze pointers. 2650 return createNodeForGEP(U); 2651 2652 case Instruction::PHI: 2653 return createNodeForPHI(cast<PHINode>(U)); 2654 2655 case Instruction::Select: 2656 // This could be a smax or umax that was lowered earlier. 2657 // Try to recover it. 2658 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) { 2659 Value *LHS = ICI->getOperand(0); 2660 Value *RHS = ICI->getOperand(1); 2661 switch (ICI->getPredicate()) { 2662 case ICmpInst::ICMP_SLT: 2663 case ICmpInst::ICMP_SLE: 2664 std::swap(LHS, RHS); 2665 // fall through 2666 case ICmpInst::ICMP_SGT: 2667 case ICmpInst::ICMP_SGE: 2668 if (LHS == U->getOperand(1) && RHS == U->getOperand(2)) 2669 return getSMaxExpr(getSCEV(LHS), getSCEV(RHS)); 2670 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1)) 2671 return getSMinExpr(getSCEV(LHS), getSCEV(RHS)); 2672 break; 2673 case ICmpInst::ICMP_ULT: 2674 case ICmpInst::ICMP_ULE: 2675 std::swap(LHS, RHS); 2676 // fall through 2677 case ICmpInst::ICMP_UGT: 2678 case ICmpInst::ICMP_UGE: 2679 if (LHS == U->getOperand(1) && RHS == U->getOperand(2)) 2680 return getUMaxExpr(getSCEV(LHS), getSCEV(RHS)); 2681 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1)) 2682 return getUMinExpr(getSCEV(LHS), getSCEV(RHS)); 2683 break; 2684 case ICmpInst::ICMP_NE: 2685 // n != 0 ? n : 1 -> umax(n, 1) 2686 if (LHS == U->getOperand(1) && 2687 isa<ConstantInt>(U->getOperand(2)) && 2688 cast<ConstantInt>(U->getOperand(2))->isOne() && 2689 isa<ConstantInt>(RHS) && 2690 cast<ConstantInt>(RHS)->isZero()) 2691 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2))); 2692 break; 2693 case ICmpInst::ICMP_EQ: 2694 // n == 0 ? 
1 : n -> umax(n, 1) 2695 if (LHS == U->getOperand(2) && 2696 isa<ConstantInt>(U->getOperand(1)) && 2697 cast<ConstantInt>(U->getOperand(1))->isOne() && 2698 isa<ConstantInt>(RHS) && 2699 cast<ConstantInt>(RHS)->isZero()) 2700 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1))); 2701 break; 2702 default: 2703 break; 2704 } 2705 } 2706 2707 default: // We cannot analyze this expression. 2708 break; 2709 } 2710 2711 return getUnknown(V); 2712 } 2713 2714 2715 2716 //===----------------------------------------------------------------------===// 2717 // Iteration Count Computation Code 2718 // 2719 2720 /// getBackedgeTakenCount - If the specified loop has a predictable 2721 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute 2722 /// object. The backedge-taken count is the number of times the loop header 2723 /// will be branched to from within the loop. This is one less than the 2724 /// trip count of the loop, since it doesn't count the first iteration, 2725 /// when the header is branched to from outside the loop. 2726 /// 2727 /// Note that it is not valid to call this method on a loop without a 2728 /// loop-invariant backedge-taken count (see 2729 /// hasLoopInvariantBackedgeTakenCount). 2730 /// 2731 const SCEV* ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 2732 return getBackedgeTakenInfo(L).Exact; 2733 } 2734 2735 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except 2736 /// return the least SCEV value that is known never to be less than the 2737 /// actual backedge taken count. 2738 const SCEV* ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 2739 return getBackedgeTakenInfo(L).Max; 2740 } 2741 2742 const ScalarEvolution::BackedgeTakenInfo & 2743 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 2744 // Initially insert a CouldNotCompute for this loop. If the insertion 2745 // succeeds, procede to actually compute a backedge-taken count and 2746 // update the value. The temporary CouldNotCompute value tells SCEV 2747 // code elsewhere that it shouldn't attempt to request a new 2748 // backedge-taken count, which could result in infinite recursion. 2749 std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair = 2750 BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute())); 2751 if (Pair.second) { 2752 BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L); 2753 if (ItCount.Exact != CouldNotCompute) { 2754 assert(ItCount.Exact->isLoopInvariant(L) && 2755 ItCount.Max->isLoopInvariant(L) && 2756 "Computed trip count isn't loop invariant for loop!"); 2757 ++NumTripCountsComputed; 2758 2759 // Update the value in the map. 2760 Pair.first->second = ItCount; 2761 } else { 2762 if (ItCount.Max != CouldNotCompute) 2763 // Update the value in the map. 2764 Pair.first->second = ItCount; 2765 if (isa<PHINode>(L->getHeader()->begin())) 2766 // Only count loops that have phi nodes as not being computable. 2767 ++NumTripCountsNotComputed; 2768 } 2769 2770 // Now that we know more about the trip count for this loop, forget any 2771 // existing SCEV values for PHI nodes in this loop since they are only 2772 // conservative estimates made without the benefit 2773 // of trip count information. 
2774 if (ItCount.hasAnyInfo()) 2775 forgetLoopPHIs(L); 2776 } 2777 return Pair.first->second; 2778 } 2779 2780 /// forgetLoopBackedgeTakenCount - This method should be called by the 2781 /// client when it has changed a loop in a way that may affect 2782 /// ScalarEvolution's ability to compute a trip count, or if the loop 2783 /// is deleted. 2784 void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) { 2785 BackedgeTakenCounts.erase(L); 2786 forgetLoopPHIs(L); 2787 } 2788 2789 /// forgetLoopPHIs - Delete the memoized SCEVs associated with the 2790 /// PHI nodes in the given loop. This is used when the trip count of 2791 /// the loop may have changed. 2792 void ScalarEvolution::forgetLoopPHIs(const Loop *L) { 2793 BasicBlock *Header = L->getHeader(); 2794 2795 // Push all Loop-header PHIs onto the Worklist stack, except those 2796 // that are presently represented via a SCEVUnknown. SCEVUnknown for 2797 // a PHI either means that it has an unrecognized structure, or it's 2798 // a PHI that's in the process of being computed by createNodeForPHI. 2799 // In the former case, additional loop trip count information isn't 2800 // going to change anything. In the latter case, createNodeForPHI will 2801 // perform the necessary updates on its own when it gets to that point. 2802 SmallVector<Instruction *, 16> Worklist; 2803 for (BasicBlock::iterator I = Header->begin(); 2804 PHINode *PN = dyn_cast<PHINode>(I); ++I) { 2805 std::map<SCEVCallbackVH, const SCEV*>::iterator It = 2806 Scalars.find((Value*)I); 2807 if (It != Scalars.end() && !isa<SCEVUnknown>(It->second)) 2808 Worklist.push_back(PN); 2809 } 2810 2811 while (!Worklist.empty()) { 2812 Instruction *I = Worklist.pop_back_val(); 2813 if (Scalars.erase(I)) 2814 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); 2815 UI != UE; ++UI) 2816 Worklist.push_back(cast<Instruction>(UI)); 2817 } 2818 } 2819 2820 /// ComputeBackedgeTakenCount - Compute the number of times the backedge 2821 /// of the specified loop will execute. 2822 ScalarEvolution::BackedgeTakenInfo 2823 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) { 2824 SmallVector<BasicBlock*, 8> ExitingBlocks; 2825 L->getExitingBlocks(ExitingBlocks); 2826 2827 // Examine all exits and pick the most conservative values. 2828 const SCEV* BECount = CouldNotCompute; 2829 const SCEV* MaxBECount = CouldNotCompute; 2830 bool CouldNotComputeBECount = false; 2831 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 2832 BackedgeTakenInfo NewBTI = 2833 ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]); 2834 2835 if (NewBTI.Exact == CouldNotCompute) { 2836 // We couldn't compute an exact value for this exit, so 2837 // we won't be able to compute an exact value for the loop. 2838 CouldNotComputeBECount = true; 2839 BECount = CouldNotCompute; 2840 } else if (!CouldNotComputeBECount) { 2841 if (BECount == CouldNotCompute) 2842 BECount = NewBTI.Exact; 2843 else 2844 BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact); 2845 } 2846 if (MaxBECount == CouldNotCompute) 2847 MaxBECount = NewBTI.Max; 2848 else if (NewBTI.Max != CouldNotCompute) 2849 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max); 2850 } 2851 2852 return BackedgeTakenInfo(BECount, MaxBECount); 2853 } 2854 2855 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge 2856 /// of the specified loop will execute if it exits via the specified block.
2857 ScalarEvolution::BackedgeTakenInfo 2858 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L, 2859 BasicBlock *ExitingBlock) { 2860 2861 // Okay, we've chosen an exiting block. See what condition causes us to 2862 // exit at this block. 2863 // 2864 // FIXME: we should be able to handle switch instructions (with a single exit) 2865 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 2866 if (ExitBr == 0) return CouldNotCompute; 2867 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!"); 2868 2869 // At this point, we know we have a conditional branch that determines whether 2870 // the loop is exited. However, we don't know if the branch is executed each 2871 // time through the loop. If not, then the execution count of the branch will 2872 // not be equal to the trip count of the loop. 2873 // 2874 // Currently we check for this by checking to see if the Exit branch goes to 2875 // the loop header. If so, we know it will always execute the same number of 2876 // times as the loop. We also handle the case where the exit block *is* the 2877 // loop header. This is common for un-rotated loops. 2878 // 2879 // If both of those tests fail, walk up the unique predecessor chain to the 2880 // header, stopping if there is an edge that doesn't exit the loop. If the 2881 // header is reached, the execution count of the branch will be equal to the 2882 // trip count of the loop. 2883 // 2884 // More extensive analysis could be done to handle more cases here. 2885 // 2886 if (ExitBr->getSuccessor(0) != L->getHeader() && 2887 ExitBr->getSuccessor(1) != L->getHeader() && 2888 ExitBr->getParent() != L->getHeader()) { 2889 // The simple checks failed, try climbing the unique predecessor chain 2890 // up to the header. 2891 bool Ok = false; 2892 for (BasicBlock *BB = ExitBr->getParent(); BB; ) { 2893 BasicBlock *Pred = BB->getUniquePredecessor(); 2894 if (!Pred) 2895 return CouldNotCompute; 2896 TerminatorInst *PredTerm = Pred->getTerminator(); 2897 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) { 2898 BasicBlock *PredSucc = PredTerm->getSuccessor(i); 2899 if (PredSucc == BB) 2900 continue; 2901 // If the predecessor has a successor that isn't BB and isn't 2902 // outside the loop, assume the worst. 2903 if (L->contains(PredSucc)) 2904 return CouldNotCompute; 2905 } 2906 if (Pred == L->getHeader()) { 2907 Ok = true; 2908 break; 2909 } 2910 BB = Pred; 2911 } 2912 if (!Ok) 2913 return CouldNotCompute; 2914 } 2915 2916 // Procede to the next level to examine the exit condition expression. 2917 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(), 2918 ExitBr->getSuccessor(0), 2919 ExitBr->getSuccessor(1)); 2920 } 2921 2922 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the 2923 /// backedge of the specified loop will execute if its exit condition 2924 /// were a conditional branch of ExitCond, TBB, and FBB. 2925 ScalarEvolution::BackedgeTakenInfo 2926 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L, 2927 Value *ExitCond, 2928 BasicBlock *TBB, 2929 BasicBlock *FBB) { 2930 // Check if the controlling expression for this loop is an And or Or. 2931 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 2932 if (BO->getOpcode() == Instruction::And) { 2933 // Recurse on the operands of the and. 
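// For example (illustrative), when the branch keeps looping only while both sub-conditions hold, as in (i < n && i < m), the exact backedge-taken count computed below is the unsigned minimum of the counts for the two sub-conditions.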
2934 BackedgeTakenInfo BTI0 = 2935 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); 2936 BackedgeTakenInfo BTI1 = 2937 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); 2938 const SCEV* BECount = CouldNotCompute; 2939 const SCEV* MaxBECount = CouldNotCompute; 2940 if (L->contains(TBB)) { 2941 // Both conditions must be true for the loop to continue executing. 2942 // Choose the less conservative count. 2943 if (BTI0.Exact == CouldNotCompute || BTI1.Exact == CouldNotCompute) 2944 BECount = CouldNotCompute; 2945 else 2946 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 2947 if (BTI0.Max == CouldNotCompute) 2948 MaxBECount = BTI1.Max; 2949 else if (BTI1.Max == CouldNotCompute) 2950 MaxBECount = BTI0.Max; 2951 else 2952 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max); 2953 } else { 2954 // Both conditions must be true for the loop to exit. 2955 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 2956 if (BTI0.Exact != CouldNotCompute && BTI1.Exact != CouldNotCompute) 2957 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 2958 if (BTI0.Max != CouldNotCompute && BTI1.Max != CouldNotCompute) 2959 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max); 2960 } 2961 2962 return BackedgeTakenInfo(BECount, MaxBECount); 2963 } 2964 if (BO->getOpcode() == Instruction::Or) { 2965 // Recurse on the operands of the or. 2966 BackedgeTakenInfo BTI0 = 2967 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); 2968 BackedgeTakenInfo BTI1 = 2969 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); 2970 const SCEV* BECount = CouldNotCompute; 2971 const SCEV* MaxBECount = CouldNotCompute; 2972 if (L->contains(FBB)) { 2973 // Both conditions must be false for the loop to continue executing. 2974 // Choose the less conservative count. 2975 if (BTI0.Exact == CouldNotCompute || BTI1.Exact == CouldNotCompute) 2976 BECount = CouldNotCompute; 2977 else 2978 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 2979 if (BTI0.Max == CouldNotCompute) 2980 MaxBECount = BTI1.Max; 2981 else if (BTI1.Max == CouldNotCompute) 2982 MaxBECount = BTI0.Max; 2983 else 2984 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max); 2985 } else { 2986 // Both conditions must be false for the loop to exit. 2987 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 2988 if (BTI0.Exact != CouldNotCompute && BTI1.Exact != CouldNotCompute) 2989 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 2990 if (BTI0.Max != CouldNotCompute && BTI1.Max != CouldNotCompute) 2991 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max); 2992 } 2993 2994 return BackedgeTakenInfo(BECount, MaxBECount); 2995 } 2996 } 2997 2998 // With an icmp, it may be feasible to compute an exact backedge-taken count. 2999 // Procede to the next level to examine the icmp. 3000 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) 3001 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB); 3002 3003 // If it's not an integer or pointer comparison then compute it the hard way. 3004 return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); 3005 } 3006 3007 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the 3008 /// backedge of the specified loop will execute if its exit condition 3009 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB. 
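///
/// For example (purely illustrative), an exit controlled by (i != n), where i
/// is the canonical induction variable {0,+,1}, is rewritten below as
/// (i - n != 0) and handed to HowFarToZero, which determines how many steps
/// the recurrence {-n,+,1} needs to reach zero.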
3010 ScalarEvolution::BackedgeTakenInfo 3011 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, 3012 ICmpInst *ExitCond, 3013 BasicBlock *TBB, 3014 BasicBlock *FBB) { 3015 3016 // If the condition was exit on true, convert the condition to exit on false 3017 ICmpInst::Predicate Cond; 3018 if (!L->contains(FBB)) 3019 Cond = ExitCond->getPredicate(); 3020 else 3021 Cond = ExitCond->getInversePredicate(); 3022 3023 // Handle common loops like: for (X = "string"; *X; ++X) 3024 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 3025 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 3026 const SCEV* ItCnt = 3027 ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond); 3028 if (!isa<SCEVCouldNotCompute>(ItCnt)) { 3029 unsigned BitWidth = getTypeSizeInBits(ItCnt->getType()); 3030 return BackedgeTakenInfo(ItCnt, 3031 isa<SCEVConstant>(ItCnt) ? ItCnt : 3032 getConstant(APInt::getMaxValue(BitWidth)-1)); 3033 } 3034 } 3035 3036 const SCEV* LHS = getSCEV(ExitCond->getOperand(0)); 3037 const SCEV* RHS = getSCEV(ExitCond->getOperand(1)); 3038 3039 // Try to evaluate any dependencies out of the loop. 3040 LHS = getSCEVAtScope(LHS, L); 3041 RHS = getSCEVAtScope(RHS, L); 3042 3043 // At this point, we would like to compute how many iterations of the 3044 // loop the predicate will return true for these inputs. 3045 if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) { 3046 // If there is a loop-invariant, force it into the RHS. 3047 std::swap(LHS, RHS); 3048 Cond = ICmpInst::getSwappedPredicate(Cond); 3049 } 3050 3051 // If we have a comparison of a chrec against a constant, try to use value 3052 // ranges to answer this query. 3053 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 3054 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 3055 if (AddRec->getLoop() == L) { 3056 // Form the constant range. 
3057 ConstantRange CompRange( 3058 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue())); 3059 3060 const SCEV* Ret = AddRec->getNumIterationsInRange(CompRange, *this); 3061 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 3062 } 3063 3064 switch (Cond) { 3065 case ICmpInst::ICMP_NE: { // while (X != Y) 3066 // Convert to: while (X-Y != 0) 3067 const SCEV* TC = HowFarToZero(getMinusSCEV(LHS, RHS), L); 3068 if (!isa<SCEVCouldNotCompute>(TC)) return TC; 3069 break; 3070 } 3071 case ICmpInst::ICMP_EQ: { 3072 // Convert to: while (X-Y == 0) // while (X == Y) 3073 const SCEV* TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); 3074 if (!isa<SCEVCouldNotCompute>(TC)) return TC; 3075 break; 3076 } 3077 case ICmpInst::ICMP_SLT: { 3078 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true); 3079 if (BTI.hasAnyInfo()) return BTI; 3080 break; 3081 } 3082 case ICmpInst::ICMP_SGT: { 3083 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), 3084 getNotSCEV(RHS), L, true); 3085 if (BTI.hasAnyInfo()) return BTI; 3086 break; 3087 } 3088 case ICmpInst::ICMP_ULT: { 3089 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false); 3090 if (BTI.hasAnyInfo()) return BTI; 3091 break; 3092 } 3093 case ICmpInst::ICMP_UGT: { 3094 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), 3095 getNotSCEV(RHS), L, false); 3096 if (BTI.hasAnyInfo()) return BTI; 3097 break; 3098 } 3099 default: 3100 #if 0 3101 errs() << "ComputeBackedgeTakenCount "; 3102 if (ExitCond->getOperand(0)->getType()->isUnsigned()) 3103 errs() << "[unsigned] "; 3104 errs() << *LHS << " " 3105 << Instruction::getOpcodeName(Instruction::ICmp) 3106 << " " << *RHS << "\n"; 3107 #endif 3108 break; 3109 } 3110 return 3111 ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); 3112 } 3113 3114 static ConstantInt * 3115 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 3116 ScalarEvolution &SE) { 3117 const SCEV* InVal = SE.getConstant(C); 3118 const SCEV* Val = AddRec->evaluateAtIteration(InVal, SE); 3119 assert(isa<SCEVConstant>(Val) && 3120 "Evaluation of SCEV at constant didn't fold correctly?"); 3121 return cast<SCEVConstant>(Val)->getValue(); 3122 } 3123 3124 /// GetAddressedElementFromGlobal - Given a global variable with an initializer 3125 /// and a GEP expression (missing the pointer index) indexing into it, return 3126 /// the addressed element of the initializer or null if the index expression is 3127 /// invalid. 
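///
/// For example (illustrative only), given a global such as
///   @str = internal constant [6 x i8] c"hello\00"
/// and the index list {3}, this returns the i8 constant for 'l'.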
3128 static Constant * 3129 GetAddressedElementFromGlobal(GlobalVariable *GV, 3130 const std::vector<ConstantInt*> &Indices) { 3131 Constant *Init = GV->getInitializer(); 3132 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 3133 uint64_t Idx = Indices[i]->getZExtValue(); 3134 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) { 3135 assert(Idx < CS->getNumOperands() && "Bad struct index!"); 3136 Init = cast<Constant>(CS->getOperand(Idx)); 3137 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) { 3138 if (Idx >= CA->getNumOperands()) return 0; // Bogus program 3139 Init = cast<Constant>(CA->getOperand(Idx)); 3140 } else if (isa<ConstantAggregateZero>(Init)) { 3141 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) { 3142 assert(Idx < STy->getNumElements() && "Bad struct index!"); 3143 Init = Constant::getNullValue(STy->getElementType(Idx)); 3144 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) { 3145 if (Idx >= ATy->getNumElements()) return 0; // Bogus program 3146 Init = Constant::getNullValue(ATy->getElementType()); 3147 } else { 3148 assert(0 && "Unknown constant aggregate type!"); 3149 } 3150 return 0; 3151 } else { 3152 return 0; // Unknown initializer type 3153 } 3154 } 3155 return Init; 3156 } 3157 3158 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of 3159 /// 'icmp op load X, cst', try to see if we can compute the backedge 3160 /// execution count. 3161 const SCEV * 3162 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount( 3163 LoadInst *LI, 3164 Constant *RHS, 3165 const Loop *L, 3166 ICmpInst::Predicate predicate) { 3167 if (LI->isVolatile()) return CouldNotCompute; 3168 3169 // Check to see if the loaded pointer is a getelementptr of a global. 3170 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 3171 if (!GEP) return CouldNotCompute; 3172 3173 // Make sure that it is really a constant global we are gepping, with an 3174 // initializer, and make sure the first IDX is really 0. 3175 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 3176 if (!GV || !GV->isConstant() || !GV->hasInitializer() || 3177 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 3178 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 3179 return CouldNotCompute; 3180 3181 // Okay, we allow one non-constant index into the GEP instruction. 3182 Value *VarIdx = 0; 3183 std::vector<ConstantInt*> Indexes; 3184 unsigned VarIdxNum = 0; 3185 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 3186 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 3187 Indexes.push_back(CI); 3188 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 3189 if (VarIdx) return CouldNotCompute; // Multiple non-constant idx's. 3190 VarIdx = GEP->getOperand(i); 3191 VarIdxNum = i-2; 3192 Indexes.push_back(0); 3193 } 3194 3195 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 3196 // Check to see if X is a loop variant variable value now. 3197 const SCEV* Idx = getSCEV(VarIdx); 3198 Idx = getSCEVAtScope(Idx, L); 3199 3200 // We can only recognize very limited forms of loop index expressions, in 3201 // particular, only affine AddRec's like {C1,+,C2}. 
3202 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 3203 if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) || 3204 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 3205 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 3206 return CouldNotCompute; 3207 3208 unsigned MaxSteps = MaxBruteForceIterations; 3209 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 3210 ConstantInt *ItCst = 3211 ConstantInt::get(cast<IntegerType>(IdxExpr->getType()), IterationNum); 3212 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 3213 3214 // Form the GEP offset. 3215 Indexes[VarIdxNum] = Val; 3216 3217 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes); 3218 if (Result == 0) break; // Cannot compute! 3219 3220 // Evaluate the condition for this iteration. 3221 Result = ConstantExpr::getICmp(predicate, Result, RHS); 3222 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 3223 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 3224 #if 0 3225 errs() << "\n***\n*** Computed loop count " << *ItCst 3226 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader() 3227 << "***\n"; 3228 #endif 3229 ++NumArrayLenItCounts; 3230 return getConstant(ItCst); // Found terminating iteration! 3231 } 3232 } 3233 return CouldNotCompute; 3234 } 3235 3236 3237 /// CanConstantFold - Return true if we can constant fold an instruction of the 3238 /// specified type, assuming that all operands were constants. 3239 static bool CanConstantFold(const Instruction *I) { 3240 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 3241 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I)) 3242 return true; 3243 3244 if (const CallInst *CI = dyn_cast<CallInst>(I)) 3245 if (const Function *F = CI->getCalledFunction()) 3246 return canConstantFoldCallTo(F); 3247 return false; 3248 } 3249 3250 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 3251 /// in the loop that V is derived from. We allow arbitrary operations along the 3252 /// way, but the operands of an operation must either be constants or a value 3253 /// derived from a constant PHI. If this expression does not fit with these 3254 /// constraints, return null. 3255 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 3256 // If this is not an instruction, or if this is an instruction outside of the 3257 // loop, it can't be derived from a loop PHI. 3258 Instruction *I = dyn_cast<Instruction>(V); 3259 if (I == 0 || !L->contains(I->getParent())) return 0; 3260 3261 if (PHINode *PN = dyn_cast<PHINode>(I)) { 3262 if (L->getHeader() == I->getParent()) 3263 return PN; 3264 else 3265 // We don't currently keep track of the control flow needed to evaluate 3266 // PHIs, so we cannot handle PHIs inside of loops. 3267 return 0; 3268 } 3269 3270 // If we won't be able to constant fold this expression even if the operands 3271 // are constants, return early. 3272 if (!CanConstantFold(I)) return 0; 3273 3274 // Otherwise, we can evaluate this instruction if all of its operands are 3275 // constant or derived from a PHI node themselves. 
3276 PHINode *PHI = 0; 3277 for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op) 3278 if (!(isa<Constant>(I->getOperand(Op)) || 3279 isa<GlobalValue>(I->getOperand(Op)))) { 3280 PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L); 3281 if (P == 0) return 0; // Not evolving from PHI 3282 if (PHI == 0) 3283 PHI = P; 3284 else if (PHI != P) 3285 return 0; // Evolving from multiple different PHIs. 3286 } 3287 3288 // This is a expression evolving from a constant PHI! 3289 return PHI; 3290 } 3291 3292 /// EvaluateExpression - Given an expression that passes the 3293 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node 3294 /// in the loop has the value PHIVal. If we can't fold this expression for some 3295 /// reason, return null. 3296 static Constant *EvaluateExpression(Value *V, Constant *PHIVal) { 3297 if (isa<PHINode>(V)) return PHIVal; 3298 if (Constant *C = dyn_cast<Constant>(V)) return C; 3299 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV; 3300 Instruction *I = cast<Instruction>(V); 3301 3302 std::vector<Constant*> Operands; 3303 Operands.resize(I->getNumOperands()); 3304 3305 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 3306 Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal); 3307 if (Operands[i] == 0) return 0; 3308 } 3309 3310 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 3311 return ConstantFoldCompareInstOperands(CI->getPredicate(), 3312 &Operands[0], Operands.size()); 3313 else 3314 return ConstantFoldInstOperands(I->getOpcode(), I->getType(), 3315 &Operands[0], Operands.size()); 3316 } 3317 3318 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 3319 /// in the header of its containing loop, we know the loop executes a 3320 /// constant number of times, and the PHI node is just a recurrence 3321 /// involving constants, fold it. 3322 Constant * 3323 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 3324 const APInt& BEs, 3325 const Loop *L) { 3326 std::map<PHINode*, Constant*>::iterator I = 3327 ConstantEvolutionLoopExitValue.find(PN); 3328 if (I != ConstantEvolutionLoopExitValue.end()) 3329 return I->second; 3330 3331 if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations))) 3332 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it. 3333 3334 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 3335 3336 // Since the loop is canonicalized, the PHI node must have two entries. One 3337 // entry must be a constant (coming in from outside of the loop), and the 3338 // second must be derived from the same PHI. 3339 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); 3340 Constant *StartCST = 3341 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge)); 3342 if (StartCST == 0) 3343 return RetVal = 0; // Must be a constant. 3344 3345 Value *BEValue = PN->getIncomingValue(SecondIsBackedge); 3346 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L); 3347 if (PN2 != PN) 3348 return RetVal = 0; // Not derived from same PHI. 3349 3350 // Execute the loop symbolically to determine the exit value. 3351 if (BEs.getActiveBits() >= 32) 3352 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it! 3353 3354 unsigned NumIterations = BEs.getZExtValue(); // must be in range 3355 unsigned IterationNum = 0; 3356 for (Constant *PHIVal = StartCST; ; ++IterationNum) { 3357 if (IterationNum == NumIterations) 3358 return RetVal = PHIVal; // Got exit value! 3359 3360 // Compute the value of the PHI node for the next iteration. 
    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
    if (NextPHI == PHIVal)
      return RetVal = NextPHI;  // Stopped evolving!
    if (NextPHI == 0)
      return 0;                 // Couldn't evaluate!
    PHIVal = NextPHI;
  }
}

/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
/// constant number of times (the condition evolves only from constants),
/// try to evaluate a few iterations of the loop until the exit condition
/// takes on the value ExitWhen (true or false). If we cannot evaluate the
/// trip count of the loop, return CouldNotCompute.
const SCEV *
ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
                                                       Value *Cond,
                                                       bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (PN == 0) return CouldNotCompute;

  // Since the loop is canonicalized, the PHI node must have two entries.  One
  // entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  Constant *StartCST =
    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
  if (StartCST == 0) return CouldNotCompute;  // Must be a constant.

  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
  if (PN2 != PN) return CouldNotCompute;  // Not derived from same PHI.

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned IterationNum = 0;
  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  for (Constant *PHIVal = StartCST;
       IterationNum != MaxIterations; ++IterationNum) {
    ConstantInt *CondVal =
      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));

    // Couldn't symbolically evaluate.
    if (!CondVal) return CouldNotCompute;

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ConstantEvolutionLoopExitValue[PN] = PHIVal;
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::Int32Ty, IterationNum);
    }

    // Compute the value of the PHI node for the next iteration.
    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
    if (NextPHI == 0 || NextPHI == PHIVal)
      return CouldNotCompute;  // Couldn't evaluate or not making progress...
    PHIVal = NextPHI;
  }

  // Too many iterations were needed to evaluate.
  return CouldNotCompute;
}

/// getSCEVAtScope - Return a SCEV expression handle for the specified value
/// at the specified scope in the program. The L value specifies a loop
/// nest to evaluate the expression at, where null means the top-level scope
/// and a non-null loop means the scope immediately inside that loop.
///
/// This method can be used to compute the exit value for a variable defined
/// in a loop by querying what the value will hold in the parent loop.
///
/// In the case that a relevant loop exit value cannot be computed, the
/// original value V is returned.
const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  // FIXME: this should be turned into a virtual method on SCEV!
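  //
  // As an illustration (one simple case only): if V is the recurrence
  // {0,+,1}<L1> for some loop L1 whose backedge-taken count is known to be
  // the expression n, then asking for V at the scope of L1's parent loop
  // yields n, the value the recurrence holds after the final backedge.  That
  // is the AddRec case below, which calls evaluateAtIteration on the
  // backedge-taken count.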
3436 3437 if (isa<SCEVConstant>(V)) return V; 3438 3439 // If this instruction is evolved from a constant-evolving PHI, compute the 3440 // exit value from the loop without using SCEVs. 3441 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 3442 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 3443 const Loop *LI = (*this->LI)[I->getParent()]; 3444 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 3445 if (PHINode *PN = dyn_cast<PHINode>(I)) 3446 if (PN->getParent() == LI->getHeader()) { 3447 // Okay, there is no closed form solution for the PHI node. Check 3448 // to see if the loop that contains it has a known backedge-taken 3449 // count. If so, we may be able to force computation of the exit 3450 // value. 3451 const SCEV* BackedgeTakenCount = getBackedgeTakenCount(LI); 3452 if (const SCEVConstant *BTCC = 3453 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 3454 // Okay, we know how many times the containing loop executes. If 3455 // this is a constant evolving PHI node, get the final value at 3456 // the specified iteration number. 3457 Constant *RV = getConstantEvolutionLoopExitValue(PN, 3458 BTCC->getValue()->getValue(), 3459 LI); 3460 if (RV) return getUnknown(RV); 3461 } 3462 } 3463 3464 // Okay, this is an expression that we cannot symbolically evaluate 3465 // into a SCEV. Check to see if it's possible to symbolically evaluate 3466 // the arguments into constants, and if so, try to constant propagate the 3467 // result. This is particularly useful for computing loop exit values. 3468 if (CanConstantFold(I)) { 3469 // Check to see if we've folded this instruction at this loop before. 3470 std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I]; 3471 std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair = 3472 Values.insert(std::make_pair(L, static_cast<Constant *>(0))); 3473 if (!Pair.second) 3474 return Pair.first->second ? &*getUnknown(Pair.first->second) : V; 3475 3476 std::vector<Constant*> Operands; 3477 Operands.reserve(I->getNumOperands()); 3478 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 3479 Value *Op = I->getOperand(i); 3480 if (Constant *C = dyn_cast<Constant>(Op)) { 3481 Operands.push_back(C); 3482 } else { 3483 // If any of the operands is non-constant and if they are 3484 // non-integer and non-pointer, don't even try to analyze them 3485 // with scev techniques. 
3486 if (!isSCEVable(Op->getType())) 3487 return V; 3488 3489 const SCEV* OpV = getSCEVAtScope(getSCEV(Op), L); 3490 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) { 3491 Constant *C = SC->getValue(); 3492 if (C->getType() != Op->getType()) 3493 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 3494 Op->getType(), 3495 false), 3496 C, Op->getType()); 3497 Operands.push_back(C); 3498 } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) { 3499 if (Constant *C = dyn_cast<Constant>(SU->getValue())) { 3500 if (C->getType() != Op->getType()) 3501 C = 3502 ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 3503 Op->getType(), 3504 false), 3505 C, Op->getType()); 3506 Operands.push_back(C); 3507 } else 3508 return V; 3509 } else { 3510 return V; 3511 } 3512 } 3513 } 3514 3515 Constant *C; 3516 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 3517 C = ConstantFoldCompareInstOperands(CI->getPredicate(), 3518 &Operands[0], Operands.size()); 3519 else 3520 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), 3521 &Operands[0], Operands.size()); 3522 Pair.first->second = C; 3523 return getUnknown(C); 3524 } 3525 } 3526 3527 // This is some other type of SCEVUnknown, just return it. 3528 return V; 3529 } 3530 3531 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 3532 // Avoid performing the look-up in the common case where the specified 3533 // expression has no loop-variant portions. 3534 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 3535 const SCEV* OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 3536 if (OpAtScope != Comm->getOperand(i)) { 3537 // Okay, at least one of these operands is loop variant but might be 3538 // foldable. Build a new instance of the folded commutative expression. 3539 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 3540 Comm->op_begin()+i); 3541 NewOps.push_back(OpAtScope); 3542 3543 for (++i; i != e; ++i) { 3544 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 3545 NewOps.push_back(OpAtScope); 3546 } 3547 if (isa<SCEVAddExpr>(Comm)) 3548 return getAddExpr(NewOps); 3549 if (isa<SCEVMulExpr>(Comm)) 3550 return getMulExpr(NewOps); 3551 if (isa<SCEVSMaxExpr>(Comm)) 3552 return getSMaxExpr(NewOps); 3553 if (isa<SCEVUMaxExpr>(Comm)) 3554 return getUMaxExpr(NewOps); 3555 assert(0 && "Unknown commutative SCEV type!"); 3556 } 3557 } 3558 // If we got here, all operands are loop invariant. 3559 return Comm; 3560 } 3561 3562 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 3563 const SCEV* LHS = getSCEVAtScope(Div->getLHS(), L); 3564 const SCEV* RHS = getSCEVAtScope(Div->getRHS(), L); 3565 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 3566 return Div; // must be loop invariant 3567 return getUDivExpr(LHS, RHS); 3568 } 3569 3570 // If this is a loop recurrence for a loop that does not contain L, then we 3571 // are dealing with the final value computed by the loop. 3572 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 3573 if (!L || !AddRec->getLoop()->contains(L->getHeader())) { 3574 // To evaluate this recurrence, we need to know how many times the AddRec 3575 // loop iterates. Compute this now. 3576 const SCEV* BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 3577 if (BackedgeTakenCount == CouldNotCompute) return AddRec; 3578 3579 // Then, evaluate the AddRec. 
3580 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 3581 } 3582 return AddRec; 3583 } 3584 3585 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 3586 const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); 3587 if (Op == Cast->getOperand()) 3588 return Cast; // must be loop invariant 3589 return getZeroExtendExpr(Op, Cast->getType()); 3590 } 3591 3592 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 3593 const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); 3594 if (Op == Cast->getOperand()) 3595 return Cast; // must be loop invariant 3596 return getSignExtendExpr(Op, Cast->getType()); 3597 } 3598 3599 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 3600 const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); 3601 if (Op == Cast->getOperand()) 3602 return Cast; // must be loop invariant 3603 return getTruncateExpr(Op, Cast->getType()); 3604 } 3605 3606 assert(0 && "Unknown SCEV type!"); 3607 return 0; 3608 } 3609 3610 /// getSCEVAtScope - This is a convenience function which does 3611 /// getSCEVAtScope(getSCEV(V), L). 3612 const SCEV* ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 3613 return getSCEVAtScope(getSCEV(V), L); 3614 } 3615 3616 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the 3617 /// following equation: 3618 /// 3619 /// A * X = B (mod N) 3620 /// 3621 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 3622 /// A and B isn't important. 3623 /// 3624 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 3625 static const SCEV* SolveLinEquationWithOverflow(const APInt &A, const APInt &B, 3626 ScalarEvolution &SE) { 3627 uint32_t BW = A.getBitWidth(); 3628 assert(BW == B.getBitWidth() && "Bit widths must be the same."); 3629 assert(A != 0 && "A must be non-zero."); 3630 3631 // 1. D = gcd(A, N) 3632 // 3633 // The gcd of A and N may have only one prime factor: 2. The number of 3634 // trailing zeros in A is its multiplicity 3635 uint32_t Mult2 = A.countTrailingZeros(); 3636 // D = 2^Mult2 3637 3638 // 2. Check if B is divisible by D. 3639 // 3640 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 3641 // is not less than multiplicity of this prime factor for D. 3642 if (B.countTrailingZeros() < Mult2) 3643 return SE.getCouldNotCompute(); 3644 3645 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 3646 // modulo (N / D). 3647 // 3648 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this 3649 // bit width during computations. 3650 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 3651 APInt Mod(BW + 1, 0); 3652 Mod.set(BW - Mult2); // Mod = N / D 3653 APInt I = AD.multiplicativeInverse(Mod); 3654 3655 // 4. Compute the minimum unsigned root of the equation: 3656 // I * (B / D) mod (N / D) 3657 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod); 3658 3659 // The result is guaranteed to be less than 2^BW so we may truncate it to BW 3660 // bits. 3661 return SE.getConstant(Result.trunc(BW)); 3662 } 3663 3664 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the 3665 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which 3666 /// might be the same) or two SCEVCouldNotCompute objects. 
3667 /// 3668 static std::pair<const SCEV*,const SCEV*> 3669 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 3670 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 3671 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 3672 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 3673 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 3674 3675 // We currently can only solve this if the coefficients are constants. 3676 if (!LC || !MC || !NC) { 3677 const SCEV *CNC = SE.getCouldNotCompute(); 3678 return std::make_pair(CNC, CNC); 3679 } 3680 3681 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth(); 3682 const APInt &L = LC->getValue()->getValue(); 3683 const APInt &M = MC->getValue()->getValue(); 3684 const APInt &N = NC->getValue()->getValue(); 3685 APInt Two(BitWidth, 2); 3686 APInt Four(BitWidth, 4); 3687 3688 { 3689 using namespace APIntOps; 3690 const APInt& C = L; 3691 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C 3692 // The B coefficient is M-N/2 3693 APInt B(M); 3694 B -= sdiv(N,Two); 3695 3696 // The A coefficient is N/2 3697 APInt A(N.sdiv(Two)); 3698 3699 // Compute the B^2-4ac term. 3700 APInt SqrtTerm(B); 3701 SqrtTerm *= B; 3702 SqrtTerm -= Four * (A * C); 3703 3704 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest 3705 // integer value or else APInt::sqrt() will assert. 3706 APInt SqrtVal(SqrtTerm.sqrt()); 3707 3708 // Compute the two solutions for the quadratic formula. 3709 // The divisions must be performed as signed divisions. 3710 APInt NegB(-B); 3711 APInt TwoA( A << 1 ); 3712 if (TwoA.isMinValue()) { 3713 const SCEV *CNC = SE.getCouldNotCompute(); 3714 return std::make_pair(CNC, CNC); 3715 } 3716 3717 ConstantInt *Solution1 = ConstantInt::get((NegB + SqrtVal).sdiv(TwoA)); 3718 ConstantInt *Solution2 = ConstantInt::get((NegB - SqrtVal).sdiv(TwoA)); 3719 3720 return std::make_pair(SE.getConstant(Solution1), 3721 SE.getConstant(Solution2)); 3722 } // end APIntOps namespace 3723 } 3724 3725 /// HowFarToZero - Return the number of times a backedge comparing the specified 3726 /// value to zero will execute. If not computable, return CouldNotCompute. 3727 const SCEV* ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { 3728 // If the value is a constant 3729 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 3730 // If the value is already zero, the branch will execute zero times. 3731 if (C->getValue()->isZero()) return C; 3732 return CouldNotCompute; // Otherwise it will loop infinitely. 3733 } 3734 3735 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); 3736 if (!AddRec || AddRec->getLoop() != L) 3737 return CouldNotCompute; 3738 3739 if (AddRec->isAffine()) { 3740 // If this is an affine expression, the execution count of this branch is 3741 // the minimum unsigned root of the following equation: 3742 // 3743 // Start + Step*N = 0 (mod 2^BW) 3744 // 3745 // equivalent to: 3746 // 3747 // Step*N = -Start (mod 2^BW) 3748 // 3749 // where BW is the common bit width of Start and Step. 3750 3751 // Get the initial value for the loop. 3752 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), 3753 L->getParentLoop()); 3754 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), 3755 L->getParentLoop()); 3756 3757 if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) { 3758 // For now we handle only constant steps. 3759 3760 // First, handle unitary steps. 
3761 if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so: 3762 return getNegativeSCEV(Start); // N = -Start (as unsigned) 3763 if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so: 3764 return Start; // N = Start (as unsigned) 3765 3766 // Then, try to solve the above equation provided that Start is constant. 3767 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) 3768 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(), 3769 -StartC->getValue()->getValue(), 3770 *this); 3771 } 3772 } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) { 3773 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 3774 // the quadratic equation to solve it. 3775 std::pair<const SCEV*,const SCEV*> Roots = SolveQuadraticEquation(AddRec, 3776 *this); 3777 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 3778 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 3779 if (R1) { 3780 #if 0 3781 errs() << "HFTZ: " << *V << " - sol#1: " << *R1 3782 << " sol#2: " << *R2 << "\n"; 3783 #endif 3784 // Pick the smallest positive root value. 3785 if (ConstantInt *CB = 3786 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, 3787 R1->getValue(), R2->getValue()))) { 3788 if (CB->getZExtValue() == false) 3789 std::swap(R1, R2); // R1 is the minimum root now. 3790 3791 // We can only use this value if the chrec ends up with an exact zero 3792 // value at this index. When solving for "X*X != 5", for example, we 3793 // should not accept a root of 2. 3794 const SCEV* Val = AddRec->evaluateAtIteration(R1, *this); 3795 if (Val->isZero()) 3796 return R1; // We found a quadratic root! 3797 } 3798 } 3799 } 3800 3801 return CouldNotCompute; 3802 } 3803 3804 /// HowFarToNonZero - Return the number of times a backedge checking the 3805 /// specified value for nonzero will execute. If not computable, return 3806 /// CouldNotCompute 3807 const SCEV* ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { 3808 // Loops that look like: while (X == 0) are very strange indeed. We don't 3809 // handle them yet except for the trivial case. This could be expanded in the 3810 // future as needed. 3811 3812 // If the value is a constant, check to see if it is known to be non-zero 3813 // already. If so, the backedge will execute zero times. 3814 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 3815 if (!C->getValue()->isNullValue()) 3816 return getIntegerSCEV(0, C->getType()); 3817 return CouldNotCompute; // Otherwise it will loop infinitely. 3818 } 3819 3820 // We could implement others, but I really doubt anyone writes loops like 3821 // this, and if they did, they would already be constant folded. 3822 return CouldNotCompute; 3823 } 3824 3825 /// getLoopPredecessor - If the given loop's header has exactly one unique 3826 /// predecessor outside the loop, return it. Otherwise return null. 3827 /// 3828 BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) { 3829 BasicBlock *Header = L->getHeader(); 3830 BasicBlock *Pred = 0; 3831 for (pred_iterator PI = pred_begin(Header), E = pred_end(Header); 3832 PI != E; ++PI) 3833 if (!L->contains(*PI)) { 3834 if (Pred && Pred != *PI) return 0; // Multiple predecessors. 
3835 Pred = *PI; 3836 } 3837 return Pred; 3838 } 3839 3840 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB 3841 /// (which may not be an immediate predecessor) which has exactly one 3842 /// successor from which BB is reachable, or null if no such block is 3843 /// found. 3844 /// 3845 BasicBlock * 3846 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 3847 // If the block has a unique predecessor, then there is no path from the 3848 // predecessor to the block that does not go through the direct edge 3849 // from the predecessor to the block. 3850 if (BasicBlock *Pred = BB->getSinglePredecessor()) 3851 return Pred; 3852 3853 // A loop's header is defined to be a block that dominates the loop. 3854 // If the header has a unique predecessor outside the loop, it must be 3855 // a block that has exactly one successor that can reach the loop. 3856 if (Loop *L = LI->getLoopFor(BB)) 3857 return getLoopPredecessor(L); 3858 3859 return 0; 3860 } 3861 3862 /// HasSameValue - SCEV structural equivalence is usually sufficient for 3863 /// testing whether two expressions are equal, however for the purposes of 3864 /// looking for a condition guarding a loop, it can be useful to be a little 3865 /// more general, since a front-end may have replicated the controlling 3866 /// expression. 3867 /// 3868 static bool HasSameValue(const SCEV* A, const SCEV* B) { 3869 // Quick check to see if they are the same SCEV. 3870 if (A == B) return true; 3871 3872 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 3873 // two different instructions with the same value. Check for this case. 3874 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 3875 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 3876 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 3877 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 3878 if (AI->isIdenticalTo(BI)) 3879 return true; 3880 3881 // Otherwise assume they may have a different value. 3882 return false; 3883 } 3884 3885 /// isLoopGuardedByCond - Test whether entry to the loop is protected by 3886 /// a conditional between LHS and RHS. This is used to help avoid max 3887 /// expressions in loop trip counts. 3888 bool ScalarEvolution::isLoopGuardedByCond(const Loop *L, 3889 ICmpInst::Predicate Pred, 3890 const SCEV *LHS, const SCEV *RHS) { 3891 // Interpret a null as meaning no loop, where there is obviously no guard 3892 // (interprocedural conditions notwithstanding). 3893 if (!L) return false; 3894 3895 BasicBlock *Predecessor = getLoopPredecessor(L); 3896 BasicBlock *PredecessorDest = L->getHeader(); 3897 3898 // Starting at the loop predecessor, climb up the predecessor chain, as long 3899 // as there are predecessors that can be found that have unique successors 3900 // leading to the original header. 
3901 for (; Predecessor; 3902 PredecessorDest = Predecessor, 3903 Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) { 3904 3905 BranchInst *LoopEntryPredicate = 3906 dyn_cast<BranchInst>(Predecessor->getTerminator()); 3907 if (!LoopEntryPredicate || 3908 LoopEntryPredicate->isUnconditional()) 3909 continue; 3910 3911 if (isNecessaryCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS, 3912 LoopEntryPredicate->getSuccessor(0) != PredecessorDest)) 3913 return true; 3914 } 3915 3916 return false; 3917 } 3918 3919 /// isNecessaryCond - Test whether the given CondValue value is a condition 3920 /// which is at least as strict as the one described by Pred, LHS, and RHS. 3921 bool ScalarEvolution::isNecessaryCond(Value *CondValue, 3922 ICmpInst::Predicate Pred, 3923 const SCEV *LHS, const SCEV *RHS, 3924 bool Inverse) { 3925 // Recursivly handle And and Or conditions. 3926 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) { 3927 if (BO->getOpcode() == Instruction::And) { 3928 if (!Inverse) 3929 return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) || 3930 isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse); 3931 } else if (BO->getOpcode() == Instruction::Or) { 3932 if (Inverse) 3933 return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) || 3934 isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse); 3935 } 3936 } 3937 3938 ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue); 3939 if (!ICI) return false; 3940 3941 // Now that we found a conditional branch that dominates the loop, check to 3942 // see if it is the comparison we are looking for. 3943 Value *PreCondLHS = ICI->getOperand(0); 3944 Value *PreCondRHS = ICI->getOperand(1); 3945 ICmpInst::Predicate Cond; 3946 if (Inverse) 3947 Cond = ICI->getInversePredicate(); 3948 else 3949 Cond = ICI->getPredicate(); 3950 3951 if (Cond == Pred) 3952 ; // An exact match. 3953 else if (!ICmpInst::isTrueWhenEqual(Cond) && Pred == ICmpInst::ICMP_NE) 3954 ; // The actual condition is beyond sufficient. 3955 else 3956 // Check a few special cases. 3957 switch (Cond) { 3958 case ICmpInst::ICMP_UGT: 3959 if (Pred == ICmpInst::ICMP_ULT) { 3960 std::swap(PreCondLHS, PreCondRHS); 3961 Cond = ICmpInst::ICMP_ULT; 3962 break; 3963 } 3964 return false; 3965 case ICmpInst::ICMP_SGT: 3966 if (Pred == ICmpInst::ICMP_SLT) { 3967 std::swap(PreCondLHS, PreCondRHS); 3968 Cond = ICmpInst::ICMP_SLT; 3969 break; 3970 } 3971 return false; 3972 case ICmpInst::ICMP_NE: 3973 // Expressions like (x >u 0) are often canonicalized to (x != 0), 3974 // so check for this case by checking if the NE is comparing against 3975 // a minimum or maximum constant. 3976 if (!ICmpInst::isTrueWhenEqual(Pred)) 3977 if (ConstantInt *CI = dyn_cast<ConstantInt>(PreCondRHS)) { 3978 const APInt &A = CI->getValue(); 3979 switch (Pred) { 3980 case ICmpInst::ICMP_SLT: 3981 if (A.isMaxSignedValue()) break; 3982 return false; 3983 case ICmpInst::ICMP_SGT: 3984 if (A.isMinSignedValue()) break; 3985 return false; 3986 case ICmpInst::ICMP_ULT: 3987 if (A.isMaxValue()) break; 3988 return false; 3989 case ICmpInst::ICMP_UGT: 3990 if (A.isMinValue()) break; 3991 return false; 3992 default: 3993 return false; 3994 } 3995 Cond = ICmpInst::ICMP_NE; 3996 // NE is symmetric but the original comparison may not be. Swap 3997 // the operands if necessary so that they match below. 3998 if (isa<SCEVConstant>(LHS)) 3999 std::swap(PreCondLHS, PreCondRHS); 4000 break; 4001 } 4002 return false; 4003 default: 4004 // We weren't able to reconcile the condition. 
      return false;
    }

  if (!PreCondLHS->getType()->isInteger()) return false;

  const SCEV *PreCondLHSSCEV = getSCEV(PreCondLHS);
  const SCEV *PreCondRHSSCEV = getSCEV(PreCondRHS);
  return (HasSameValue(LHS, PreCondLHSSCEV) &&
          HasSameValue(RHS, PreCondRHSSCEV)) ||
         (HasSameValue(LHS, getNotSCEV(PreCondRHSSCEV)) &&
          HasSameValue(RHS, getNotSCEV(PreCondLHSSCEV)));
}

/// getBECount - Subtract the end and start values and divide by the step,
/// rounding up, to get the number of times the backedge is executed. Return
/// CouldNotCompute if an intermediate computation overflows.
const SCEV* ScalarEvolution::getBECount(const SCEV* Start,
                                        const SCEV* End,
                                        const SCEV* Step) {
  const Type *Ty = Start->getType();
  const SCEV* NegOne = getIntegerSCEV(-1, Ty);
  const SCEV* Diff = getMinusSCEV(End, Start);
  const SCEV* RoundUp = getAddExpr(Step, NegOne);

  // Add an adjustment to the difference between End and Start so that
  // the division will effectively round up.
  const SCEV* Add = getAddExpr(Diff, RoundUp);

  // Check Add for unsigned overflow.
  // TODO: More sophisticated things could be done here.
  const Type *WideTy = IntegerType::get(getTypeSizeInBits(Ty) + 1);
  const SCEV* OperandExtendedAdd =
    getAddExpr(getZeroExtendExpr(Diff, WideTy),
               getZeroExtendExpr(RoundUp, WideTy));
  if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
    return CouldNotCompute;

  return getUDivExpr(Add, Step);
}

/// HowManyLessThans - Return the number of times a backedge containing the
/// specified less-than comparison will execute. If not computable, return
/// CouldNotCompute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool isSigned) {
  // Only handle:  "ADDREC < LoopInvariant".
  if (!RHS->isLoopInvariant(L)) return CouldNotCompute;

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRec || AddRec->getLoop() != L)
    return CouldNotCompute;

  if (AddRec->isAffine()) {
    // FORNOW: We only support unit strides.
    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
    const SCEV* Step = AddRec->getStepRecurrence(*this);

    // TODO: handle non-constant strides.
    const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
    if (!CStep || CStep->isZero())
      return CouldNotCompute;
    if (CStep->isOne()) {
      // With unit stride, the iteration never steps past the limit value.
    } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
      if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
        // Test whether a positive iteration can step past the limit
        // value and past the maximum value for its type in a single step.
        if (isSigned) {
          APInt Max = APInt::getSignedMaxValue(BitWidth);
          if ((Max - CStep->getValue()->getValue())
                .slt(CLimit->getValue()->getValue()))
            return CouldNotCompute;
        } else {
          APInt Max = APInt::getMaxValue(BitWidth);
          if ((Max - CStep->getValue()->getValue())
                .ult(CLimit->getValue()->getValue()))
            return CouldNotCompute;
        }
      } else
        // TODO: handle non-constant limit values below.
        return CouldNotCompute;
    } else
      // TODO: handle negative strides below.
      return CouldNotCompute;

    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
    // m.  So, we count the number of iterations in which {n,+,s} < m is true.
    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
    // treat m-n as signed nor unsigned due to overflow possibility.

    // First, we get the value of the LHS in the first iteration: n
    const SCEV* Start = AddRec->getOperand(0);

    // Determine the minimum constant start value.
    const SCEV *MinStart = isa<SCEVConstant>(Start) ? Start :
      getConstant(isSigned ? APInt::getSignedMinValue(BitWidth) :
                             APInt::getMinValue(BitWidth));

    // If we know that the condition is true in order to enter the loop,
    // then we know that it will run exactly (m-n)/s times. Otherwise, we
    // only know that it will execute (max(m,n)-n)/s times. In both cases,
    // the division must round up.
    const SCEV* End = RHS;
    if (!isLoopGuardedByCond(L,
                             isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
                             getMinusSCEV(Start, Step), RHS))
      End = isSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);

    // Determine the maximum constant end value.
    const SCEV* MaxEnd =
      isa<SCEVConstant>(End) ? End :
      getConstant(isSigned ? APInt::getSignedMaxValue(BitWidth)
                               .ashr(GetMinSignBits(End) - 1) :
                             APInt::getMaxValue(BitWidth)
                               .lshr(GetMinLeadingZeros(End)));

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
    const SCEV* BECount = getBECount(Start, End, Step);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    const SCEV* MaxBECount = getBECount(MinStart, MaxEnd, Step);

    return BackedgeTakenInfo(BECount, MaxBECount);
  }

  return CouldNotCompute;
}

/// getNumIterationsInRange - Return the number of iterations of this loop that
/// produce values in the specified constant range.  Another way of looking at
/// this is that it returns the first iteration number where the value is not
/// in the range, thus computing the exit count.  If the iteration count can't
/// be computed, an instance of SCEVCouldNotCompute is returned.
const SCEV* SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV*, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
      const SCEV* Shifted = SE.getAddRecExpr(Operands, getLoop());
      if (const SCEVAddRecExpr *ShiftedAddRec =
            dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
                          Range.subtract(SC->getValue()->getValue()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
4163 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 4164 if (!isa<SCEVConstant>(getOperand(i))) 4165 return SE.getCouldNotCompute(); 4166 4167 4168 // Okay at this point we know that all elements of the chrec are constants and 4169 // that the start element is zero. 4170 4171 // First check to see if the range contains zero. If not, the first 4172 // iteration exits. 4173 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 4174 if (!Range.contains(APInt(BitWidth, 0))) 4175 return SE.getIntegerSCEV(0, getType()); 4176 4177 if (isAffine()) { 4178 // If this is an affine expression then we have this situation: 4179 // Solve {0,+,A} in Range === Ax in Range 4180 4181 // We know that zero is in the range. If A is positive then we know that 4182 // the upper value of the range must be the first possible exit value. 4183 // If A is negative then the lower of the range is the last possible loop 4184 // value. Also note that we already checked for a full range. 4185 APInt One(BitWidth,1); 4186 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue(); 4187 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower(); 4188 4189 // The exit value should be (End+A)/A. 4190 APInt ExitVal = (End + A).udiv(A); 4191 ConstantInt *ExitValue = ConstantInt::get(ExitVal); 4192 4193 // Evaluate at the exit value. If we really did fall out of the valid 4194 // range, then we computed our trip count, otherwise wrap around or other 4195 // things must have happened. 4196 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 4197 if (Range.contains(Val->getValue())) 4198 return SE.getCouldNotCompute(); // Something strange happened 4199 4200 // Ensure that the previous value is in the range. This is a sanity check. 4201 assert(Range.contains( 4202 EvaluateConstantChrecAtConstant(this, 4203 ConstantInt::get(ExitVal - One), SE)->getValue()) && 4204 "Linear scev computation is off in a bad way!"); 4205 return SE.getConstant(ExitValue); 4206 } else if (isQuadratic()) { 4207 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the 4208 // quadratic equation to solve it. To do this, we must frame our problem in 4209 // terms of figuring out when zero is crossed, instead of when 4210 // Range.getUpper() is crossed. 4211 SmallVector<const SCEV*, 4> NewOps(op_begin(), op_end()); 4212 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); 4213 const SCEV* NewAddRec = SE.getAddRecExpr(NewOps, getLoop()); 4214 4215 // Next, solve the constructed addrec 4216 std::pair<const SCEV*,const SCEV*> Roots = 4217 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE); 4218 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 4219 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 4220 if (R1) { 4221 // Pick the smallest positive root value. 4222 if (ConstantInt *CB = 4223 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, 4224 R1->getValue(), R2->getValue()))) { 4225 if (CB->getZExtValue() == false) 4226 std::swap(R1, R2); // R1 is the minimum root now. 4227 4228 // Make sure the root is not off by one. The returned iteration should 4229 // not be in the range, but the previous one should be. When solving 4230 // for "X*X < 5", for example, we should not return a root of 2. 4231 ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this, 4232 R1->getValue(), 4233 SE); 4234 if (Range.contains(R1Val->getValue())) { 4235 // The next iteration must be out of the range... 
          ConstantInt *NextVal = ConstantInt::get(R1->getValue()->getValue()+1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value.  Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal = ConstantInt::get(R1->getValue()->getValue()-1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}



//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
    SE->ValuesAtScopes.erase(I);
  SE->Scalars.erase(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  SmallVector<User *, 16> Worklist;
  Value *Old = getValPtr();
  bool DeleteOld = false;
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE; ++UI)
    Worklist.push_back(*UI);
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old) {
      DeleteOld = true;
      continue;
    }
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    if (Instruction *I = dyn_cast<Instruction>(U))
      SE->ValuesAtScopes.erase(I);
    if (SE->Scalars.erase(U))
      for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
           UI != UE; ++UI)
        Worklist.push_back(*UI);
  }
  if (DeleteOld) {
    if (PHINode *PN = dyn_cast<PHINode>(Old))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    if (Instruction *I = dyn_cast<Instruction>(Old))
      SE->ValuesAtScopes.erase(I);
    SE->Scalars.erase(Old);
    // this now dangles!
  }
  // this may dangle!
4312 } 4313 4314 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 4315 : CallbackVH(V), SE(se) {} 4316 4317 //===----------------------------------------------------------------------===// 4318 // ScalarEvolution Class Implementation 4319 //===----------------------------------------------------------------------===// 4320 4321 ScalarEvolution::ScalarEvolution() 4322 : FunctionPass(&ID), CouldNotCompute(new SCEVCouldNotCompute()) { 4323 } 4324 4325 bool ScalarEvolution::runOnFunction(Function &F) { 4326 this->F = &F; 4327 LI = &getAnalysis<LoopInfo>(); 4328 TD = getAnalysisIfAvailable<TargetData>(); 4329 return false; 4330 } 4331 4332 void ScalarEvolution::releaseMemory() { 4333 Scalars.clear(); 4334 BackedgeTakenCounts.clear(); 4335 ConstantEvolutionLoopExitValue.clear(); 4336 ValuesAtScopes.clear(); 4337 4338 for (std::map<ConstantInt*, SCEVConstant*>::iterator 4339 I = SCEVConstants.begin(), E = SCEVConstants.end(); I != E; ++I) 4340 delete I->second; 4341 for (std::map<std::pair<const SCEV*, const Type*>, 4342 SCEVTruncateExpr*>::iterator I = SCEVTruncates.begin(), 4343 E = SCEVTruncates.end(); I != E; ++I) 4344 delete I->second; 4345 for (std::map<std::pair<const SCEV*, const Type*>, 4346 SCEVZeroExtendExpr*>::iterator I = SCEVZeroExtends.begin(), 4347 E = SCEVZeroExtends.end(); I != E; ++I) 4348 delete I->second; 4349 for (std::map<std::pair<unsigned, std::vector<const SCEV*> >, 4350 SCEVCommutativeExpr*>::iterator I = SCEVCommExprs.begin(), 4351 E = SCEVCommExprs.end(); I != E; ++I) 4352 delete I->second; 4353 for (std::map<std::pair<const SCEV*, const SCEV*>, SCEVUDivExpr*>::iterator 4354 I = SCEVUDivs.begin(), E = SCEVUDivs.end(); I != E; ++I) 4355 delete I->second; 4356 for (std::map<std::pair<const SCEV*, const Type*>, 4357 SCEVSignExtendExpr*>::iterator I = SCEVSignExtends.begin(), 4358 E = SCEVSignExtends.end(); I != E; ++I) 4359 delete I->second; 4360 for (std::map<std::pair<const Loop *, std::vector<const SCEV*> >, 4361 SCEVAddRecExpr*>::iterator I = SCEVAddRecExprs.begin(), 4362 E = SCEVAddRecExprs.end(); I != E; ++I) 4363 delete I->second; 4364 for (std::map<Value*, SCEVUnknown*>::iterator I = SCEVUnknowns.begin(), 4365 E = SCEVUnknowns.end(); I != E; ++I) 4366 delete I->second; 4367 4368 SCEVConstants.clear(); 4369 SCEVTruncates.clear(); 4370 SCEVZeroExtends.clear(); 4371 SCEVCommExprs.clear(); 4372 SCEVUDivs.clear(); 4373 SCEVSignExtends.clear(); 4374 SCEVAddRecExprs.clear(); 4375 SCEVUnknowns.clear(); 4376 } 4377 4378 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const { 4379 AU.setPreservesAll(); 4380 AU.addRequiredTransitive<LoopInfo>(); 4381 } 4382 4383 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 4384 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 4385 } 4386 4387 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 4388 const Loop *L) { 4389 // Print all inner loops first 4390 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) 4391 PrintLoopInfo(OS, SE, *I); 4392 4393 OS << "Loop " << L->getHeader()->getName() << ": "; 4394 4395 SmallVector<BasicBlock*, 8> ExitBlocks; 4396 L->getExitBlocks(ExitBlocks); 4397 if (ExitBlocks.size() != 1) 4398 OS << "<multiple exits> "; 4399 4400 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 4401 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 4402 } else { 4403 OS << "Unpredictable backedge-taken count. 
"; 4404 } 4405 4406 OS << "\n"; 4407 OS << "Loop " << L->getHeader()->getName() << ": "; 4408 4409 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 4410 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 4411 } else { 4412 OS << "Unpredictable max backedge-taken count. "; 4413 } 4414 4415 OS << "\n"; 4416 } 4417 4418 void ScalarEvolution::print(raw_ostream &OS, const Module* ) const { 4419 // ScalarEvolution's implementaiton of the print method is to print 4420 // out SCEV values of all instructions that are interesting. Doing 4421 // this potentially causes it to create new SCEV objects though, 4422 // which technically conflicts with the const qualifier. This isn't 4423 // observable from outside the class though (the hasSCEV function 4424 // notwithstanding), so casting away the const isn't dangerous. 4425 ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this); 4426 4427 OS << "Classifying expressions for: " << F->getName() << "\n"; 4428 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) 4429 if (isSCEVable(I->getType())) { 4430 OS << *I; 4431 OS << " --> "; 4432 const SCEV* SV = SE.getSCEV(&*I); 4433 SV->print(OS); 4434 4435 const Loop *L = LI->getLoopFor((*I).getParent()); 4436 4437 const SCEV* AtUse = SE.getSCEVAtScope(SV, L); 4438 if (AtUse != SV) { 4439 OS << " --> "; 4440 AtUse->print(OS); 4441 } 4442 4443 if (L) { 4444 OS << "\t\t" "Exits: "; 4445 const SCEV* ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 4446 if (!ExitValue->isLoopInvariant(L)) { 4447 OS << "<<Unknown>>"; 4448 } else { 4449 OS << *ExitValue; 4450 } 4451 } 4452 4453 OS << "\n"; 4454 } 4455 4456 OS << "Determining loop execution counts for: " << F->getName() << "\n"; 4457 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) 4458 PrintLoopInfo(OS, &SE, *I); 4459 } 4460 4461 void ScalarEvolution::print(std::ostream &o, const Module *M) const { 4462 raw_os_ostream OS(o); 4463 print(OS, M); 4464 } 4465