//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These objects are uniqued rather than reference counted:
// ScalarEvolution allocates and owns them, and clients refer to them through
// const SCEV* pointers.  We only create one SCEV of a particular shape, so
// pointer comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
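// As a concrete example, using the notation produced by the print routines
// below: the induction variable of "for (i = 0; i != n; ++i)" is represented
// as the add recurrence {0,+,1}<loop>, and an in-loop expression such as
// 4*i + 7 canonicalizes to {7,+,4}<loop>.
//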
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;
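// A minimal sketch of typical client usage from another pass (the pass
// boilerplate and the Value *V are placeholders):
//
//   // In getAnalysisUsage:
//   AU.addRequired<ScalarEvolution>();
//   ...
//   // In the pass body:
//   ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
//   if (SE.isSCEVable(V->getType())) {
//     const SCEV *S = SE.getSCEV(V);  // Canonical expression for V.
//     ...
//   }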
//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeID(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const SCEV *
SCEVCouldNotCompute::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(ID, V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(Context->getConstantInt(Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(
    Context->getConstantInt(cast<IntegerType>(Ty), V, isSigned));
}
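// Note that because SCEVs are uniqued in UniqueSCEVs above, structurally
// identical expressions share a single object; e.g. two independent calls to
// getConstant(Ty, 42, false) return the same const SCEV*, which is what makes
// pointer comparison a valid equality test for SCEVs.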
const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This commutative expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

const SCEV *
SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      // An operand changed; rebuild the full operand list, rewriting the
      // remaining operands as well, and refold the expression.
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        llvm_unreachable("Unknown commutative expr!");
    }
  }
  return this;
}
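// For example, while a PHI node is being analyzed it can be represented by a
// temporary symbolic SCEVUnknown; once the recurrence is recognized, the
// routine above lets expressions that were built in terms of that symbol be
// rewritten in terms of the concrete value.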
bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

const SCEV *
SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                  const SCEV *Conc,
                                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}


bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L->getHeader()))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() + ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}
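// The print methods above compose. For example, with a loop IV i represented
// as {0,+,1}<loop> and a loop-invariant SCEVUnknown %a, the folders below
// canonicalize 4*i + %a into a single recurrence that prints as
// {%a,+,4}<loop>.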
//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      llvm_unreachable("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector
/// are consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely
  // to be extremely short in practice.  Note that we take this approach
  // because we do not want to depend on the addresses of the objects we are
  // grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
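// For example, given Ops = { %x, {1,+,1}<loop>, 7, %x }, the sort orders the
// constant first and makes the duplicate %x values adjacent, yielding
// { 7, {1,+,1}<loop>, %x, %x }, so the folders below can spot repeated
// operands with a simple linear scan.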
//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);
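  // As a worked example, for K = 4: K! = 24 = 2^3 * 3, so the loop above
  // produces OddFactorial = 3 and T = 3, giving DivFactor = 2^3 = 8.  The
  // MultiplyFactor computed below is then the multiplicative inverse of 3
  // modulo 2^W.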
  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
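// For example, the quadratic recurrence {0,+,1,+,1} (closed form
// It*(It+1)/2) evaluated at It = 4 is
// 0*BC(4,0) + 1*BC(4,1) + 1*BC(4,2) = 0 + 4 + 6 = 10.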
//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
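  // In that example X is the i8 addrec {0,+,1}: the max backedge-taken count
  // is 99, so Start + Step*99 = 99 fits in i8 without wrapping, and
  // (zext i8 {0,+,1} to i32) can be folded to the i32 addrec {0,+,1}.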
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          const SCEV *UMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          Add = getAddExpr(Start, UMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29)))  +  r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV *> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];
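  // For example, getAddExpr({3, %x, 5}) sorts the constants to the front,
  // folds them to 8, and yields (8 + %x); likewise (0 + %x) is stripped down
  // to just %x.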
  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getIntegerSCEV(2, Ty);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }
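  // For example, an operand list containing the nested add (%x + (4 + %y))
  // is flattened here to (%x + 4 + %y), and the recursive call re-sorts it
  // into the canonical form (4 + %x + %y).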
  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile
      // to re-generate the operands list. Group the operands by constant
      // scale, to avoid multiplying by the same constant scale multiple
      // times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getIntegerSCEV(0, Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
            MulOps.erase(MulOps.begin()+MulOp);
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV *One = getIntegerSCEV(1, Ty);
          const SCEV *AddOne = getAddExpr(InnerMul, One);
          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

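      // For example, this fold turns (%w + %x + (%x * %z)) into
      // (%w + (%x * (1 + %z))).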
      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_end());
              MulOps.erase(MulOps.begin()+MulOp);
              InnerMul1 = getMulExpr(MulOps);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_end());
              MulOps.erase(MulOps.begin()+OMulOp);
              InnerMul2 = getMulExpr(MulOps);
            }
            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-loop-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }
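    // For example, with %n loop-invariant, (%n + {0,+,1}<L>) folds here to
    // the single recurrence {%n,+,1}<L>.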
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
                                              AddRec->op_end());
          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
            if (i >= NewOps.size()) {
              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
                            OtherAddRec->op_end());
              break;
            }
            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
          }
          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());

          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getAddExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVAddExpr>();
  new (S) SCEVAddExpr(ID, Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}


/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty mul!");
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = Context->getConstantInt(LHSC->getValue()->getValue() *
                                                  RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    }
  }

  // Skip over the add expressions until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  if (Ops.size() == 1)
    return Ops[0];

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      if (LIOps.size() == 1) {
        const SCEV *Scale = LIOps[0];
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
      } else {
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
          MulOps.push_back(AddRec->getOperand(i));
          NewOps.push_back(getMulExpr(MulOps));
        }
      }

      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // multiplied together.  If so, we can fold them.
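    // For example, {0,+,1} * {0,+,1} (the square of the canonical induction
    // variable) folds to {0,+,1,+,2}, whose values 0, 1, 4, 9, ... are the
    // perfect squares.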
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
          const SCEV *NewStart = getMulExpr(F->getStart(),
                                            G->getStart());
          const SCEV *B = F->getStepRecurrence(*this);
          const SCEV *D = G->getStepRecurrence(*this);
          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
                                           getMulExpr(G, B),
                                           getMulExpr(B, D));
          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
                                                F->getLoop());
          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getMulExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVMulExpr>();
  new (S) SCEVMulExpr(ID, Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getUDivExpr - Get a canonical unsigned division expression, or something
/// simpler if possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->equalsInt(1))
      return LHS;                               // X udiv 1 --> x
    if (RHSC->isZero())
      return getIntegerSCEV(0, LHS->getType()); // value is undefined

    // Determine whether the division can be folded into the operands of
    // its operands.
    // TODO: Generalize this to non-constants by using known-bits information.
    const Type *Ty = LHS->getType();
    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
    // For non-power-of-two values, effectively round the value up to the
    // nearest power of two.
    if (!RHSC->getValue()->getValue().isPowerOf2())
      ++MaxShiftAmt;
    const IntegerType *ExtTy =
      IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
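    // Here "safe" means that zero extension loses nothing: the comparison
    // below checks that zext'ing the recurrence gives the same result as
    // rebuilding it from zext'd operands, so the narrow form cannot wrap.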
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
      if (const SCEVConstant *Step =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
        if (!Step->getValue()->getValue()
              .urem(RHSC->getValue()->getValue()) &&
            getZeroExtendExpr(AR, ExtTy) ==
            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                          getZeroExtendExpr(Step, ExtTy),
                          AR->getLoop())) {
          SmallVector<const SCEV *, 4> Operands;
          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
          return getAddRecExpr(Operands, AR->getLoop());
        }
    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
      SmallVector<const SCEV *, 4> Operands;
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
        // Find an operand that's safely divisible.
        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
          const SCEV *Op = M->getOperand(i);
          const SCEV *Div = getUDivExpr(Op, RHSC);
          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
            const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
            Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
                                                    MOperands.end());
            Operands[i] = Div;
            return getMulExpr(Operands);
          }
        }
    }
    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
    if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
      SmallVector<const SCEV *, 4> Operands;
      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
        Operands.clear();
        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
          const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
            break;
          Operands.push_back(Op);
        }
        if (Operands.size() == A->getNumOperands())
          return getAddExpr(Operands);
      }
    }

    // Fold if both operands are constant.
    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
      Constant *LHSCV = LHSC->getValue();
      Constant *RHSCV = RHSC->getValue();
      return getConstant(cast<ConstantInt>(Context->getConstantExprUDiv(LHSCV,
                                                                        RHSCV)));
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
  new (S) SCEVUDivExpr(ID, LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}


/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
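/// A step that is itself an add recurrence for the same loop is spliced in:
/// e.g. a start X with step {Y,+,Z} over L yields {X,+,Y,+,Z} over L.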
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
                                           const SCEV *Step, const Loop *L) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.insert(Operands.end(), StepChrec->op_begin(),
                      StepChrec->op_end());
      return getAddRecExpr(Operands, L);
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L);
}

/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
           getEffectiveSCEVType(Operands[0]->getType()) &&
           "SCEVAddRecExpr operand types don't match!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L);             // {X,+,0}  -->  X
  }

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops.  Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = true;
      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
        if (!Operands[i]->isLoopInvariant(L)) {
          AllInvariant = false;
          break;
        }
      if (AllInvariant) {
        NestedOperands[0] = getAddRecExpr(Operands, L);
        AllInvariant = true;
        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
            AllInvariant = false;
            break;
          }
        if (AllInvariant)
          // Ok, both add recurrences are valid after the transformation.
          return getAddRecExpr(NestedOperands, NestedLoop);
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  ID.AddInteger(Operands.size());
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
  new (S) SCEVAddRecExpr(ID, Operands, L);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = Context->getConstantInt(
                              APIntOps::smax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Find the first SMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      // X smax Y smax Y  -->  X smax Y
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
  new (S) SCEVSMaxExpr(ID, Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = Context->getConstantInt(
                              APIntOps::umax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have a umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Find the first UMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      // X umax Y umax Y  -->  X umax Y
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
  new (S) SCEVUMaxExpr(ID, Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~smax(~x, ~y) == smin(x, y).
  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~umax(~x, ~y) == umin(x, y).
  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here.  createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
  new (S) SCEVUnknown(ID, V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// isSCEVable - Test if values of the given type are analyzable within
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
bool ScalarEvolution::isSCEVable(const Type *Ty) const {
  // Integers are always SCEVable.
  if (Ty->isInteger())
    return true;

  // Pointers are SCEVable if TargetData information is available
  // to provide pointer size information.
  if (isa<PointerType>(Ty))
    return TD != NULL;

  // Otherwise it's not SCEVable.
  return false;
}

/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  // If we have a TargetData, use it!
  if (TD)
    return TD->getTypeSizeInBits(Ty);

  // Otherwise, we support only integer types.
  assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
  return Ty->getPrimitiveSizeInBits();
}

/// getEffectiveSCEVType - Return a type with the same bitwidth as
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
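/// (For example, with TargetData for a target with 64-bit pointers, an i8*
/// value is analyzed as an i64.)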
const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isInteger())
    return Ty;

  assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
  return TD->getIntPtrType();
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return &CouldNotCompute;
}

/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
/// expression and create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
  if (I != Scalars.end()) return I->second;
  const SCEV *S = createSCEV(V);
  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
  return S;
}

/// getIntegerSCEV - Given a SCEVable type, create a constant for the
/// specified signed integer value and return a SCEV for the constant.
const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(Context->getConstantInt(ITy, Val));
}

/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
///
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
               cast<ConstantInt>(Context->getConstantExprNeg(VC->getValue())));

  const Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V,
                  getConstant(cast<ConstantInt>(Context->getAllOnesValue(Ty))));
}

/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
               cast<ConstantInt>(Context->getConstantExprNot(VC->getValue())));

  const Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
                   getConstant(cast<ConstantInt>(Context->getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
///
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
                                          const SCEV *RHS) {
  // X - Y --> X + -Y
  return getAddExpr(LHS, getNegativeSCEV(RHS));
}

/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type.  If the type must be extended, it is
/// zero extended.
const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
                                         const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type.  If the type must be extended, it is
/// sign extended.
const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is
/// zero extended.  The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is
/// sign extended.  The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is extended with unspecified bits. The conversion must not be
/// narrowing.
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  The conversion must not be widening.
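/// (For example, i64 -> i32 truncates, i32 -> i32 is a no-op, and a widening
/// request trips the assertion below.)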
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

/// getUMinFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umin operation
/// with them.
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}

/// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value
/// for the specified instruction and replaces any references to the symbolic
/// value SymName with the specified value.  This is used during PHI
/// resolution.
void
ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I,
                                                  const SCEV *SymName,
                                                  const SCEV *NewVal) {
  std::map<SCEVCallbackVH, const SCEV *>::iterator SI =
    Scalars.find(SCEVCallbackVH(I, this));
  if (SI == Scalars.end()) return;

  const SCEV *NV =
    SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this);
  if (NV == SI->second) return;  // No change.

  SI->second = NV;       // Update the scalars map!

  // Any instruction values that use this instruction might also need to be
  // updated!
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI)
    ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal);
}

/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
/// a loop header, making it a potential recurrence, or it doesn't.
///
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
    if (const Loop *L = LI->getLoopFor(PN->getParent()))
      if (L->getHeader() == PN->getParent()) {
        // If it lives in the loop header, it has two incoming values, one
        // from outside the loop, and one from inside.
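        // contains() is 1 exactly when incoming block 0 is inside the loop,
        // so IncomingEdge indexes the value arriving from outside the loop
        // and BackEdge, its complement, the value coming around the loop.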
        unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
        unsigned BackEdge     = IncomingEdge^1;

        // While we are analyzing this PHI node, handle its value symbolically.
        const SCEV *SymbolicName = getUnknown(PN);
        assert(Scalars.find(PN) == Scalars.end() &&
               "PHI node already processed?");
        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));

        // Using this symbolic name for the PHI, analyze the value coming
        // around the back-edge.
        const SCEV *BEValue = getSCEV(PN->getIncomingValue(BackEdge));

        // NOTE: If BEValue is loop invariant, we know that the PHI node just
        // has a special value for the first iteration of the loop.

        // If the value coming around the backedge is an add with the symbolic
        // value we just inserted, then we found a simple induction variable!
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
          // If there is a single occurrence of the symbolic value, replace it
          // with a recurrence.
          unsigned FoundIndex = Add->getNumOperands();
          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
            if (Add->getOperand(i) == SymbolicName)
              if (FoundIndex == e) {
                FoundIndex = i;
                break;
              }

          if (FoundIndex != Add->getNumOperands()) {
            // Create an add with everything but the specified operand.
            SmallVector<const SCEV *, 8> Ops;
            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
              if (i != FoundIndex)
                Ops.push_back(Add->getOperand(i));
            const SCEV *Accum = getAddExpr(Ops);

            // This is not a valid addrec if the step amount is varying each
            // loop iteration, but is not itself an addrec in this loop.
            if (Accum->isLoopInvariant(L) ||
                (isa<SCEVAddRecExpr>(Accum) &&
                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
              const SCEV *StartVal =
                getSCEV(PN->getIncomingValue(IncomingEdge));
              const SCEV *PHISCEV =
                getAddRecExpr(StartVal, Accum, L);

              // Okay, for the entire analysis of this edge we assumed the PHI
              // to be symbolic.  We now need to go back and update all of the
              // entries for the scalars that use the PHI (except for the PHI
              // itself) to use the new analyzed value instead of the
              // "symbolic" value.
              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
              return PHISCEV;
            }
          }
        } else if (const SCEVAddRecExpr *AddRec =
                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
          // Otherwise, this could be a loop like this:
          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
          // In this case, j = {1,+,1} and BEValue is j.  Because the other
          // in-value of i (0) fits the evolution of BEValue, i really is an
          // addrec evolution.
          if (AddRec->getLoop() == L && AddRec->isAffine()) {
            const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));

            // If StartVal = j.start - j.stride, we can use StartVal as the
            // initial value of the addrec evolution.
            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
                                         AddRec->getOperand(1))) {
              const SCEV *PHISCEV =
                getAddRecExpr(StartVal, AddRec->getOperand(1), L);

              // Okay, for the entire analysis of this edge we assumed the PHI
              // to be symbolic.  We now need to go back and update all of the
              // entries for the scalars that use the PHI (except for the PHI
              // itself) to use the new analyzed value instead of the
              // "symbolic" value.
              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
              return PHISCEV;
            }
          }
        }

        return SymbolicName;
      }

  // It's tempting to recognize PHIs with a unique incoming value; however,
  // this leads passes like indvars to break LCSSA form.  Fortunately, such
  // PHIs are rare, as instcombine zaps them.

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

/// createNodeForGEP - Expand GEP instructions into add and multiply
/// operations.  This allows them to be analyzed by regular SCEV code.
///
const SCEV *ScalarEvolution::createNodeForGEP(Operator *GEP) {

  const Type *IntPtrTy = TD->getIntPtrType();
  Value *Base = GEP->getOperand(0);
  // Don't attempt to analyze GEPs over unsized objects.
  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
    return getUnknown(GEP);
  const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
                                      E = GEP->op_end();
       I != E; ++I) {
    Value *Index = *I;
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
      // For a struct, add the member offset.
      const StructLayout &SL = *TD->getStructLayout(STy);
      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
      uint64_t Offset = SL.getElementOffset(FieldNo);
      TotalOffset = getAddExpr(TotalOffset, getIntegerSCEV(Offset, IntPtrTy));
    } else {
      // For an array, add the element offset, explicitly scaled.
      const SCEV *LocalOffset = getSCEV(Index);
      if (!isa<PointerType>(LocalOffset->getType()))
        // Getelementptr indices are signed.
        LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
      LocalOffset =
        getMulExpr(LocalOffset,
                   getIntegerSCEV(TD->getTypeAllocSize(*GTI), IntPtrTy));
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }
  return getAddExpr(getSCEV(Base), TotalOffset);
}

/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
/// guaranteed to end in (at every loop iteration).  It is, at the same time,
/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
uint32_t
ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getValue()->getValue().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands' results.
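    // (If every operand is divisible by 2^k, so is their sum, so the
    // smallest guarantee among the operands holds for the result too.)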
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands' results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
                          BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    APInt Mask = APInt::getAllOnesValue(BitWidth);
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
    return Zeros.countTrailingOnes();
  }

  // SCEVUDivExpr
  return 0;
}

/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
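/// The computed range is a conservative superset of the values S can take;
/// when nothing better is known, it is the full set for S's bit width.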
///
ConstantRange
ScalarEvolution::getUnsignedRange(const SCEV *S) {

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return ConstantRange(C->getValue()->getValue());

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getUnsignedRange(Add->getOperand(0));
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getUnsignedRange(Add->getOperand(i)));
    return X;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
    return X;
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
    return X;
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
    return X;
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getUnsignedRange(UDiv->getLHS());
    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
    return X.udiv(Y);
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getUnsignedRange(ZExt->getOperand());
    return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getUnsignedRange(SExt->getOperand());
    return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getUnsignedRange(Trunc->getOperand());
    return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
  }

  ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
    const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
    if (!Trip) return FullSet;

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const Type *Ty = AddRec->getType();
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);

        const SCEV *Start = AddRec->getStart();
        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

        // Check for overflow.
        if (!isKnownPredicate(ICmpInst::ICMP_ULE, Start, End))
          return FullSet;

        ConstantRange StartRange = getUnsignedRange(Start);
        ConstantRange EndRange = getUnsignedRange(End);
        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
                                   EndRange.getUnsignedMin());
        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
                                   EndRange.getUnsignedMax());
        if (Min.isMinValue() && Max.isMaxValue())
          return ConstantRange(Min.getBitWidth(), /*isFullSet=*/true);
        return ConstantRange(Min, Max+1);
      }
    }
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    APInt Mask = APInt::getAllOnesValue(BitWidth);
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
    if (Ones == ~Zeros + 1)
      return FullSet;
    return ConstantRange(Ones, ~Zeros + 1);
  }

  return FullSet;
}

/// getSignedRange - Determine the signed range for a particular SCEV.
///
ConstantRange
ScalarEvolution::getSignedRange(const SCEV *S) {

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return ConstantRange(C->getValue()->getValue());

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getSignedRange(Add->getOperand(0));
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getSignedRange(Add->getOperand(i)));
    return X;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getSignedRange(Mul->getOperand(0));
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getSignedRange(Mul->getOperand(i)));
    return X;
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getSignedRange(SMax->getOperand(0));
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getSignedRange(SMax->getOperand(i)));
    return X;
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getSignedRange(UMax->getOperand(0));
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getSignedRange(UMax->getOperand(i)));
    return X;
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getSignedRange(UDiv->getLHS());
    ConstantRange Y = getSignedRange(UDiv->getRHS());
    return X.udiv(Y);
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getSignedRange(ZExt->getOperand());
    return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getSignedRange(SExt->getOperand());
    return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getSignedRange(Trunc->getOperand());
    return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
  }

  ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
    const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
    if (!Trip) return FullSet;

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const Type *Ty = AddRec->getType();
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);

        const SCEV *Start = AddRec->getStart();
        const SCEV *Step = AddRec->getStepRecurrence(*this);
        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

        // Check for overflow.
        if (!(isKnownPositive(Step) &&
              isKnownPredicate(ICmpInst::ICMP_SLT, Start, End)) &&
            !(isKnownNegative(Step) &&
              isKnownPredicate(ICmpInst::ICMP_SGT, Start, End)))
          return FullSet;

        ConstantRange StartRange = getSignedRange(Start);
        ConstantRange EndRange = getSignedRange(End);
        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
                                   EndRange.getSignedMin());
        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
                                   EndRange.getSignedMax());
        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
          return ConstantRange(Min.getBitWidth(), /*isFullSet=*/true);
        return ConstantRange(Min, Max+1);
      }
    }
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
    if (NS == 1)
      return FullSet;
    return
      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1);
  }

  return FullSet;
}

/// createSCEV - We know that there is no SCEV for the specified value.
/// Analyze the expression.
///
const SCEV *ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  unsigned Opcode = Instruction::UserOp1;
  if (Instruction *I = dyn_cast<Instruction>(V))
    Opcode = I->getOpcode();
  else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    Opcode = CE->getOpcode();
  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (isa<ConstantPointerNull>(V))
    return getIntegerSCEV(0, V->getType());
  else if (isa<UndefValue>(V))
    return getIntegerSCEV(0, V->getType());
  else
    return getUnknown(V);

  Operator *U = cast<Operator>(V);
  switch (Opcode) {
  case Instruction::Add:
    return getAddExpr(getSCEV(U->getOperand(0)),
                      getSCEV(U->getOperand(1)));
  case Instruction::Mul:
    return getMulExpr(getSCEV(U->getOperand(0)),
                      getSCEV(U->getOperand(1)));
  case Instruction::UDiv:
    return getUDivExpr(getSCEV(U->getOperand(0)),
                       getSCEV(U->getOperand(1)));
  case Instruction::Sub:
    return getMinusSCEV(getSCEV(U->getOperand(0)),
                        getSCEV(U->getOperand(1)));
  case Instruction::And:
    // For an expression like x&255 that merely masks off the high bits,
    // use zext(trunc(x)) as the SCEV expression.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      if (CI->isNullValue())
        return getSCEV(U->getOperand(1));
      if (CI->isAllOnesValue())
        return getSCEV(U->getOperand(0));
      const APInt &A = CI->getValue();

      // Instcombine's ShrinkDemandedConstant may strip bits out of
      // constants, obscuring what would otherwise be a low-bits mask.
      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
      // knew about to reconstruct a low-bits mask value.
      unsigned LZ = A.countLeadingZeros();
      unsigned BitWidth = A.getBitWidth();
      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);

      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);

      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
        return
          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
                                            IntegerType::get(BitWidth - LZ)),
                            U->getType());
    }
    break;

  case Instruction::Or:
    // If the RHS of the Or is a constant, we may have something like:
    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
    // optimizations will transparently handle this case.
    //
    // In order for this transformation to be safe, the LHS must be of the
    // form X*(2^n) and the Or constant must be less than 2^n.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      const SCEV *LHS = getSCEV(U->getOperand(0));
      const APInt &CIVal = CI->getValue();
      if (GetMinTrailingZeros(LHS) >=
          (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
        return getAddExpr(LHS, getSCEV(U->getOperand(1)));
    }
    break;
  case Instruction::Xor:
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      // If the RHS of the xor is a signbit, then this is just an add.
      // Instcombine turns add of signbit into xor as a strength reduction
      // step.
      if (CI->getValue().isSignBit())
        return getAddExpr(getSCEV(U->getOperand(0)),
                          getSCEV(U->getOperand(1)));

      // If the RHS of xor is -1, then this is a not operation.
      if (CI->isAllOnesValue())
        return getNotSCEV(getSCEV(U->getOperand(0)));

      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
      // This is a variant of the check for xor with -1, and it handles
      // the case where instcombine has trimmed non-demanded bits out
      // of an xor with -1.
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
          if (BO->getOpcode() == Instruction::And &&
              LCI->getValue() == CI->getValue())
            if (const SCEVZeroExtendExpr *Z =
                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
              const Type *UTy = U->getType();
              const SCEV *Z0 = Z->getOperand();
              const Type *Z0Ty = Z0->getType();
              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

              // If C is a low-bits mask, the zero extend is serving to
              // mask off the high bits.  Complement the operand and
              // re-apply the zext.
              if (APIntOps::isMask(Z0TySize, CI->getValue()))
                return getZeroExtendExpr(getNotSCEV(Z0), UTy);

              // If C is a single bit, it may be in the sign-bit position
              // before the zero-extend.  In this case, represent the xor
              // using an add, which is equivalent, and re-apply the zext.
              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                  Trunc.isSignBit())
                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                         UTy);
            }
    }
    break;

  case Instruction::Shl:
    // Turn shift left of a constant amount into a multiply.
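    // For example, X << 4 is modeled as X * 16.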
2891 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2892 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2893 Constant *X = Context->getConstantInt(
2894 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2895 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2896 }
2897 break;
2898
2899 case Instruction::LShr:
2900 // Turn logical shift right of a constant into an unsigned divide.
2901 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2902 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2903 Constant *X = Context->getConstantInt(
2904 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2905 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2906 }
2907 break;
2908
2909 case Instruction::AShr:
2910 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
2911 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2912 if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2913 if (L->getOpcode() == Instruction::Shl &&
2914 L->getOperand(1) == U->getOperand(1)) {
2915 unsigned BitWidth = getTypeSizeInBits(U->getType());
2916 uint64_t Amt = BitWidth - CI->getZExtValue();
2917 if (Amt == BitWidth)
2918 return getSCEV(L->getOperand(0)); // shift by zero --> noop
2919 if (Amt > BitWidth)
2920 return getIntegerSCEV(0, U->getType()); // value is undefined
2921 return
2922 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2923 IntegerType::get(Amt)),
2924 U->getType());
2925 }
2926 break;
2927
2928 case Instruction::Trunc:
2929 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2930
2931 case Instruction::ZExt:
2932 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2933
2934 case Instruction::SExt:
2935 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2936
2937 case Instruction::BitCast:
2938 // BitCasts are no-op casts so we just eliminate the cast.
2939 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2940 return getSCEV(U->getOperand(0));
2941 break;
2942
2943 // It's tempting to handle inttoptr and ptrtoint; however, this can
2944 // lead to pointer expressions which cannot be expanded to GEPs
2945 // (because they may overflow). For now, the only pointer-typed
2946 // expressions we handle are GEPs and address literals.
2947
2948 case Instruction::GetElementPtr:
2949 if (!TD) break; // Without TD we can't analyze pointers.
2950 return createNodeForGEP(U);
2951
2952 case Instruction::PHI:
2953 return createNodeForPHI(cast<PHINode>(U));
2954
2955 case Instruction::Select:
2956 // This could be a smax or umax that was lowered earlier.
2957 // Try to recover it.
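// For example, smax(a, b) is typically expressed as select(a > b, a, b);
// the cases below pattern-match such selects back into smax, smin, umax,
// and umin expressions.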
2958 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) { 2959 Value *LHS = ICI->getOperand(0); 2960 Value *RHS = ICI->getOperand(1); 2961 switch (ICI->getPredicate()) { 2962 case ICmpInst::ICMP_SLT: 2963 case ICmpInst::ICMP_SLE: 2964 std::swap(LHS, RHS); 2965 // fall through 2966 case ICmpInst::ICMP_SGT: 2967 case ICmpInst::ICMP_SGE: 2968 if (LHS == U->getOperand(1) && RHS == U->getOperand(2)) 2969 return getSMaxExpr(getSCEV(LHS), getSCEV(RHS)); 2970 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1)) 2971 return getSMinExpr(getSCEV(LHS), getSCEV(RHS)); 2972 break; 2973 case ICmpInst::ICMP_ULT: 2974 case ICmpInst::ICMP_ULE: 2975 std::swap(LHS, RHS); 2976 // fall through 2977 case ICmpInst::ICMP_UGT: 2978 case ICmpInst::ICMP_UGE: 2979 if (LHS == U->getOperand(1) && RHS == U->getOperand(2)) 2980 return getUMaxExpr(getSCEV(LHS), getSCEV(RHS)); 2981 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1)) 2982 return getUMinExpr(getSCEV(LHS), getSCEV(RHS)); 2983 break; 2984 case ICmpInst::ICMP_NE: 2985 // n != 0 ? n : 1 -> umax(n, 1) 2986 if (LHS == U->getOperand(1) && 2987 isa<ConstantInt>(U->getOperand(2)) && 2988 cast<ConstantInt>(U->getOperand(2))->isOne() && 2989 isa<ConstantInt>(RHS) && 2990 cast<ConstantInt>(RHS)->isZero()) 2991 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2))); 2992 break; 2993 case ICmpInst::ICMP_EQ: 2994 // n == 0 ? 1 : n -> umax(n, 1) 2995 if (LHS == U->getOperand(2) && 2996 isa<ConstantInt>(U->getOperand(1)) && 2997 cast<ConstantInt>(U->getOperand(1))->isOne() && 2998 isa<ConstantInt>(RHS) && 2999 cast<ConstantInt>(RHS)->isZero()) 3000 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1))); 3001 break; 3002 default: 3003 break; 3004 } 3005 } 3006 3007 default: // We cannot analyze this expression. 3008 break; 3009 } 3010 3011 return getUnknown(V); 3012 } 3013 3014 3015 3016 //===----------------------------------------------------------------------===// 3017 // Iteration Count Computation Code 3018 // 3019 3020 /// getBackedgeTakenCount - If the specified loop has a predictable 3021 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute 3022 /// object. The backedge-taken count is the number of times the loop header 3023 /// will be branched to from within the loop. This is one less than the 3024 /// trip count of the loop, since it doesn't count the first iteration, 3025 /// when the header is branched to from outside the loop. 3026 /// 3027 /// Note that it is not valid to call this method on a loop without a 3028 /// loop-invariant backedge-taken count (see 3029 /// hasLoopInvariantBackedgeTakenCount). 3030 /// 3031 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 3032 return getBackedgeTakenInfo(L).Exact; 3033 } 3034 3035 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except 3036 /// return the least SCEV value that is known never to be less than the 3037 /// actual backedge taken count. 3038 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 3039 return getBackedgeTakenInfo(L).Max; 3040 } 3041 3042 /// PushLoopPHIs - Push PHI nodes in the header of the given loop 3043 /// onto the given Worklist. 3044 static void 3045 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 3046 BasicBlock *Header = L->getHeader(); 3047 3048 // Push all Loop-header PHIs onto the Worklist stack. 
3049 for (BasicBlock::iterator I = Header->begin();
3050 PHINode *PN = dyn_cast<PHINode>(I); ++I)
3051 Worklist.push_back(PN);
3052 }
3053
3054 /// PushDefUseChildren - Push users of the given Instruction
3055 /// onto the given Worklist.
3056 static void
3057 PushDefUseChildren(Instruction *I,
3058 SmallVectorImpl<Instruction *> &Worklist) {
3059 // Push the def-use children onto the Worklist stack.
3060 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
3061 UI != UE; ++UI)
3062 Worklist.push_back(cast<Instruction>(UI));
3063 }
3064
3065 const ScalarEvolution::BackedgeTakenInfo &
3066 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3067 // Initially insert a CouldNotCompute for this loop. If the insertion
3068 // succeeds, proceed to actually compute a backedge-taken count and
3069 // update the value. The temporary CouldNotCompute value tells SCEV
3070 // code elsewhere that it shouldn't attempt to request a new
3071 // backedge-taken count, which could result in infinite recursion.
3072 std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
3073 BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3074 if (Pair.second) {
3075 BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
3076 if (ItCount.Exact != getCouldNotCompute()) {
3077 assert(ItCount.Exact->isLoopInvariant(L) &&
3078 ItCount.Max->isLoopInvariant(L) &&
3079 "Computed trip count isn't loop invariant for loop!");
3080 ++NumTripCountsComputed;
3081
3082 // Update the value in the map.
3083 Pair.first->second = ItCount;
3084 } else {
3085 if (ItCount.Max != getCouldNotCompute())
3086 // Update the value in the map.
3087 Pair.first->second = ItCount;
3088 if (isa<PHINode>(L->getHeader()->begin()))
3089 // Only count loops that have phi nodes as not being computable.
3090 ++NumTripCountsNotComputed;
3091 }
3092
3093 // Now that we know more about the trip count for this loop, forget any
3094 // existing SCEV values for PHI nodes in this loop since they are only
3095 // conservative estimates made without the benefit of trip count
3096 // information. This is similar to the code in
3097 // forgetLoopBackedgeTakenCount, except that it handles SCEVUnknown PHI
3098 // nodes specially.
3099 if (ItCount.hasAnyInfo()) {
3100 SmallVector<Instruction *, 16> Worklist;
3101 PushLoopPHIs(L, Worklist);
3102
3103 SmallPtrSet<Instruction *, 8> Visited;
3104 while (!Worklist.empty()) {
3105 Instruction *I = Worklist.pop_back_val();
3106 if (!Visited.insert(I)) continue;
3107
3108 std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3109 Scalars.find(static_cast<Value *>(I));
3110 if (It != Scalars.end()) {
3111 // SCEVUnknown for a PHI either means that it has an unrecognized
3112 // structure, or it's a PHI that's in the process of being computed
3113 // by createNodeForPHI. In the former case, additional loop trip
3114 // count information isn't going to change anything. In the latter
3115 // case, createNodeForPHI will perform the necessary updates on its
3116 // own when it gets to that point.
3117 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
3118 Scalars.erase(It);
3119 ValuesAtScopes.erase(I);
3120 if (PHINode *PN = dyn_cast<PHINode>(I))
3121 ConstantEvolutionLoopExitValue.erase(PN);
3122 }
3123
3124 PushDefUseChildren(I, Worklist);
3125 }
3126 }
3127 }
3128 return Pair.first->second;
3129 }
3130
3131 /// forgetLoopBackedgeTakenCount - This method should be called by the
3132 /// client when it has changed a loop in a way that may affect
3133 /// ScalarEvolution's ability to compute a trip count, or if the loop
3134 /// is deleted.
3135 void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
3136 BackedgeTakenCounts.erase(L);
3137
3138 SmallVector<Instruction *, 16> Worklist;
3139 PushLoopPHIs(L, Worklist);
3140
3141 SmallPtrSet<Instruction *, 8> Visited;
3142 while (!Worklist.empty()) {
3143 Instruction *I = Worklist.pop_back_val();
3144 if (!Visited.insert(I)) continue;
3145
3146 std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3147 Scalars.find(static_cast<Value *>(I));
3148 if (It != Scalars.end()) {
3149 Scalars.erase(It);
3150 ValuesAtScopes.erase(I);
3151 if (PHINode *PN = dyn_cast<PHINode>(I))
3152 ConstantEvolutionLoopExitValue.erase(PN);
3153 }
3154
3155 PushDefUseChildren(I, Worklist);
3156 }
3157 }
3158
3159 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
3160 /// of the specified loop will execute.
3161 ScalarEvolution::BackedgeTakenInfo
3162 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3163 SmallVector<BasicBlock*, 8> ExitingBlocks;
3164 L->getExitingBlocks(ExitingBlocks);
3165
3166 // Examine all exits and pick the most conservative values.
3167 const SCEV *BECount = getCouldNotCompute();
3168 const SCEV *MaxBECount = getCouldNotCompute();
3169 bool CouldNotComputeBECount = false;
3170 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3171 BackedgeTakenInfo NewBTI =
3172 ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3173
3174 if (NewBTI.Exact == getCouldNotCompute()) {
3175 // We couldn't compute an exact value for this exit, so
3176 // we won't be able to compute an exact value for the loop.
3177 CouldNotComputeBECount = true;
3178 BECount = getCouldNotCompute();
3179 } else if (!CouldNotComputeBECount) {
3180 if (BECount == getCouldNotCompute())
3181 BECount = NewBTI.Exact;
3182 else
3183 BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3184 }
3185 if (MaxBECount == getCouldNotCompute())
3186 MaxBECount = NewBTI.Max;
3187 else if (NewBTI.Max != getCouldNotCompute())
3188 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3189 }
3190
3191 return BackedgeTakenInfo(BECount, MaxBECount);
3192 }
3193
3194 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3195 /// of the specified loop will execute if it exits via the specified block.
3196 ScalarEvolution::BackedgeTakenInfo
3197 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3198 BasicBlock *ExitingBlock) {
3199
3200 // Okay, we've chosen an exiting block. See what condition causes us to
3201 // exit at this block.
3202 //
3203 // FIXME: we should be able to handle switch instructions (with a single exit)
3204 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3205 if (ExitBr == 0) return getCouldNotCompute();
3206 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3207
3208 // At this point, we know we have a conditional branch that determines whether
3209 // the loop is exited.
However, we don't know if the branch is executed each
3210 // time through the loop. If not, then the execution count of the branch will
3211 // not be equal to the trip count of the loop.
3212 //
3213 // Currently we check for this by checking to see if the Exit branch goes to
3214 // the loop header. If so, we know it will always execute the same number of
3215 // times as the loop. We also handle the case where the exit block *is* the
3216 // loop header. This is common for un-rotated loops.
3217 //
3218 // If both of those tests fail, walk up the unique predecessor chain to the
3219 // header, stopping if there is an edge that doesn't exit the loop. If the
3220 // header is reached, the execution count of the branch will be equal to the
3221 // trip count of the loop.
3222 //
3223 // More extensive analysis could be done to handle more cases here.
3224 //
3225 if (ExitBr->getSuccessor(0) != L->getHeader() &&
3226 ExitBr->getSuccessor(1) != L->getHeader() &&
3227 ExitBr->getParent() != L->getHeader()) {
3228 // The simple checks failed, try climbing the unique predecessor chain
3229 // up to the header.
3230 bool Ok = false;
3231 for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3232 BasicBlock *Pred = BB->getUniquePredecessor();
3233 if (!Pred)
3234 return getCouldNotCompute();
3235 TerminatorInst *PredTerm = Pred->getTerminator();
3236 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3237 BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3238 if (PredSucc == BB)
3239 continue;
3240 // If the predecessor has a successor that isn't BB and isn't
3241 // outside the loop, assume the worst.
3242 if (L->contains(PredSucc))
3243 return getCouldNotCompute();
3244 }
3245 if (Pred == L->getHeader()) {
3246 Ok = true;
3247 break;
3248 }
3249 BB = Pred;
3250 }
3251 if (!Ok)
3252 return getCouldNotCompute();
3253 }
3254
3255 // Proceed to the next level to examine the exit condition expression.
3256 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3257 ExitBr->getSuccessor(0),
3258 ExitBr->getSuccessor(1));
3259 }
3260
3261 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3262 /// backedge of the specified loop will execute if its exit condition
3263 /// were a conditional branch of ExitCond, TBB, and FBB.
3264 ScalarEvolution::BackedgeTakenInfo
3265 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3266 Value *ExitCond,
3267 BasicBlock *TBB,
3268 BasicBlock *FBB) {
3269 // Check if the controlling expression for this loop is an And or Or.
3270 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3271 if (BO->getOpcode() == Instruction::And) {
3272 // Recurse on the operands of the and.
3273 BackedgeTakenInfo BTI0 =
3274 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3275 BackedgeTakenInfo BTI1 =
3276 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3277 const SCEV *BECount = getCouldNotCompute();
3278 const SCEV *MaxBECount = getCouldNotCompute();
3279 if (L->contains(TBB)) {
3280 // Both conditions must be true for the loop to continue executing.
3281 // Choose the less conservative count.
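// For example, if the loop runs while (i != n && i != m), the
// backedge-taken count is the unsigned minimum of the counts implied by
// the two subconditions, since the loop exits as soon as either fails.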
if (BTI0.Exact == getCouldNotCompute() ||
3283 BTI1.Exact == getCouldNotCompute())
3284 BECount = getCouldNotCompute();
3285 else
3286 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3287 if (BTI0.Max == getCouldNotCompute())
3288 MaxBECount = BTI1.Max;
3289 else if (BTI1.Max == getCouldNotCompute())
3290 MaxBECount = BTI0.Max;
3291 else
3292 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3293 } else {
3294 // Both conditions must be true for the loop to exit.
3295 assert(L->contains(FBB) && "Loop block has no successor in loop!");
3296 if (BTI0.Exact != getCouldNotCompute() &&
3297 BTI1.Exact != getCouldNotCompute())
3298 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3299 if (BTI0.Max != getCouldNotCompute() &&
3300 BTI1.Max != getCouldNotCompute())
3301 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3302 }
3303
3304 return BackedgeTakenInfo(BECount, MaxBECount);
3305 }
3306 if (BO->getOpcode() == Instruction::Or) {
3307 // Recurse on the operands of the or.
3308 BackedgeTakenInfo BTI0 =
3309 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3310 BackedgeTakenInfo BTI1 =
3311 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3312 const SCEV *BECount = getCouldNotCompute();
3313 const SCEV *MaxBECount = getCouldNotCompute();
3314 if (L->contains(FBB)) {
3315 // Both conditions must be false for the loop to continue executing.
3316 // Choose the less conservative count.
3317 if (BTI0.Exact == getCouldNotCompute() ||
3318 BTI1.Exact == getCouldNotCompute())
3319 BECount = getCouldNotCompute();
3320 else
3321 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3322 if (BTI0.Max == getCouldNotCompute())
3323 MaxBECount = BTI1.Max;
3324 else if (BTI1.Max == getCouldNotCompute())
3325 MaxBECount = BTI0.Max;
3326 else
3327 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3328 } else {
3329 // Both conditions must be false for the loop to exit.
3330 assert(L->contains(TBB) && "Loop block has no successor in loop!");
3331 if (BTI0.Exact != getCouldNotCompute() &&
3332 BTI1.Exact != getCouldNotCompute())
3333 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3334 if (BTI0.Max != getCouldNotCompute() &&
3335 BTI1.Max != getCouldNotCompute())
3336 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3337 }
3338
3339 return BackedgeTakenInfo(BECount, MaxBECount);
3340 }
3341 }
3342
3343 // With an icmp, it may be feasible to compute an exact backedge-taken count.
3344 // Proceed to the next level to examine the icmp.
3345 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3346 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3347
3348 // If it's not an integer or pointer comparison then compute it the hard way.
3349 return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3350 }
3351
3352 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3353 /// backedge of the specified loop will execute if its exit condition
3354 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
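/// For example, the exit test of for (i = 0; i != n; ++i) is an icmp of i
/// and n; the cases below map such comparisons onto helpers like
/// HowFarToZero and HowManyLessThans.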
3355 ScalarEvolution::BackedgeTakenInfo 3356 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, 3357 ICmpInst *ExitCond, 3358 BasicBlock *TBB, 3359 BasicBlock *FBB) { 3360 3361 // If the condition was exit on true, convert the condition to exit on false 3362 ICmpInst::Predicate Cond; 3363 if (!L->contains(FBB)) 3364 Cond = ExitCond->getPredicate(); 3365 else 3366 Cond = ExitCond->getInversePredicate(); 3367 3368 // Handle common loops like: for (X = "string"; *X; ++X) 3369 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 3370 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 3371 const SCEV *ItCnt = 3372 ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond); 3373 if (!isa<SCEVCouldNotCompute>(ItCnt)) { 3374 unsigned BitWidth = getTypeSizeInBits(ItCnt->getType()); 3375 return BackedgeTakenInfo(ItCnt, 3376 isa<SCEVConstant>(ItCnt) ? ItCnt : 3377 getConstant(APInt::getMaxValue(BitWidth)-1)); 3378 } 3379 } 3380 3381 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 3382 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 3383 3384 // Try to evaluate any dependencies out of the loop. 3385 LHS = getSCEVAtScope(LHS, L); 3386 RHS = getSCEVAtScope(RHS, L); 3387 3388 // At this point, we would like to compute how many iterations of the 3389 // loop the predicate will return true for these inputs. 3390 if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) { 3391 // If there is a loop-invariant, force it into the RHS. 3392 std::swap(LHS, RHS); 3393 Cond = ICmpInst::getSwappedPredicate(Cond); 3394 } 3395 3396 // If we have a comparison of a chrec against a constant, try to use value 3397 // ranges to answer this query. 3398 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 3399 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 3400 if (AddRec->getLoop() == L) { 3401 // Form the constant range. 
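// For example, with Cond == ICMP_ULT and a constant RHS of 100, the
// range of LHS values satisfying the comparison is [0, 100).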
3402 ConstantRange CompRange( 3403 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue())); 3404 3405 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 3406 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 3407 } 3408 3409 switch (Cond) { 3410 case ICmpInst::ICMP_NE: { // while (X != Y) 3411 // Convert to: while (X-Y != 0) 3412 const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L); 3413 if (!isa<SCEVCouldNotCompute>(TC)) return TC; 3414 break; 3415 } 3416 case ICmpInst::ICMP_EQ: { 3417 // Convert to: while (X-Y == 0) // while (X == Y) 3418 const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); 3419 if (!isa<SCEVCouldNotCompute>(TC)) return TC; 3420 break; 3421 } 3422 case ICmpInst::ICMP_SLT: { 3423 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true); 3424 if (BTI.hasAnyInfo()) return BTI; 3425 break; 3426 } 3427 case ICmpInst::ICMP_SGT: { 3428 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), 3429 getNotSCEV(RHS), L, true); 3430 if (BTI.hasAnyInfo()) return BTI; 3431 break; 3432 } 3433 case ICmpInst::ICMP_ULT: { 3434 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false); 3435 if (BTI.hasAnyInfo()) return BTI; 3436 break; 3437 } 3438 case ICmpInst::ICMP_UGT: { 3439 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), 3440 getNotSCEV(RHS), L, false); 3441 if (BTI.hasAnyInfo()) return BTI; 3442 break; 3443 } 3444 default: 3445 #if 0 3446 errs() << "ComputeBackedgeTakenCount "; 3447 if (ExitCond->getOperand(0)->getType()->isUnsigned()) 3448 errs() << "[unsigned] "; 3449 errs() << *LHS << " " 3450 << Instruction::getOpcodeName(Instruction::ICmp) 3451 << " " << *RHS << "\n"; 3452 #endif 3453 break; 3454 } 3455 return 3456 ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); 3457 } 3458 3459 static ConstantInt * 3460 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 3461 ScalarEvolution &SE) { 3462 const SCEV *InVal = SE.getConstant(C); 3463 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 3464 assert(isa<SCEVConstant>(Val) && 3465 "Evaluation of SCEV at constant didn't fold correctly?"); 3466 return cast<SCEVConstant>(Val)->getValue(); 3467 } 3468 3469 /// GetAddressedElementFromGlobal - Given a global variable with an initializer 3470 /// and a GEP expression (missing the pointer index) indexing into it, return 3471 /// the addressed element of the initializer or null if the index expression is 3472 /// invalid. 
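/// For example, for a load of getelementptr (@g, 0, 3), the Indices list
/// holds just {3}, and the element at position 3 of @g's initializer is
/// returned (assuming the index is in range).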
3473 static Constant * 3474 GetAddressedElementFromGlobal(LLVMContext *Context, GlobalVariable *GV, 3475 const std::vector<ConstantInt*> &Indices) { 3476 Constant *Init = GV->getInitializer(); 3477 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 3478 uint64_t Idx = Indices[i]->getZExtValue(); 3479 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) { 3480 assert(Idx < CS->getNumOperands() && "Bad struct index!"); 3481 Init = cast<Constant>(CS->getOperand(Idx)); 3482 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) { 3483 if (Idx >= CA->getNumOperands()) return 0; // Bogus program 3484 Init = cast<Constant>(CA->getOperand(Idx)); 3485 } else if (isa<ConstantAggregateZero>(Init)) { 3486 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) { 3487 assert(Idx < STy->getNumElements() && "Bad struct index!"); 3488 Init = Context->getNullValue(STy->getElementType(Idx)); 3489 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) { 3490 if (Idx >= ATy->getNumElements()) return 0; // Bogus program 3491 Init = Context->getNullValue(ATy->getElementType()); 3492 } else { 3493 llvm_unreachable("Unknown constant aggregate type!"); 3494 } 3495 return 0; 3496 } else { 3497 return 0; // Unknown initializer type 3498 } 3499 } 3500 return Init; 3501 } 3502 3503 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of 3504 /// 'icmp op load X, cst', try to see if we can compute the backedge 3505 /// execution count. 3506 const SCEV * 3507 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount( 3508 LoadInst *LI, 3509 Constant *RHS, 3510 const Loop *L, 3511 ICmpInst::Predicate predicate) { 3512 if (LI->isVolatile()) return getCouldNotCompute(); 3513 3514 // Check to see if the loaded pointer is a getelementptr of a global. 3515 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 3516 if (!GEP) return getCouldNotCompute(); 3517 3518 // Make sure that it is really a constant global we are gepping, with an 3519 // initializer, and make sure the first IDX is really 0. 3520 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 3521 if (!GV || !GV->isConstant() || !GV->hasInitializer() || 3522 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 3523 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 3524 return getCouldNotCompute(); 3525 3526 // Okay, we allow one non-constant index into the GEP instruction. 3527 Value *VarIdx = 0; 3528 std::vector<ConstantInt*> Indexes; 3529 unsigned VarIdxNum = 0; 3530 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 3531 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 3532 Indexes.push_back(CI); 3533 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 3534 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. 3535 VarIdx = GEP->getOperand(i); 3536 VarIdxNum = i-2; 3537 Indexes.push_back(0); 3538 } 3539 3540 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 3541 // Check to see if X is a loop variant variable value now. 3542 const SCEV *Idx = getSCEV(VarIdx); 3543 Idx = getSCEVAtScope(Idx, L); 3544 3545 // We can only recognize very limited forms of loop index expressions, in 3546 // particular, only affine AddRec's like {C1,+,C2}. 
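// For example, the index of a loop written as for (i = 2; ...; i += 3)
// is the affine AddRec {2,+,3}, whose value at iteration k is 2 + 3*k.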
3547 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 3548 if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) || 3549 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 3550 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 3551 return getCouldNotCompute(); 3552 3553 unsigned MaxSteps = MaxBruteForceIterations; 3554 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 3555 ConstantInt *ItCst = Context->getConstantInt( 3556 cast<IntegerType>(IdxExpr->getType()), IterationNum); 3557 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 3558 3559 // Form the GEP offset. 3560 Indexes[VarIdxNum] = Val; 3561 3562 Constant *Result = GetAddressedElementFromGlobal(Context, GV, Indexes); 3563 if (Result == 0) break; // Cannot compute! 3564 3565 // Evaluate the condition for this iteration. 3566 Result = ConstantExpr::getICmp(predicate, Result, RHS); 3567 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 3568 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 3569 #if 0 3570 errs() << "\n***\n*** Computed loop count " << *ItCst 3571 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader() 3572 << "***\n"; 3573 #endif 3574 ++NumArrayLenItCounts; 3575 return getConstant(ItCst); // Found terminating iteration! 3576 } 3577 } 3578 return getCouldNotCompute(); 3579 } 3580 3581 3582 /// CanConstantFold - Return true if we can constant fold an instruction of the 3583 /// specified type, assuming that all operands were constants. 3584 static bool CanConstantFold(const Instruction *I) { 3585 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 3586 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I)) 3587 return true; 3588 3589 if (const CallInst *CI = dyn_cast<CallInst>(I)) 3590 if (const Function *F = CI->getCalledFunction()) 3591 return canConstantFoldCallTo(F); 3592 return false; 3593 } 3594 3595 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 3596 /// in the loop that V is derived from. We allow arbitrary operations along the 3597 /// way, but the operands of an operation must either be constants or a value 3598 /// derived from a constant PHI. If this expression does not fit with these 3599 /// constraints, return null. 3600 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 3601 // If this is not an instruction, or if this is an instruction outside of the 3602 // loop, it can't be derived from a loop PHI. 3603 Instruction *I = dyn_cast<Instruction>(V); 3604 if (I == 0 || !L->contains(I->getParent())) return 0; 3605 3606 if (PHINode *PN = dyn_cast<PHINode>(I)) { 3607 if (L->getHeader() == I->getParent()) 3608 return PN; 3609 else 3610 // We don't currently keep track of the control flow needed to evaluate 3611 // PHIs, so we cannot handle PHIs inside of loops. 3612 return 0; 3613 } 3614 3615 // If we won't be able to constant fold this expression even if the operands 3616 // are constants, return early. 3617 if (!CanConstantFold(I)) return 0; 3618 3619 // Otherwise, we can evaluate this instruction if all of its operands are 3620 // constant or derived from a PHI node themselves. 
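// For example, (PHI * 2) + 7 qualifies, while PHI1 + PHI2 does not: the
// loop below gives up when a value evolves from multiple different PHIs.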
3621 PHINode *PHI = 0;
3622 for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3623 if (!(isa<Constant>(I->getOperand(Op)) ||
3624 isa<GlobalValue>(I->getOperand(Op)))) {
3625 PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3626 if (P == 0) return 0; // Not evolving from PHI
3627 if (PHI == 0)
3628 PHI = P;
3629 else if (PHI != P)
3630 return 0; // Evolving from multiple different PHIs.
3631 }
3632
3633 // This is an expression evolving from a constant PHI!
3634 return PHI;
3635 }
3636
3637 /// EvaluateExpression - Given an expression that passes the
3638 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3639 /// in the loop has the value PHIVal. If we can't fold this expression for some
3640 /// reason, return null.
3641 static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3642 if (isa<PHINode>(V)) return PHIVal;
3643 if (Constant *C = dyn_cast<Constant>(V)) return C;
3644 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3645 Instruction *I = cast<Instruction>(V);
3646 LLVMContext *Context = I->getParent()->getContext();
3647
3648 std::vector<Constant*> Operands;
3649 Operands.resize(I->getNumOperands());
3650
3651 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3652 Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3653 if (Operands[i] == 0) return 0;
3654 }
3655
3656 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3657 return ConstantFoldCompareInstOperands(CI->getPredicate(),
3658 &Operands[0], Operands.size(),
3659 Context);
3660 else
3661 return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3662 &Operands[0], Operands.size(),
3663 Context);
3664 }
3665
3666 /// getConstantEvolutionLoopExitValue - If we know that the specified PHI is
3667 /// in the header of its containing loop, we know the loop executes a
3668 /// constant number of times, and the PHI node is just a recurrence
3669 /// involving constants, fold it.
3670 Constant *
3671 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
3672 const APInt& BEs,
3673 const Loop *L) {
3674 std::map<PHINode*, Constant*>::iterator I =
3675 ConstantEvolutionLoopExitValue.find(PN);
3676 if (I != ConstantEvolutionLoopExitValue.end())
3677 return I->second;
3678
3679 if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3680 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
3681
3682 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3683
3684 // Since the loop is canonicalized, the PHI node must have two entries. One
3685 // entry must be a constant (coming in from outside of the loop), and the
3686 // second must be derived from the same PHI.
3687 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3688 Constant *StartCST =
3689 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3690 if (StartCST == 0)
3691 return RetVal = 0; // Must be a constant.
3692
3693 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3694 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3695 if (PN2 != PN)
3696 return RetVal = 0; // Not derived from same PHI.
3697
3698 // Execute the loop symbolically to determine the exit value.
3699 if (BEs.getActiveBits() >= 32)
3700 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3701
3702 unsigned NumIterations = BEs.getZExtValue(); // must be in range
3703 unsigned IterationNum = 0;
3704 for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3705 if (IterationNum == NumIterations)
3706 return RetVal = PHIVal; // Got exit value!
3707
3708 // Compute the value of the PHI node for the next iteration.
3709 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3710 if (NextPHI == PHIVal)
3711 return RetVal = NextPHI; // Stopped evolving!
3712 if (NextPHI == 0)
3713 return 0; // Couldn't evaluate!
3714 PHIVal = NextPHI;
3715 }
3716 }
3717
3718 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
3719 /// constant number of times (the condition evolves only from constants),
3720 /// try to evaluate a few iterations of the loop until the exit
3721 /// condition gets a value of ExitWhen (true or false). If we cannot
3722 /// evaluate the trip count of the loop, return getCouldNotCompute().
3723 const SCEV *
3724 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
3725 Value *Cond,
3726 bool ExitWhen) {
3727 PHINode *PN = getConstantEvolvingPHI(Cond, L);
3728 if (PN == 0) return getCouldNotCompute();
3729
3730 // Since the loop is canonicalized, the PHI node must have two entries. One
3731 // entry must be a constant (coming in from outside of the loop), and the
3732 // second must be derived from the same PHI.
3733 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3734 Constant *StartCST =
3735 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3736 if (StartCST == 0) return getCouldNotCompute(); // Must be a constant.
3737
3738 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3739 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3740 if (PN2 != PN) return getCouldNotCompute(); // Not derived from same PHI.
3741
3742 // Okay, we found a PHI node that defines the trip count of this loop. Execute
3743 // the loop symbolically to determine when the condition gets a value of
3744 // "ExitWhen".
3745 unsigned IterationNum = 0;
3746 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
3747 for (Constant *PHIVal = StartCST;
3748 IterationNum != MaxIterations; ++IterationNum) {
3749 ConstantInt *CondVal =
3750 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3751
3752 // Couldn't symbolically evaluate.
3753 if (!CondVal) return getCouldNotCompute();
3754
3755 if (CondVal->getValue() == uint64_t(ExitWhen)) {
3756 ++NumBruteForceTripCountsComputed;
3757 return getConstant(Type::Int32Ty, IterationNum);
3758 }
3759
3760 // Compute the value of the PHI node for the next iteration.
3761 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3762 if (NextPHI == 0 || NextPHI == PHIVal)
3763 return getCouldNotCompute(); // Couldn't evaluate or not making progress...
3764 PHIVal = NextPHI;
3765 }
3766
3767 // Too many iterations were needed to evaluate.
3768 return getCouldNotCompute();
3769 }
3770
3771 /// getSCEVAtScope - Return a SCEV expression handle for the specified value
3772 /// at the specified scope in the program. The L value specifies a loop
3773 /// nest to evaluate the expression at, where null represents the top level
3774 /// and a specified loop represents the point immediately inside that loop.
3775 ///
3776 /// This method can be used to compute the exit value for a variable defined
3777 /// in a loop by querying what the value will hold in the parent loop.
3778 ///
3779 /// In the case that a relevant loop exit value cannot be computed, the
3780 /// original value V is returned.
3781 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3782 // FIXME: this should be turned into a virtual method on SCEV!
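// For example (a sketch): for an induction variable {0,+,1} in a loop L
// whose backedge-taken count is n, evaluating at the scope of L's parent
// loop yields n, the value the variable holds on the final iteration.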
3783
3784 if (isa<SCEVConstant>(V)) return V;
3785
3786 // If this instruction is evolved from a constant-evolving PHI, compute the
3787 // exit value from the loop without using SCEVs.
3788 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
3789 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
3790 const Loop *LI = (*this->LI)[I->getParent()];
3791 if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
3792 if (PHINode *PN = dyn_cast<PHINode>(I))
3793 if (PN->getParent() == LI->getHeader()) {
3794 // Okay, there is no closed form solution for the PHI node. Check
3795 // to see if the loop that contains it has a known backedge-taken
3796 // count. If so, we may be able to force computation of the exit
3797 // value.
3798 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
3799 if (const SCEVConstant *BTCC =
3800 dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
3801 // Okay, we know how many times the containing loop executes. If
3802 // this is a constant evolving PHI node, get the final value at
3803 // the specified iteration number.
3804 Constant *RV = getConstantEvolutionLoopExitValue(PN,
3805 BTCC->getValue()->getValue(),
3806 LI);
3807 if (RV) return getSCEV(RV);
3808 }
3809 }
3810
3811 // Okay, this is an expression that we cannot symbolically evaluate
3812 // into a SCEV. Check to see if it's possible to symbolically evaluate
3813 // the arguments into constants, and if so, try to constant propagate the
3814 // result. This is particularly useful for computing loop exit values.
3815 if (CanConstantFold(I)) {
3816 // Check to see if we've folded this instruction at this loop before.
3817 std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
3818 std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
3819 Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
3820 if (!Pair.second)
3821 return Pair.first->second ? &*getSCEV(Pair.first->second) : V;
3822
3823 std::vector<Constant*> Operands;
3824 Operands.reserve(I->getNumOperands());
3825 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3826 Value *Op = I->getOperand(i);
3827 if (Constant *C = dyn_cast<Constant>(Op)) {
3828 Operands.push_back(C);
3829 } else {
3830 // If the operand is non-constant and its type is not an integer
3831 // or pointer type, don't even bother trying to analyze it with
3832 // SCEV techniques.
3833 if (!isSCEVable(Op->getType())) 3834 return V; 3835 3836 const SCEV* OpV = getSCEVAtScope(Op, L); 3837 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) { 3838 Constant *C = SC->getValue(); 3839 if (C->getType() != Op->getType()) 3840 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 3841 Op->getType(), 3842 false), 3843 C, Op->getType()); 3844 Operands.push_back(C); 3845 } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) { 3846 if (Constant *C = dyn_cast<Constant>(SU->getValue())) { 3847 if (C->getType() != Op->getType()) 3848 C = 3849 ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 3850 Op->getType(), 3851 false), 3852 C, Op->getType()); 3853 Operands.push_back(C); 3854 } else 3855 return V; 3856 } else { 3857 return V; 3858 } 3859 } 3860 } 3861 3862 Constant *C; 3863 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 3864 C = ConstantFoldCompareInstOperands(CI->getPredicate(), 3865 &Operands[0], Operands.size(), 3866 Context); 3867 else 3868 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), 3869 &Operands[0], Operands.size(), Context); 3870 Pair.first->second = C; 3871 return getSCEV(C); 3872 } 3873 } 3874 3875 // This is some other type of SCEVUnknown, just return it. 3876 return V; 3877 } 3878 3879 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 3880 // Avoid performing the look-up in the common case where the specified 3881 // expression has no loop-variant portions. 3882 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 3883 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 3884 if (OpAtScope != Comm->getOperand(i)) { 3885 // Okay, at least one of these operands is loop variant but might be 3886 // foldable. Build a new instance of the folded commutative expression. 3887 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 3888 Comm->op_begin()+i); 3889 NewOps.push_back(OpAtScope); 3890 3891 for (++i; i != e; ++i) { 3892 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 3893 NewOps.push_back(OpAtScope); 3894 } 3895 if (isa<SCEVAddExpr>(Comm)) 3896 return getAddExpr(NewOps); 3897 if (isa<SCEVMulExpr>(Comm)) 3898 return getMulExpr(NewOps); 3899 if (isa<SCEVSMaxExpr>(Comm)) 3900 return getSMaxExpr(NewOps); 3901 if (isa<SCEVUMaxExpr>(Comm)) 3902 return getUMaxExpr(NewOps); 3903 llvm_unreachable("Unknown commutative SCEV type!"); 3904 } 3905 } 3906 // If we got here, all operands are loop invariant. 3907 return Comm; 3908 } 3909 3910 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 3911 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 3912 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 3913 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 3914 return Div; // must be loop invariant 3915 return getUDivExpr(LHS, RHS); 3916 } 3917 3918 // If this is a loop recurrence for a loop that does not contain L, then we 3919 // are dealing with the final value computed by the loop. 3920 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 3921 if (!L || !AddRec->getLoop()->contains(L->getHeader())) { 3922 // To evaluate this recurrence, we need to know how many times the AddRec 3923 // loop iterates. Compute this now. 3924 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 3925 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 3926 3927 // Then, evaluate the AddRec. 
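// For example, the affine recurrence {Start,+,Step} evaluated at a
// backedge-taken count of n yields Start + n*Step, its value on the
// final iteration.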
3928 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 3929 } 3930 return AddRec; 3931 } 3932 3933 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 3934 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 3935 if (Op == Cast->getOperand()) 3936 return Cast; // must be loop invariant 3937 return getZeroExtendExpr(Op, Cast->getType()); 3938 } 3939 3940 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 3941 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 3942 if (Op == Cast->getOperand()) 3943 return Cast; // must be loop invariant 3944 return getSignExtendExpr(Op, Cast->getType()); 3945 } 3946 3947 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 3948 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 3949 if (Op == Cast->getOperand()) 3950 return Cast; // must be loop invariant 3951 return getTruncateExpr(Op, Cast->getType()); 3952 } 3953 3954 llvm_unreachable("Unknown SCEV type!"); 3955 return 0; 3956 } 3957 3958 /// getSCEVAtScope - This is a convenience function which does 3959 /// getSCEVAtScope(getSCEV(V), L). 3960 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 3961 return getSCEVAtScope(getSCEV(V), L); 3962 } 3963 3964 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the 3965 /// following equation: 3966 /// 3967 /// A * X = B (mod N) 3968 /// 3969 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 3970 /// A and B isn't important. 3971 /// 3972 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 3973 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B, 3974 ScalarEvolution &SE) { 3975 uint32_t BW = A.getBitWidth(); 3976 assert(BW == B.getBitWidth() && "Bit widths must be the same."); 3977 assert(A != 0 && "A must be non-zero."); 3978 3979 // 1. D = gcd(A, N) 3980 // 3981 // The gcd of A and N may have only one prime factor: 2. The number of 3982 // trailing zeros in A is its multiplicity 3983 uint32_t Mult2 = A.countTrailingZeros(); 3984 // D = 2^Mult2 3985 3986 // 2. Check if B is divisible by D. 3987 // 3988 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 3989 // is not less than multiplicity of this prime factor for D. 3990 if (B.countTrailingZeros() < Mult2) 3991 return SE.getCouldNotCompute(); 3992 3993 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 3994 // modulo (N / D). 3995 // 3996 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this 3997 // bit width during computations. 3998 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 3999 APInt Mod(BW + 1, 0); 4000 Mod.set(BW - Mult2); // Mod = N / D 4001 APInt I = AD.multiplicativeInverse(Mod); 4002 4003 // 4. Compute the minimum unsigned root of the equation: 4004 // I * (B / D) mod (N / D) 4005 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod); 4006 4007 // The result is guaranteed to be less than 2^BW so we may truncate it to BW 4008 // bits. 4009 return SE.getConstant(Result.trunc(BW)); 4010 } 4011 4012 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the 4013 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which 4014 /// might be the same) or two SCEVCouldNotCompute objects. 
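/// A chrec {L,+,M,+,N} has the closed form L + M*k + N*(k*(k-1)/2) at
/// iteration k, i.e. the polynomial (N/2)*k^2 + (M - N/2)*k + L; the code
/// below converts the coefficients accordingly before applying the
/// quadratic formula.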
4015 /// 4016 static std::pair<const SCEV *,const SCEV *> 4017 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 4018 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 4019 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 4020 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 4021 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 4022 4023 // We currently can only solve this if the coefficients are constants. 4024 if (!LC || !MC || !NC) { 4025 const SCEV *CNC = SE.getCouldNotCompute(); 4026 return std::make_pair(CNC, CNC); 4027 } 4028 4029 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth(); 4030 const APInt &L = LC->getValue()->getValue(); 4031 const APInt &M = MC->getValue()->getValue(); 4032 const APInt &N = NC->getValue()->getValue(); 4033 APInt Two(BitWidth, 2); 4034 APInt Four(BitWidth, 4); 4035 4036 { 4037 using namespace APIntOps; 4038 const APInt& C = L; 4039 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C 4040 // The B coefficient is M-N/2 4041 APInt B(M); 4042 B -= sdiv(N,Two); 4043 4044 // The A coefficient is N/2 4045 APInt A(N.sdiv(Two)); 4046 4047 // Compute the B^2-4ac term. 4048 APInt SqrtTerm(B); 4049 SqrtTerm *= B; 4050 SqrtTerm -= Four * (A * C); 4051 4052 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest 4053 // integer value or else APInt::sqrt() will assert. 4054 APInt SqrtVal(SqrtTerm.sqrt()); 4055 4056 // Compute the two solutions for the quadratic formula. 4057 // The divisions must be performed as signed divisions. 4058 APInt NegB(-B); 4059 APInt TwoA( A << 1 ); 4060 if (TwoA.isMinValue()) { 4061 const SCEV *CNC = SE.getCouldNotCompute(); 4062 return std::make_pair(CNC, CNC); 4063 } 4064 4065 LLVMContext *Context = SE.getContext(); 4066 4067 ConstantInt *Solution1 = 4068 Context->getConstantInt((NegB + SqrtVal).sdiv(TwoA)); 4069 ConstantInt *Solution2 = 4070 Context->getConstantInt((NegB - SqrtVal).sdiv(TwoA)); 4071 4072 return std::make_pair(SE.getConstant(Solution1), 4073 SE.getConstant(Solution2)); 4074 } // end APIntOps namespace 4075 } 4076 4077 /// HowFarToZero - Return the number of times a backedge comparing the specified 4078 /// value to zero will execute. If not computable, return CouldNotCompute. 4079 const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { 4080 // If the value is a constant 4081 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 4082 // If the value is already zero, the branch will execute zero times. 4083 if (C->getValue()->isZero()) return C; 4084 return getCouldNotCompute(); // Otherwise it will loop infinitely. 4085 } 4086 4087 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); 4088 if (!AddRec || AddRec->getLoop() != L) 4089 return getCouldNotCompute(); 4090 4091 if (AddRec->isAffine()) { 4092 // If this is an affine expression, the execution count of this branch is 4093 // the minimum unsigned root of the following equation: 4094 // 4095 // Start + Step*N = 0 (mod 2^BW) 4096 // 4097 // equivalent to: 4098 // 4099 // Step*N = -Start (mod 2^BW) 4100 // 4101 // where BW is the common bit width of Start and Step. 4102 4103 // Get the initial value for the loop. 
4104 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), 4105 L->getParentLoop()); 4106 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), 4107 L->getParentLoop()); 4108 4109 if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) { 4110 // For now we handle only constant steps. 4111 4112 // First, handle unitary steps. 4113 if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so: 4114 return getNegativeSCEV(Start); // N = -Start (as unsigned) 4115 if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so: 4116 return Start; // N = Start (as unsigned) 4117 4118 // Then, try to solve the above equation provided that Start is constant. 4119 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) 4120 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(), 4121 -StartC->getValue()->getValue(), 4122 *this); 4123 } 4124 } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) { 4125 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 4126 // the quadratic equation to solve it. 4127 std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec, 4128 *this); 4129 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 4130 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 4131 if (R1) { 4132 #if 0 4133 errs() << "HFTZ: " << *V << " - sol#1: " << *R1 4134 << " sol#2: " << *R2 << "\n"; 4135 #endif 4136 // Pick the smallest positive root value. 4137 if (ConstantInt *CB = 4138 dyn_cast<ConstantInt>(Context->getConstantExprICmp(ICmpInst::ICMP_ULT, 4139 R1->getValue(), R2->getValue()))) { 4140 if (CB->getZExtValue() == false) 4141 std::swap(R1, R2); // R1 is the minimum root now. 4142 4143 // We can only use this value if the chrec ends up with an exact zero 4144 // value at this index. When solving for "X*X != 5", for example, we 4145 // should not accept a root of 2. 4146 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); 4147 if (Val->isZero()) 4148 return R1; // We found a quadratic root! 4149 } 4150 } 4151 } 4152 4153 return getCouldNotCompute(); 4154 } 4155 4156 /// HowFarToNonZero - Return the number of times a backedge checking the 4157 /// specified value for nonzero will execute. If not computable, return 4158 /// CouldNotCompute 4159 const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { 4160 // Loops that look like: while (X == 0) are very strange indeed. We don't 4161 // handle them yet except for the trivial case. This could be expanded in the 4162 // future as needed. 4163 4164 // If the value is a constant, check to see if it is known to be non-zero 4165 // already. If so, the backedge will execute zero times. 4166 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 4167 if (!C->getValue()->isNullValue()) 4168 return getIntegerSCEV(0, C->getType()); 4169 return getCouldNotCompute(); // Otherwise it will loop infinitely. 4170 } 4171 4172 // We could implement others, but I really doubt anyone writes loops like 4173 // this, and if they did, they would already be constant folded. 4174 return getCouldNotCompute(); 4175 } 4176 4177 /// getLoopPredecessor - If the given loop's header has exactly one unique 4178 /// predecessor outside the loop, return it. Otherwise return null. 
4179 ///
4180 BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
4181 BasicBlock *Header = L->getHeader();
4182 BasicBlock *Pred = 0;
4183 for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4184 PI != E; ++PI)
4185 if (!L->contains(*PI)) {
4186 if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4187 Pred = *PI;
4188 }
4189 return Pred;
4190 }
4191
4192 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4193 /// (which may not be an immediate predecessor) which has exactly one
4194 /// successor from which BB is reachable, or null if no such block is
4195 /// found.
4196 ///
4197 BasicBlock *
4198 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4199 // If the block has a unique predecessor, then there is no path from the
4200 // predecessor to the block that does not go through the direct edge
4201 // from the predecessor to the block.
4202 if (BasicBlock *Pred = BB->getSinglePredecessor())
4203 return Pred;
4204
4205 // A loop's header is defined to be a block that dominates the loop.
4206 // If the header has a unique predecessor outside the loop, it must be
4207 // a block that has exactly one successor that can reach the loop.
4208 if (Loop *L = LI->getLoopFor(BB))
4209 return getLoopPredecessor(L);
4210
4211 return 0;
4212 }
4213
4214 /// HasSameValue - SCEV structural equivalence is usually sufficient for
4215 /// testing whether two expressions are equal; however, for the purposes of
4216 /// looking for a condition guarding a loop, it can be useful to be a little
4217 /// more general, since a front-end may have replicated the controlling
4218 /// expression.
4219 ///
4220 static bool HasSameValue(const SCEV *A, const SCEV *B) {
4221 // Quick check to see if they are the same SCEV.
4222 if (A == B) return true;
4223
4224 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4225 // two different instructions with the same value. Check for this case.
4226 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4227 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4228 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4229 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4230 if (AI->isIdenticalTo(BI))
4231 return true;
4232
4233 // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRange(S).getSignedMax().isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRange(S).getSignedMin().isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRange(S).getSignedMin().isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {

  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  switch (Pred) {
  default:
    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
    break;
  case ICmpInst::ICMP_SGT:
    Pred = ICmpInst::ICMP_SLT;
    std::swap(LHS, RHS);
    // fall through
  case ICmpInst::ICMP_SLT: {
    ConstantRange LHSRange = getSignedRange(LHS);
    ConstantRange RHSRange = getSignedRange(RHS);
    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
      return true;
    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
      return false;

    const SCEV *Diff = getMinusSCEV(LHS, RHS);
    ConstantRange DiffRange = getUnsignedRange(Diff);
    if (isKnownNegative(Diff)) {
      if (DiffRange.getUnsignedMax().ult(LHSRange.getUnsignedMin()))
        return true;
      if (DiffRange.getUnsignedMin().uge(LHSRange.getUnsignedMax()))
        return false;
    } else if (isKnownPositive(Diff)) {
      if (LHSRange.getUnsignedMax().ult(DiffRange.getUnsignedMin()))
        return true;
      if (LHSRange.getUnsignedMin().uge(DiffRange.getUnsignedMax()))
        return false;
    }
    break;
  }
  case ICmpInst::ICMP_SGE:
    Pred = ICmpInst::ICMP_SLE;
    std::swap(LHS, RHS);
    // fall through
  case ICmpInst::ICMP_SLE: {
    ConstantRange LHSRange = getSignedRange(LHS);
    ConstantRange RHSRange = getSignedRange(RHS);
    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
      return true;
    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
      return false;

    const SCEV *Diff = getMinusSCEV(LHS, RHS);
    ConstantRange DiffRange = getUnsignedRange(Diff);
    if (isKnownNonPositive(Diff)) {
      if (DiffRange.getUnsignedMax().ule(LHSRange.getUnsignedMin()))
        return true;
      if (DiffRange.getUnsignedMin().ugt(LHSRange.getUnsignedMax()))
        return false;
    } else if (isKnownNonNegative(Diff)) {
      if (LHSRange.getUnsignedMax().ule(DiffRange.getUnsignedMin()))
        return true;
      if (LHSRange.getUnsignedMin().ugt(DiffRange.getUnsignedMax()))
        return false;
    }
    break;
  }
  case ICmpInst::ICMP_UGT:
    Pred = ICmpInst::ICMP_ULT;
    std::swap(LHS, RHS);
    // fall through
  case ICmpInst::ICMP_ULT: {
    ConstantRange LHSRange = getUnsignedRange(LHS);
    ConstantRange RHSRange = getUnsignedRange(RHS);
    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
      return true;
    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
      return false;

    const SCEV *Diff = getMinusSCEV(LHS, RHS);
    ConstantRange DiffRange = getUnsignedRange(Diff);
    if (LHSRange.getUnsignedMax().ult(DiffRange.getUnsignedMin()))
      return true;
    if (LHSRange.getUnsignedMin().uge(DiffRange.getUnsignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_UGE:
    Pred = ICmpInst::ICMP_ULE;
    std::swap(LHS, RHS);
    // fall through
  case ICmpInst::ICMP_ULE: {
    ConstantRange LHSRange = getUnsignedRange(LHS);
    ConstantRange RHSRange = getUnsignedRange(RHS);
    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
      return true;
    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
      return false;

    const SCEV *Diff = getMinusSCEV(LHS, RHS);
    ConstantRange DiffRange = getUnsignedRange(Diff);
    if (LHSRange.getUnsignedMax().ule(DiffRange.getUnsignedMin()))
      return true;
    if (LHSRange.getUnsignedMin().ugt(DiffRange.getUnsignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_NE: {
    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
      return true;
    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
      return true;

    const SCEV *Diff = getMinusSCEV(LHS, RHS);
    if (isKnownNonZero(Diff))
      return true;
    break;
  }
  case ICmpInst::ICMP_EQ:
    // The check at the top of the function catches the case where the
    // values are known to be equal.
    break;
  }
  return false;
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
    dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LoopContinuePredicate ||
      LoopContinuePredicate->isUnconditional())
    return false;

  return
    isNecessaryCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader());
}

/// isLoopGuardedByCond - Test whether entry to the loop is protected
/// by a conditional between LHS and RHS. This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
bool
ScalarEvolution::isLoopGuardedByCond(const Loop *L,
                                     ICmpInst::Predicate Pred,
                                     const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  BasicBlock *Predecessor = getLoopPredecessor(L);
  BasicBlock *PredecessorDest = L->getHeader();

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
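  // Illustrative sketch (hypothetical CFG, not taken from this file): given
  //
  //   guard -> preheader -> header
  //
  // preheader is the loop predecessor, and guard is a predecessor of it with
  // a unique successor reaching the header; the walk below examines the
  // conditional branch terminating each such block in turn.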
  for (; Predecessor;
       PredecessorDest = Predecessor,
       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {

    BranchInst *LoopEntryPredicate =
      dyn_cast<BranchInst>(Predecessor->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (isNecessaryCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
                        LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
      return true;
  }

  return false;
}

/// isNecessaryCond - Test whether the condition described by Pred, LHS,
/// and RHS is a necessary condition for the given Cond value to evaluate
/// to true.
bool ScalarEvolution::isNecessaryCond(Value *CondValue,
                                      ICmpInst::Predicate Pred,
                                      const SCEV *LHS, const SCEV *RHS,
                                      bool Inverse) {
  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
               isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
               isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
  if (!ICI) return false;

  // Now that we found a conditional branch that dominates the loop, check to
  // see if it is the comparison we are looking for.
  Value *PreCondLHS = ICI->getOperand(0);
  Value *PreCondRHS = ICI->getOperand(1);
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  if (FoundPred == Pred)
    ; // An exact match.
  else if (!ICmpInst::isTrueWhenEqual(FoundPred) && Pred == ICmpInst::ICMP_NE) {
    // The actual condition is beyond sufficient.
    FoundPred = ICmpInst::ICMP_NE;
    // NE is symmetric but the original comparison may not be. Swap
    // the operands if necessary so that they match below.
    if (isa<SCEVConstant>(LHS))
      std::swap(PreCondLHS, PreCondRHS);
  } else
    // Check a few special cases.
    switch (FoundPred) {
    case ICmpInst::ICMP_UGT:
      if (Pred == ICmpInst::ICMP_ULT) {
        std::swap(PreCondLHS, PreCondRHS);
        FoundPred = ICmpInst::ICMP_ULT;
        break;
      }
      return false;
    case ICmpInst::ICMP_SGT:
      if (Pred == ICmpInst::ICMP_SLT) {
        std::swap(PreCondLHS, PreCondRHS);
        FoundPred = ICmpInst::ICMP_SLT;
        break;
      }
      return false;
    case ICmpInst::ICMP_NE:
      // Expressions like (x >u 0) are often canonicalized to (x != 0),
      // so check for this case by checking if the NE is comparing against
      // a minimum or maximum constant.
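      // For example (illustrative): with Pred == ICMP_UGT and RHS == 0,
      // (x != 0) implies (x >u 0), because 0 is the unsigned minimum; the
      // switch below verifies that the constant is such an extremum.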
      if (!ICmpInst::isTrueWhenEqual(Pred))
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(RHS)) {
          const APInt &A = C->getValue()->getValue();
          switch (Pred) {
          case ICmpInst::ICMP_SLT:
            if (A.isMaxSignedValue()) break;
            return false;
          case ICmpInst::ICMP_SGT:
            if (A.isMinSignedValue()) break;
            return false;
          case ICmpInst::ICMP_ULT:
            if (A.isMaxValue()) break;
            return false;
          case ICmpInst::ICMP_UGT:
            if (A.isMinValue()) break;
            return false;
          default:
            return false;
          }
          FoundPred = Pred;
          // NE is symmetric but the original comparison may not be. Swap
          // the operands if necessary so that they match below.
          if (isa<SCEVConstant>(LHS))
            std::swap(PreCondLHS, PreCondRHS);
          break;
        }
      return false;
    default:
      // We weren't able to reconcile the condition.
      return false;
    }

  assert(Pred == FoundPred && "Conditions were not reconciled!");

  // Bail if the ICmp's operands' types are wider than the needed type
  // before attempting to call getSCEV on them. This avoids infinite
  // recursion, since the analysis of widening casts can require loop
  // exit condition information for overflow checking, which would
  // lead back here.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(PreCondLHS->getType()))
    return false;

  const SCEV *FoundLHS = getSCEV(PreCondLHS);
  const SCEV *FoundRHS = getSCEV(PreCondRHS);

  // Balance the types. The case where FoundLHS' type is wider than
  // LHS' type is checked for above.
  if (getTypeSizeInBits(LHS->getType()) >
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  return isNecessaryCondOperands(Pred, LHS, RHS,
                                 FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isNecessaryCondOperands(Pred, LHS, RHS,
                                 getNotSCEV(FoundRHS), getNotSCEV(FoundLHS));
}

/// isNecessaryCondOperands - Test whether the condition described by Pred,
/// LHS, and RHS is a necessary condition for the condition described by
/// Pred, FoundLHS, and FoundRHS to evaluate to true.
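///
/// For example (an illustrative instance of the SLT/SLE case below):
/// if LHS <=s FoundLHS and RHS >=s FoundRHS, then FoundLHS <s FoundRHS
/// implies LHS <s RHS.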
bool
ScalarEvolution::isNecessaryCondOperands(ICmpInst::Predicate Pred,
                                         const SCEV *LHS, const SCEV *RHS,
                                         const SCEV *FoundLHS,
                                         const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  return false;
}

/// getBECount - Subtract the end and start values and divide by the step,
/// rounding up, to get the number of times the backedge is executed. Return
/// CouldNotCompute if an intermediate computation overflows.
const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
                                        const SCEV *End,
                                        const SCEV *Step) {
  const Type *Ty = Start->getType();
  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
  const SCEV *Diff = getMinusSCEV(End, Start);
  const SCEV *RoundUp = getAddExpr(Step, NegOne);

  // Add an adjustment to the difference between End and Start so that
  // the division will effectively round up.
  const SCEV *Add = getAddExpr(Diff, RoundUp);

  // Check Add for unsigned overflow.
  // TODO: More sophisticated things could be done here.
  const Type *WideTy = Context->getIntegerType(getTypeSizeInBits(Ty) + 1);
  const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
  const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
  const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
  if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
    return getCouldNotCompute();

  return getUDivExpr(Add, Step);
}

/// HowManyLessThans - Return the number of times a backedge containing the
/// specified less-than comparison will execute. If not computable, return
/// CouldNotCompute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool isSigned) {
  // Only handle: "ADDREC < LoopInvariant".
  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  if (AddRec->isAffine()) {
    // FORNOW: We only support unit strides.
    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
    const SCEV *Step = AddRec->getStepRecurrence(*this);

    // TODO: handle non-constant strides.
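    // Illustrative numbers (not from this file): on i8 with stride 10 and
    // constant limit 250 unsigned, an iteration at value 248 satisfies
    // 248 <u 250 but steps to 258, wrapping past the type maximum; the
    // constant checks below return CouldNotCompute for such cases by
    // testing whether Max - Step is less than the limit.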
    const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
    if (!CStep || CStep->isZero())
      return getCouldNotCompute();
    if (CStep->isOne()) {
      // With unit stride, the iteration never steps past the limit value.
    } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
      if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
        // Test whether a positive iteration can step past the limit
        // value and past the maximum value for its type in a single step.
        if (isSigned) {
          APInt Max = APInt::getSignedMaxValue(BitWidth);
          if ((Max - CStep->getValue()->getValue())
                .slt(CLimit->getValue()->getValue()))
            return getCouldNotCompute();
        } else {
          APInt Max = APInt::getMaxValue(BitWidth);
          if ((Max - CStep->getValue()->getValue())
                .ult(CLimit->getValue()->getValue()))
            return getCouldNotCompute();
        }
      } else
        // TODO: handle non-constant limit values below.
        return getCouldNotCompute();
    } else
      // TODO: handle negative strides below.
      return getCouldNotCompute();

    // We know the LHS is of the form {n,+,s} and the RHS is some
    // loop-invariant m. So, we count the number of iterations in which
    // {n,+,s} < m is true. Note that we cannot simply return max(m-n,0)/s
    // because it's not safe to treat m-n as signed nor unsigned due to
    // overflow possibility.

    // First, we get the value of the LHS in the first iteration: n
    const SCEV *Start = AddRec->getOperand(0);

    // Determine the minimum constant start value.
    const SCEV *MinStart = getConstant(isSigned ?
      getSignedRange(Start).getSignedMin() :
      getUnsignedRange(Start).getUnsignedMin());

    // If we know that the condition is true in order to enter the loop,
    // then we know that it will run exactly (m-n)/s times. Otherwise, we
    // only know that it will execute (max(m,n)-n)/s times. In both cases,
    // the division must round up.
    const SCEV *End = RHS;
    if (!isLoopGuardedByCond(L,
                             isSigned ? ICmpInst::ICMP_SLT :
                                        ICmpInst::ICMP_ULT,
                             getMinusSCEV(Start, Step), RHS))
      End = isSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);

    // Determine the maximum constant end value.
    const SCEV *MaxEnd = getConstant(isSigned ?
      getSignedRange(End).getSignedMax() :
      getUnsignedRange(End).getUnsignedMax());

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
    const SCEV *BECount = getBECount(Start, End, Step);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step);

    return BackedgeTakenInfo(BECount, MaxBECount);
  }

  return getCouldNotCompute();
}

/// getNumIterationsInRange - Return the number of iterations of this loop
/// that produce values in the specified constant range. Another way of
/// looking at this is that it returns the first iteration number where the
/// value is not in the range, thus computing the exit count. If the
/// iteration count can't be computed, an instance of SCEVCouldNotCompute is
/// returned.
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
      if (const SCEVAddRecExpr *ShiftedAddRec =
            dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
                           Range.subtract(SC->getValue()->getValue()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!isa<SCEVConstant>(getOperand(i)))
      return SE.getCouldNotCompute();

  // Okay, at this point we know that all elements of the chrec are
  // constants and that the start element is zero.

  // First check to see if the range contains zero. If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getIntegerSCEV(0, getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range. If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value. Also note that we already checked for a full range.
    APInt One(BitWidth, 1);
    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = SE.getContext()->getConstantInt(ExitVal);

    // Evaluate at the exit value. If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened.

    // Ensure that the previous value is in the range. This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               SE.getContext()->getConstantInt(ExitVal - One),
               SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it. To do this, we must frame our
    // problem in terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
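    // For instance (illustrative): the chrec {0,+,M,+,N} has value
    // N*x*(x-1)/2 + M*x at iteration x, so replacing the start value with
    // -Range.getUpper() below yields a chrec whose roots are the iterations
    // at which the original chrec reaches Range.getUpper().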
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());

    // Next, solve the constructed addrec.
    std::pair<const SCEV *,const SCEV *> Roots =
      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1) {
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(
            SE.getContext()->getConstantExprICmp(ICmpInst::ICMP_ULT,
                                                 R1->getValue(),
                                                 R2->getValue()))) {
        if (CB->getZExtValue() == false)
          std::swap(R1, R2);   // R1 is the minimum root now.

        // Make sure the root is not off by one. The returned iteration should
        // not be in the range, but the previous one should be. When solving
        // for "X*X < 5", for example, we should not return a root of 2.
        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
                                                             R1->getValue(),
                                                             SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
            SE.getContext()->getConstantInt(R1->getValue()->getValue()+1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened.
        }

        // If R1 was not in the range, then it is a good return value. Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
          SE.getContext()->getConstantInt(R1->getValue()->getValue()-1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened.
      }
    }
  }

  return SE.getCouldNotCompute();
}



//===----------------------------------------------------------------------===//
// SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
    SE->ValuesAtScopes.erase(I);
  SE->Scalars.erase(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  SmallVector<User *, 16> Worklist;
  SmallPtrSet<User *, 8> Visited;
  Value *Old = getValPtr();
  bool DeleteOld = false;
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE; ++UI)
    Worklist.push_back(*UI);
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old) {
      DeleteOld = true;
      continue;
    }
    if (!Visited.insert(U))
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    if (Instruction *I = dyn_cast<Instruction>(U))
      SE->ValuesAtScopes.erase(I);
    SE->Scalars.erase(U);
    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
         UI != UE; ++UI)
      Worklist.push_back(*UI);
  }
  // Delete the Old value if it (indirectly) references itself.
  if (DeleteOld) {
    if (PHINode *PN = dyn_cast<PHINode>(Old))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    if (Instruction *I = dyn_cast<Instruction>(Old))
      SE->ValuesAtScopes.erase(I);
    SE->Scalars.erase(Old);
    // this now dangles!
  }
  // this may dangle!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
// ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution()
  : FunctionPass(&ID) {
}

bool ScalarEvolution::runOnFunction(Function &F) {
  this->F = &F;
  LI = &getAnalysis<LoopInfo>();
  TD = getAnalysisIfAvailable<TargetData>();
  return false;
}

void ScalarEvolution::releaseMemory() {
  Scalars.clear();
  BackedgeTakenCounts.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValuesAtScopes.clear();
  UniqueSCEVs.clear();
  SCEVAllocator.Reset();
}

void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    PrintLoopInfo(OS, SE, *I);

  OS << "Loop " << L->getHeader()->getName() << ": ";

  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n";
  OS << "Loop " << L->getHeader()->getName() << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n";
}

void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);

  OS << "Classifying expressions for: " << F->getName() << "\n";
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (isSCEVable(I->getType())) {
      OS << *I << '\n';
      OS << " --> ";
      const SCEV *SV = SE.getSCEV(&*I);
      SV->print(OS);

      const Loop *L = LI->getLoopFor((*I).getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << " --> ";
        AtUse->print(OS);
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!ExitValue->isLoopInvariant(L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: " << F->getName() << "\n";
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    PrintLoopInfo(OS, &SE, *I);
}

void ScalarEvolution::print(std::ostream &o, const Module *M) const {
  raw_os_ostream OS(o);
  print(OS, M);
}