//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These classes are reference counted, managed by the const SCEV *
// class.  We only create one SCEV of a particular shape, so pointer-comparisons
// for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
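//
// For example, given IR like the following for a simple counting loop:
//
//   loop:
//     %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//     %i.next = add i32 %i, 1
//
// the PHI %i is represented as the add recurrence {0,+,1}<loop>: a value
// that starts at 0 and increases by 1 on each iteration of the loop.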
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeID(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
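  // No existing SCEV with this profile: allocate a new SCEVConstant from the
  // bump-pointer allocator, construct it in place, and register it at the
  // insert position computed by FindNodeOrInsertPos, so that later lookups
  // with the same profile return this node.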
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(ID, V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(
    ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L->getHeader()))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() << ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      llvm_unreachable("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
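///
/// For example, if Ops is (x, 3, y, x), where x and y are distinct
/// SCEVUnknowns, sorting moves the constant to the front, and the grouping
/// pass then makes the two copies of x adjacent, e.g. (3, x, x, y), so that
/// callers can find all duplicates in a single linear scan.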
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula requires
  // a division step, whereas this formula only requires multiplies and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
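  //
  // As a concrete illustration: for K = 3, K! = 6 = 2^1 * 3, so T = 1 and
  // the odd part of K! is 3.  BC(It, 3) is then computed as
  // It*(It-1)*(It-2) / 2^1 / 3, where the division by 3 is performed by
  // multiplying by the multiplicative inverse of 3 modulo 2^W; for W = 32
  // that inverse is 0xAAAAAAAB, since 3 * 0xAAAAAAAB = 0x200000001, which
  // is congruent to 1 modulo 2^32.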

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
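///
/// For example, for an affine recurrence {A,+,B}, BC(It, 0) = 1 and
/// BC(It, 1) = It, so the value at iteration It is simply A + B*It.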
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
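  // Here X is the i8 add recurrence {0,+,1}; because X provably never
  // exceeds 100, the cast can be pushed inside, giving the i32 add
  // recurrence {0,+,1} for Y.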
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) &&
              (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) ||
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
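  // As in the zero-extend case above, X is the i8 add recurrence {0,+,1}
  // and provably never exceeds 100, so the sign extension can be pushed
  // inside, giving the i32 add recurrence {0,+,1} for Y.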
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          const SCEV *UMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          Add = getAddExpr(Start, UMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV *> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getIntegerSCEV(2, Ty);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
1246       // Some interesting folding opportunity is present, so it's worthwhile to
1247       // re-generate the operands list. Group the operands by constant scale,
1248       // to avoid multiplying by the same constant scale multiple times.
1249       std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1250       for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
1251            E = NewOps.end(); I != E; ++I)
1252         MulOpLists[M.find(*I)->second].push_back(*I);
1253       // Re-generate the operands list.
1254       Ops.clear();
1255       if (AccumulatedConstant != 0)
1256         Ops.push_back(getConstant(AccumulatedConstant));
1257       for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1258            I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1259         if (I->first != 0)
1260           Ops.push_back(getMulExpr(getConstant(I->first),
1261                                    getAddExpr(I->second)));
1262       if (Ops.empty())
1263         return getIntegerSCEV(0, Ty);
1264       if (Ops.size() == 1)
1265         return Ops[0];
1266       return getAddExpr(Ops);
1267     }
1268   }
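  // Example: 2*X + 4*Y + 2*Z + 6 is collected above as the constant 6 plus
  // the scale groups {2: [X, Z], 4: [Y]}, and is re-emitted as
  // 6 + 2*(X + Z) + 4*Y, multiplying by each scale only once.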
1269 
1270   // If we are adding something to a multiply expression, make sure the
1271   // something is not already an operand of the multiply.  If so, merge it into
1272   // the multiply.
1273   for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1274     const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1275     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1276       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1277       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1278         if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
1279           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
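          // (getOperand(MulOp == 0) selects the multiply's other operand:
          // operand 1 when MulOp is 0, operand 0 otherwise.)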
1280           const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1281           if (Mul->getNumOperands() != 2) {
1282             // If the multiply has more than two operands, we must get the
1283             // Y*Z term.
1284             SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
1285             MulOps.erase(MulOps.begin()+MulOp);
1286             InnerMul = getMulExpr(MulOps);
1287           }
1288           const SCEV *One = getIntegerSCEV(1, Ty);
1289           const SCEV *AddOne = getAddExpr(InnerMul, One);
1290           const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
1291           if (Ops.size() == 2) return OuterMul;
1292           if (AddOp < Idx) {
1293             Ops.erase(Ops.begin()+AddOp);
1294             Ops.erase(Ops.begin()+Idx-1);
1295           } else {
1296             Ops.erase(Ops.begin()+Idx);
1297             Ops.erase(Ops.begin()+AddOp-1);
1298           }
1299           Ops.push_back(OuterMul);
1300           return getAddExpr(Ops);
1301         }
1302 
1303       // Check this multiply against other multiplies being added together.
1304       for (unsigned OtherMulIdx = Idx+1;
1305            OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1306            ++OtherMulIdx) {
1307         const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1308         // If MulOp occurs in OtherMul, we can fold the two multiplies
1309         // together.
1310         for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1311              OMulOp != e; ++OMulOp)
1312           if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1313             // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1314             const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1315             if (Mul->getNumOperands() != 2) {
1316               SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1317                                                   Mul->op_end());
1318               MulOps.erase(MulOps.begin()+MulOp);
1319               InnerMul1 = getMulExpr(MulOps);
1320             }
1321             const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1322             if (OtherMul->getNumOperands() != 2) {
1323               SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1324                                                   OtherMul->op_end());
1325               MulOps.erase(MulOps.begin()+OMulOp);
1326               InnerMul2 = getMulExpr(MulOps);
1327             }
1328             const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1329             const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1330             if (Ops.size() == 2) return OuterMul;
1331             Ops.erase(Ops.begin()+Idx);
1332             Ops.erase(Ops.begin()+OtherMulIdx-1);
1333             Ops.push_back(OuterMul);
1334             return getAddExpr(Ops);
1335           }
1336       }
1337     }
1338   }
1339 
1340   // If there are any add recurrences in the operands list, see if any other
1341   // added values are loop invariant.  If so, we can fold them into the
1342   // recurrence.
1343   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1344     ++Idx;
1345 
1346   // Scan over all recurrences, trying to fold loop invariants into them.
1347   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1348     // Scan all of the other operands to this add and add them to the vector if
1349     // they are loop invariant w.r.t. the recurrence.
1350     SmallVector<const SCEV *, 8> LIOps;
1351     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1352     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1353       if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1354         LIOps.push_back(Ops[i]);
1355         Ops.erase(Ops.begin()+i);
1356         --i; --e;
1357       }
1358 
1359     // If we found some loop invariants, fold them into the recurrence.
1360     if (!LIOps.empty()) {
1361       //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1362       LIOps.push_back(AddRec->getStart());
1363 
1364       SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1365                                            AddRec->op_end());
1366       AddRecOps[0] = getAddExpr(LIOps);
1367 
1368       const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
1369       // If all of the other operands were loop invariant, we are done.
1370       if (Ops.size() == 1) return NewRec;
1371 
1372       // Otherwise, combine the folded AddRec with the non-loop-invariant parts.
1373       for (unsigned i = 0;; ++i)
1374         if (Ops[i] == AddRec) {
1375           Ops[i] = NewRec;
1376           break;
1377         }
1378       return getAddExpr(Ops);
1379     }
1380 
1381     // Okay, if there weren't any loop invariants to be folded, check to see if
1382     // there are multiple AddRec's with the same loop induction variable being
1383     // added together.  If so, we can fold them.
1384     for (unsigned OtherIdx = Idx+1;
1385          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1386       if (OtherIdx != Idx) {
1387         const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1388         if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1389           // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
1390           SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
1391                                               AddRec->op_end());
1392           for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
1393             if (i >= NewOps.size()) {
1394               NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
1395                             OtherAddRec->op_end());
1396               break;
1397             }
1398             NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
1399           }
1400           const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
1401 
1402           if (Ops.size() == 2) return NewAddRec;
1403 
1404           Ops.erase(Ops.begin()+Idx);
1405           Ops.erase(Ops.begin()+OtherIdx-1);
1406           Ops.push_back(NewAddRec);
1407           return getAddExpr(Ops);
1408         }
1409       }
1410 
1411     // Otherwise couldn't fold anything into this recurrence.  Move onto the
1412     // next one.
1413   }
1414 
1415   // Okay, it looks like we really DO need an add expr.  Check to see if we
1416   // already have one, otherwise create a new one.
1417   FoldingSetNodeID ID;
1418   ID.AddInteger(scAddExpr);
1419   ID.AddInteger(Ops.size());
1420   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1421     ID.AddPointer(Ops[i]);
1422   void *IP = 0;
1423   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1424   SCEV *S = SCEVAllocator.Allocate<SCEVAddExpr>();
1425   new (S) SCEVAddExpr(ID, Ops);
1426   UniqueSCEVs.InsertNode(S, IP);
1427   return S;
1428 }
1429 
1430 
1431 /// getMulExpr - Get a canonical multiply expression, or something simpler if
1432 /// possible.
1433 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
1434   assert(!Ops.empty() && "Cannot get empty mul!");
1435 #ifndef NDEBUG
1436   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1437     assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1438            getEffectiveSCEVType(Ops[0]->getType()) &&
1439            "SCEVMulExpr operand types don't match!");
1440 #endif
1441 
1442   // Sort by complexity; this groups all similar expression types together.
1443   GroupByComplexity(Ops, LI);
1444 
1445   // If there are any constants, fold them together.
1446   unsigned Idx = 0;
1447   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1448 
1449     // C1*(C2+V) -> C1*C2 + C1*V
1450     if (Ops.size() == 2)
1451       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1452         if (Add->getNumOperands() == 2 &&
1453             isa<SCEVConstant>(Add->getOperand(0)))
1454           return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1455                             getMulExpr(LHSC, Add->getOperand(1)));
1456 
1457 
1458     ++Idx;
1459     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1460       // We found two constants, fold them together!
1461       ConstantInt *Fold = ConstantInt::get(getContext(),
1462                                            LHSC->getValue()->getValue() *
1463                                            RHSC->getValue()->getValue());
1464       Ops[0] = getConstant(Fold);
1465       Ops.erase(Ops.begin()+1);  // Erase the folded element
1466       if (Ops.size() == 1) return Ops[0];
1467       LHSC = cast<SCEVConstant>(Ops[0]);
1468     }
1469 
1470     // If we are left with a constant one being multiplied, strip it off.
1471     if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1472       Ops.erase(Ops.begin());
1473       --Idx;
1474     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1475       // If we have a multiply of zero, it will always be zero.
1476       return Ops[0];
1477     }
1478   }
1479 
1480   // Skip over the add expression until we get to a multiply.
1481   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1482     ++Idx;
1483 
1484   if (Ops.size() == 1)
1485     return Ops[0];
1486 
1487   // If there are mul operands, inline them all into this expression.
1488   if (Idx < Ops.size()) {
1489     bool DeletedMul = false;
1490     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1491       // If we have a mul, expand the mul operands onto the end of the operands
1492       // list.
1493       Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
1494       Ops.erase(Ops.begin()+Idx);
1495       DeletedMul = true;
1496     }
1497 
1498     // If we deleted at least one mul, we added operands to the end of the list,
1499     // and they are not necessarily sorted.  Recurse to re-sort and re-simplify
1500     // any operands we just acquired.
1501     if (DeletedMul)
1502       return getMulExpr(Ops);
1503   }
1504 
1505   // If there are any add recurrences in the operands list, see if any other
1506   // multiplied values are loop invariant.  If so, we can fold them into the
1507   // recurrence.
1508   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1509     ++Idx;
1510 
1511   // Scan over all recurrences, trying to fold loop invariants into them.
1512   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1513     // Scan all of the other operands to this mul and add them to the vector if
1514     // they are loop invariant w.r.t. the recurrence.
1515     SmallVector<const SCEV *, 8> LIOps;
1516     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1517     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1518       if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1519         LIOps.push_back(Ops[i]);
1520         Ops.erase(Ops.begin()+i);
1521         --i; --e;
1522       }
1523 
1524     // If we found some loop invariants, fold them into the recurrence.
1525     if (!LIOps.empty()) {
1526       //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1527       SmallVector<const SCEV *, 4> NewOps;
1528       NewOps.reserve(AddRec->getNumOperands());
1529       if (LIOps.size() == 1) {
1530         const SCEV *Scale = LIOps[0];
1531         for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1532           NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1533       } else {
1534         for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
1535           SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
1536           MulOps.push_back(AddRec->getOperand(i));
1537           NewOps.push_back(getMulExpr(MulOps));
1538         }
1539       }
1540 
1541       const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
1542 
1543       // If all of the other operands were loop invariant, we are done.
1544       if (Ops.size() == 1) return NewRec;
1545 
1546       // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
1547       for (unsigned i = 0;; ++i)
1548         if (Ops[i] == AddRec) {
1549           Ops[i] = NewRec;
1550           break;
1551         }
1552       return getMulExpr(Ops);
1553     }
1554 
1555     // Okay, if there weren't any loop invariants to be folded, check to see if
1556     // there are multiple AddRec's with the same loop induction variable being
1557     // multiplied together.  If so, we can fold them.
1558     for (unsigned OtherIdx = Idx+1;
1559          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1560       if (OtherIdx != Idx) {
1561         const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1562         if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1563           // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
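          // This follows from one step of each recurrence:
          //   F(n+1)*G(n+1) = (F(n)+B)*(G(n)+D)
          //                 = F(n)*G(n) + F(n)*D + G(n)*B + B*D,
          // so the product starts at A*C and steps by F*D + G*B + B*D.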
1564           const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1565           const SCEV *NewStart = getMulExpr(F->getStart(),
1566                                             G->getStart());
1567           const SCEV *B = F->getStepRecurrence(*this);
1568           const SCEV *D = G->getStepRecurrence(*this);
1569           const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
1570                                            getMulExpr(G, B),
1571                                            getMulExpr(B, D));
1572           const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
1573                                                 F->getLoop());
1574           if (Ops.size() == 2) return NewAddRec;
1575 
1576           Ops.erase(Ops.begin()+Idx);
1577           Ops.erase(Ops.begin()+OtherIdx-1);
1578           Ops.push_back(NewAddRec);
1579           return getMulExpr(Ops);
1580         }
1581       }
1582 
1583     // Otherwise couldn't fold anything into this recurrence.  Move onto the
1584     // next one.
1585   }
1586 
1587   // Okay, it looks like we really DO need a mul expr.  Check to see if we
1588   // already have one, otherwise create a new one.
1589   FoldingSetNodeID ID;
1590   ID.AddInteger(scMulExpr);
1591   ID.AddInteger(Ops.size());
1592   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1593     ID.AddPointer(Ops[i]);
1594   void *IP = 0;
1595   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1596   SCEV *S = SCEVAllocator.Allocate<SCEVMulExpr>();
1597   new (S) SCEVMulExpr(ID, Ops);
1598   UniqueSCEVs.InsertNode(S, IP);
1599   return S;
1600 }
1601 
1602 /// getUDivExpr - Get a canonical unsigned division expression, or something
1603 /// simpler if possible.
1604 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1605                                          const SCEV *RHS) {
1606   assert(getEffectiveSCEVType(LHS->getType()) ==
1607          getEffectiveSCEVType(RHS->getType()) &&
1608          "SCEVUDivExpr operand types don't match!");
1609 
1610   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1611     if (RHSC->getValue()->equalsInt(1))
1612       return LHS;                            // X udiv 1 --> X
1613     if (RHSC->isZero())
1614       return getIntegerSCEV(0, LHS->getType()); // value is undefined
1615 
1616     // Determine if the division can be folded into the operands of
1617     // the dividend.
1618     // TODO: Generalize this to non-constants by using known-bits information.
1619     const Type *Ty = LHS->getType();
1620     unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1621     unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1622     // For non-power-of-two values, effectively round the value up to the
1623     // nearest power of two.
1624     if (!RHSC->getValue()->getValue().isPowerOf2())
1625       ++MaxShiftAmt;
1626     const IntegerType *ExtTy =
1627       IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
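    // Widening by MaxShiftAmt bits gives enough headroom that the
    // zero-extended forms compared below agree only when the narrow
    // computation does not wrap.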
1628     // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
1629     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1630       if (const SCEVConstant *Step =
1631             dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1632         if (!Step->getValue()->getValue()
1633               .urem(RHSC->getValue()->getValue()) &&
1634             getZeroExtendExpr(AR, ExtTy) ==
1635             getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1636                           getZeroExtendExpr(Step, ExtTy),
1637                           AR->getLoop())) {
1638           SmallVector<const SCEV *, 4> Operands;
1639           for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1640             Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1641           return getAddRecExpr(Operands, AR->getLoop());
1642         }
1643     // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1644     if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1645       SmallVector<const SCEV *, 4> Operands;
1646       for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1647         Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1648       if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1649         // Find an operand that's safely divisible.
1650         for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1651           const SCEV *Op = M->getOperand(i);
1652           const SCEV *Div = getUDivExpr(Op, RHSC);
1653           if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1654             const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
1655             Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
1656                                                   MOperands.end());
1657             Operands[i] = Div;
1658             return getMulExpr(Operands);
1659           }
1660         }
1661     }
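    // Example: (4*X) /u 2 folds to 2*X here, once the zero-extended forms
    // above prove the multiply cannot wrap.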
1662     // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1663     if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
1664       SmallVector<const SCEV *, 4> Operands;
1665       for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1666         Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1667       if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1668         Operands.clear();
1669         for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1670           const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1671           if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1672             break;
1673           Operands.push_back(Op);
1674         }
1675         if (Operands.size() == A->getNumOperands())
1676           return getAddExpr(Operands);
1677       }
1678     }
1679 
1680     // Fold if both operands are constant.
1681     if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1682       Constant *LHSCV = LHSC->getValue();
1683       Constant *RHSCV = RHSC->getValue();
1684       return getConstant(cast<ConstantInt>(
1685                            getContext().getConstantExprUDiv(LHSCV, RHSCV)));
1686     }
1687   }
1688 
1689   FoldingSetNodeID ID;
1690   ID.AddInteger(scUDivExpr);
1691   ID.AddPointer(LHS);
1692   ID.AddPointer(RHS);
1693   void *IP = 0;
1694   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1695   SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
1696   new (S) SCEVUDivExpr(ID, LHS, RHS);
1697   UniqueSCEVs.InsertNode(S, IP);
1698   return S;
1699 }
1700 
1701 
1702 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1703 /// Simplify the expression as much as possible.
1704 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1705                                            const SCEV *Step, const Loop *L) {
1706   SmallVector<const SCEV *, 4> Operands;
1707   Operands.push_back(Start);
1708   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1709     if (StepChrec->getLoop() == L) {
1710       Operands.insert(Operands.end(), StepChrec->op_begin(),
1711                       StepChrec->op_end());
1712       return getAddRecExpr(Operands, L);
1713     }
1714 
1715   Operands.push_back(Step);
1716   return getAddRecExpr(Operands, L);
1717 }
1718 
1719 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1720 /// Simplify the expression as much as possible.
1721 const SCEV *
1722 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
1723                                const Loop *L) {
1724   if (Operands.size() == 1) return Operands[0];
1725 #ifndef NDEBUG
1726   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1727     assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1728            getEffectiveSCEVType(Operands[0]->getType()) &&
1729            "SCEVAddRecExpr operand types don't match!");
1730 #endif
1731 
1732   if (Operands.back()->isZero()) {
1733     Operands.pop_back();
1734     return getAddRecExpr(Operands, L);             // {X,+,0}  -->  X
1735   }
1736 
1737   // Canonicalize nested AddRecs by nesting them in order of loop depth.
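  // Example: {{A,+,B}<Inner>,+,C}<Outer> becomes {{A,+,C}<Outer>,+,B}<Inner>
  // when the reassociated operands stay invariant in their new loops.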
1738   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1739     const Loop* NestedLoop = NestedAR->getLoop();
1740     if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
1741       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
1742                                                 NestedAR->op_end());
1743       Operands[0] = NestedAR->getStart();
1744       // AddRecs require their operands be loop-invariant with respect to their
1745       // loops. Don't perform this transformation if it would break this
1746       // requirement.
1747       bool AllInvariant = true;
1748       for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1749         if (!Operands[i]->isLoopInvariant(L)) {
1750           AllInvariant = false;
1751           break;
1752         }
1753       if (AllInvariant) {
1754         NestedOperands[0] = getAddRecExpr(Operands, L);
1755         AllInvariant = true;
1756         for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
1757           if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
1758             AllInvariant = false;
1759             break;
1760           }
1761         if (AllInvariant)
1762           // Ok, both add recurrences are valid after the transformation.
1763           return getAddRecExpr(NestedOperands, NestedLoop);
1764       }
1765       // Reset Operands to its original state.
1766       Operands[0] = NestedAR;
1767     }
1768   }
1769 
1770   FoldingSetNodeID ID;
1771   ID.AddInteger(scAddRecExpr);
1772   ID.AddInteger(Operands.size());
1773   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1774     ID.AddPointer(Operands[i]);
1775   ID.AddPointer(L);
1776   void *IP = 0;
1777   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1778   SCEV *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
1779   new (S) SCEVAddRecExpr(ID, Operands, L);
1780   UniqueSCEVs.InsertNode(S, IP);
1781   return S;
1782 }
1783 
1784 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
1785                                          const SCEV *RHS) {
1786   SmallVector<const SCEV *, 2> Ops;
1787   Ops.push_back(LHS);
1788   Ops.push_back(RHS);
1789   return getSMaxExpr(Ops);
1790 }
1791 
1792 const SCEV *
1793 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1794   assert(!Ops.empty() && "Cannot get empty smax!");
1795   if (Ops.size() == 1) return Ops[0];
1796 #ifndef NDEBUG
1797   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1798     assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1799            getEffectiveSCEVType(Ops[0]->getType()) &&
1800            "SCEVSMaxExpr operand types don't match!");
1801 #endif
1802 
1803   // Sort by complexity; this groups all similar expression types together.
1804   GroupByComplexity(Ops, LI);
1805 
1806   // If there are any constants, fold them together.
1807   unsigned Idx = 0;
1808   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1809     ++Idx;
1810     assert(Idx < Ops.size());
1811     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1812       // We found two constants, fold them together!
1813       ConstantInt *Fold = ConstantInt::get(getContext(),
1814                               APIntOps::smax(LHSC->getValue()->getValue(),
1815                                              RHSC->getValue()->getValue()));
1816       Ops[0] = getConstant(Fold);
1817       Ops.erase(Ops.begin()+1);  // Erase the folded element
1818       if (Ops.size() == 1) return Ops[0];
1819       LHSC = cast<SCEVConstant>(Ops[0]);
1820     }
1821 
1822     // If we are left with a constant minimum-int, strip it off.
1823     if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
1824       Ops.erase(Ops.begin());
1825       --Idx;
1826     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
1827       // If we have an smax with a constant maximum-int, it will always be
1828       // maximum-int.
1829       return Ops[0];
1830     }
1831   }
1832 
1833   if (Ops.size() == 1) return Ops[0];
1834 
1835   // Find the first SMax
1836   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
1837     ++Idx;
1838 
1839   // Check to see if one of the operands is an SMax. If so, expand its operands
1840   // onto our operand list, and recurse to simplify.
1841   if (Idx < Ops.size()) {
1842     bool DeletedSMax = false;
1843     while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
1844       Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
1845       Ops.erase(Ops.begin()+Idx);
1846       DeletedSMax = true;
1847     }
1848 
1849     if (DeletedSMax)
1850       return getSMaxExpr(Ops);
1851   }
1852 
1853   // Okay, check to see if the same value occurs in the operand list twice.  If
1854   // so, delete one.  Since we sorted the list, these values are required to
1855   // be adjacent.
1856   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1857     if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
1858       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1859       --i; --e;
1860     }
1861 
1862   if (Ops.size() == 1) return Ops[0];
1863 
1864   assert(!Ops.empty() && "Reduced smax down to nothing!");
1865 
1866   // Okay, it looks like we really DO need an smax expr.  Check to see if we
1867   // already have one, otherwise create a new one.
1868   FoldingSetNodeID ID;
1869   ID.AddInteger(scSMaxExpr);
1870   ID.AddInteger(Ops.size());
1871   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1872     ID.AddPointer(Ops[i]);
1873   void *IP = 0;
1874   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1875   SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
1876   new (S) SCEVSMaxExpr(ID, Ops);
1877   UniqueSCEVs.InsertNode(S, IP);
1878   return S;
1879 }
1880 
1881 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
1882                                          const SCEV *RHS) {
1883   SmallVector<const SCEV *, 2> Ops;
1884   Ops.push_back(LHS);
1885   Ops.push_back(RHS);
1886   return getUMaxExpr(Ops);
1887 }
1888 
1889 const SCEV *
1890 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1891   assert(!Ops.empty() && "Cannot get empty umax!");
1892   if (Ops.size() == 1) return Ops[0];
1893 #ifndef NDEBUG
1894   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1895     assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1896            getEffectiveSCEVType(Ops[0]->getType()) &&
1897            "SCEVUMaxExpr operand types don't match!");
1898 #endif
1899 
1900   // Sort by complexity; this groups all similar expression types together.
1901   GroupByComplexity(Ops, LI);
1902 
1903   // If there are any constants, fold them together.
1904   unsigned Idx = 0;
1905   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1906     ++Idx;
1907     assert(Idx < Ops.size());
1908     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1909       // We found two constants, fold them together!
1910       ConstantInt *Fold = ConstantInt::get(getContext(),
1911                               APIntOps::umax(LHSC->getValue()->getValue(),
1912                                              RHSC->getValue()->getValue()));
1913       Ops[0] = getConstant(Fold);
1914       Ops.erase(Ops.begin()+1);  // Erase the folded element
1915       if (Ops.size() == 1) return Ops[0];
1916       LHSC = cast<SCEVConstant>(Ops[0]);
1917     }
1918 
1919     // If we are left with a constant minimum-int, strip it off.
1920     if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
1921       Ops.erase(Ops.begin());
1922       --Idx;
1923     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
1924       // If we have an umax with a constant maximum-int, it will always be
1925       // maximum-int.
1926       return Ops[0];
1927     }
1928   }
1929 
1930   if (Ops.size() == 1) return Ops[0];
1931 
1932   // Find the first UMax
1933   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
1934     ++Idx;
1935 
1936   // Check to see if one of the operands is a UMax. If so, expand its operands
1937   // onto our operand list, and recurse to simplify.
1938   if (Idx < Ops.size()) {
1939     bool DeletedUMax = false;
1940     while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
1941       Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
1942       Ops.erase(Ops.begin()+Idx);
1943       DeletedUMax = true;
1944     }
1945 
1946     if (DeletedUMax)
1947       return getUMaxExpr(Ops);
1948   }
1949 
1950   // Okay, check to see if the same value occurs in the operand list twice.  If
1951   // so, delete one.  Since we sorted the list, these values are required to
1952   // be adjacent.
1953   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1954     if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
1955       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1956       --i; --e;
1957     }
1958 
1959   if (Ops.size() == 1) return Ops[0];
1960 
1961   assert(!Ops.empty() && "Reduced umax down to nothing!");
1962 
1963   // Okay, it looks like we really DO need a umax expr.  Check to see if we
1964   // already have one, otherwise create a new one.
1965   FoldingSetNodeID ID;
1966   ID.AddInteger(scUMaxExpr);
1967   ID.AddInteger(Ops.size());
1968   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1969     ID.AddPointer(Ops[i]);
1970   void *IP = 0;
1971   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1972   SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
1973   new (S) SCEVUMaxExpr(ID, Ops);
1974   UniqueSCEVs.InsertNode(S, IP);
1975   return S;
1976 }
1977 
1978 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
1979                                          const SCEV *RHS) {
1980   // ~smax(~x, ~y) == smin(x, y).
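  // (Complementing maps x to -1-x, which reverses the signed order, so the
  // smax of the complements is the complement of the smin.)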
1981   return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1982 }
1983 
1984 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
1985                                          const SCEV *RHS) {
1986   // ~umax(~x, ~y) == umin(x, y)
1987   return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1988 }
1989 
1990 const SCEV *ScalarEvolution::getUnknown(Value *V) {
1991   // Don't attempt to do anything other than create a SCEVUnknown object
1992   // here.  createSCEV only calls getUnknown after checking for all other
1993   // interesting possibilities, and any other code that calls getUnknown
1994   // is doing so in order to hide a value from SCEV canonicalization.
1995 
1996   FoldingSetNodeID ID;
1997   ID.AddInteger(scUnknown);
1998   ID.AddPointer(V);
1999   void *IP = 0;
2000   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2001   SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
2002   new (S) SCEVUnknown(ID, V);
2003   UniqueSCEVs.InsertNode(S, IP);
2004   return S;
2005 }
2006 
2007 //===----------------------------------------------------------------------===//
2008 //            Basic SCEV Analysis and PHI Idiom Recognition Code
2009 //
2010 
2011 /// isSCEVable - Test if values of the given type are analyzable within
2012 /// the SCEV framework. This primarily includes integer types, and it
2013 /// can optionally include pointer types if the ScalarEvolution class
2014 /// has access to target-specific information.
2015 bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2016   // Integers are always SCEVable.
2017   if (Ty->isInteger())
2018     return true;
2019 
2020   // Pointers are SCEVable if TargetData information is available
2021   // to provide pointer size information.
2022   if (isa<PointerType>(Ty))
2023     return TD != NULL;
2024 
2025   // Otherwise it's not SCEVable.
2026   return false;
2027 }
2028 
2029 /// getTypeSizeInBits - Return the size in bits of the specified type,
2030 /// for which isSCEVable must return true.
2031 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2032   assert(isSCEVable(Ty) && "Type is not SCEVable!");
2033 
2034   // If we have a TargetData, use it!
2035   if (TD)
2036     return TD->getTypeSizeInBits(Ty);
2037 
2038   // Otherwise, we support only integer types.
2039   assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
2040   return Ty->getPrimitiveSizeInBits();
2041 }
2042 
2043 /// getEffectiveSCEVType - Return a type with the same bitwidth as
2044 /// the given type and which represents how SCEV will treat the given
2045 /// type, for which isSCEVable must return true. For pointer types,
2046 /// this is the pointer-sized integer type.
2047 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2048   assert(isSCEVable(Ty) && "Type is not SCEVable!");
2049 
2050   if (Ty->isInteger())
2051     return Ty;
2052 
2053   assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
2054   return TD->getIntPtrType();
2055 }
2056 
2057 const SCEV *ScalarEvolution::getCouldNotCompute() {
2058   return &CouldNotCompute;
2059 }
2060 
2061 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2062 /// expression and create a new one.
2063 const SCEV *ScalarEvolution::getSCEV(Value *V) {
2064   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2065 
2066   std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2067   if (I != Scalars.end()) return I->second;
2068   const SCEV *S = createSCEV(V);
2069   Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2070   return S;
2071 }
2072 
2073 /// getIntegerSCEV - Given a SCEVable type, create a constant for the
2074 /// specified signed integer value and return a SCEV for the constant.
2075 const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
2076   const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
2077   return getConstant(ConstantInt::get(ITy, Val));
2078 }
2079 
2080 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2081 ///
2082 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2083   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2084     return getConstant(
2085                cast<ConstantInt>(getContext().getConstantExprNeg(VC->getValue())));
2086 
2087   const Type *Ty = V->getType();
2088   Ty = getEffectiveSCEVType(Ty);
2089   return getMulExpr(V,
2090                   getConstant(cast<ConstantInt>(getContext().getAllOnesValue(Ty))));
2091 }
2092 
2093 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2094 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2095   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2096     return getConstant(
2097                 cast<ConstantInt>(getContext().getConstantExprNot(VC->getValue())));
2098 
2099   const Type *Ty = V->getType();
2100   Ty = getEffectiveSCEVType(Ty);
2101   const SCEV *AllOnes =
2102                    getConstant(cast<ConstantInt>(getContext().getAllOnesValue(Ty)));
2103   return getMinusSCEV(AllOnes, V);
2104 }
2105 
2106 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2107 ///
2108 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2109                                           const SCEV *RHS) {
2110   // X - Y --> X + -Y
2111   return getAddExpr(LHS, getNegativeSCEV(RHS));
2112 }
2113 
2114 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2115 /// input value to the specified type.  If the type must be extended, it is zero
2116 /// extended.
2117 const SCEV *
2118 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2119                                          const Type *Ty) {
2120   const Type *SrcTy = V->getType();
2121   assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2122          (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2123          "Cannot truncate or zero extend with non-integer arguments!");
2124   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2125     return V;  // No conversion
2126   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2127     return getTruncateExpr(V, Ty);
2128   return getZeroExtendExpr(V, Ty);
2129 }
2130 
2131 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2132 /// input value to the specified type.  If the type must be extended, it is sign
2133 /// extended.
2134 const SCEV *
2135 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2136                                          const Type *Ty) {
2137   const Type *SrcTy = V->getType();
2138   assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2139          (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2140          "Cannot truncate or zero extend with non-integer arguments!");
2141   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2142     return V;  // No conversion
2143   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2144     return getTruncateExpr(V, Ty);
2145   return getSignExtendExpr(V, Ty);
2146 }
2147 
2148 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2149 /// input value to the specified type.  If the type must be extended, it is zero
2150 /// extended.  The conversion must not be narrowing.
2151 const SCEV *
2152 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2153   const Type *SrcTy = V->getType();
2154   assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2155          (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2156          "Cannot noop or zero extend with non-integer arguments!");
2157   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2158          "getNoopOrZeroExtend cannot truncate!");
2159   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2160     return V;  // No conversion
2161   return getZeroExtendExpr(V, Ty);
2162 }
2163 
2164 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2165 /// input value to the specified type.  If the type must be extended, it is sign
2166 /// extended.  The conversion must not be narrowing.
2167 const SCEV *
2168 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2169   const Type *SrcTy = V->getType();
2170   assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2171          (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2172          "Cannot noop or sign extend with non-integer arguments!");
2173   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2174          "getNoopOrSignExtend cannot truncate!");
2175   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2176     return V;  // No conversion
2177   return getSignExtendExpr(V, Ty);
2178 }
2179 
2180 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2181 /// the input value to the specified type. If the type must be extended,
2182 /// it is extended with unspecified bits. The conversion must not be
2183 /// narrowing.
2184 const SCEV *
2185 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2186   const Type *SrcTy = V->getType();
2187   assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2188          (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2189          "Cannot noop or any extend with non-integer arguments!");
2190   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2191          "getNoopOrAnyExtend cannot truncate!");
2192   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2193     return V;  // No conversion
2194   return getAnyExtendExpr(V, Ty);
2195 }
2196 
2197 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2198 /// input value to the specified type.  The conversion must not be widening.
2199 const SCEV *
2200 ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2201   const Type *SrcTy = V->getType();
2202   assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2203          (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2204          "Cannot truncate or noop with non-integer arguments!");
2205   assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2206          "getTruncateOrNoop cannot extend!");
2207   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2208     return V;  // No conversion
2209   return getTruncateExpr(V, Ty);
2210 }
2211 
2212 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2213 /// the types using zero-extension, and then perform a umax operation
2214 /// with them.
2215 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2216                                                         const SCEV *RHS) {
2217   const SCEV *PromotedLHS = LHS;
2218   const SCEV *PromotedRHS = RHS;
2219 
2220   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2221     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2222   else
2223     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2224 
2225   return getUMaxExpr(PromotedLHS, PromotedRHS);
2226 }
2227 
2228 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
2229 /// the types using zero-extension, and then perform a umin operation
2230 /// with them.
2231 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2232                                                         const SCEV *RHS) {
2233   const SCEV *PromotedLHS = LHS;
2234   const SCEV *PromotedRHS = RHS;
2235 
2236   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2237     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2238   else
2239     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2240 
2241   return getUMinExpr(PromotedLHS, PromotedRHS);
2242 }
2243 
2244 /// PushDefUseChildren - Push users of the given Instruction
2245 /// onto the given Worklist.
2246 static void
2247 PushDefUseChildren(Instruction *I,
2248                    SmallVectorImpl<Instruction *> &Worklist) {
2249   // Push the def-use children onto the Worklist stack.
2250   for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2251        UI != UE; ++UI)
2252     Worklist.push_back(cast<Instruction>(UI));
2253 }
2254 
2255 /// ForgetSymbolicName - This looks up computed SCEV values for all
2256 /// instructions that depend on the given instruction and removes them from
2257 /// the Scalars map if they reference SymName. This is used during PHI
2258 /// resolution.
2259 void
2260 ScalarEvolution::ForgetSymbolicName(Instruction *I, const SCEV *SymName) {
2261   SmallVector<Instruction *, 16> Worklist;
2262   PushDefUseChildren(I, Worklist);
2263 
2264   SmallPtrSet<Instruction *, 8> Visited;
2265   Visited.insert(I);
2266   while (!Worklist.empty()) {
2267     Instruction *I = Worklist.pop_back_val();
2268     if (!Visited.insert(I)) continue;
2269 
2270     std::map<SCEVCallbackVH, const SCEV*>::iterator It =
2271       Scalars.find(static_cast<Value *>(I));
2272     if (It != Scalars.end()) {
2273       // Short-circuit the def-use traversal if the symbolic name
2274       // ceases to appear in expressions.
2275       if (!It->second->hasOperand(SymName))
2276         continue;
2277 
2278       // SCEVUnknown for a PHI either means that it has an unrecognized
2279       // structure, or it's a PHI that's in the progress of being computed
2280       // by createNodeForPHI.  In the former case, additional loop trip
2281       // count information isn't going to change anything. In the later
2282       // case, createNodeForPHI will perform the necessary updates on its
2283       // own when it gets to that point.
2284       if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
2285         Scalars.erase(It);
2286       ValuesAtScopes.erase(I);
2287     }
2288 
2289     PushDefUseChildren(I, Worklist);
2290   }
2291 }
2292 
2293 /// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2294 /// a loop header, making it a potential recurrence, or it doesn't.
2295 ///
2296 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2297   if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
2298     if (const Loop *L = LI->getLoopFor(PN->getParent()))
2299       if (L->getHeader() == PN->getParent()) {
2300         // If it lives in the loop header, it has two incoming values, one
2301         // from outside the loop, and one from inside.
2302         unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2303         unsigned BackEdge     = IncomingEdge^1;
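        // (If incoming block 0 lies inside the loop it is the back edge, so
        // the incoming edge is index 1; the XOR selects the other index.)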
2304 
2305         // While we are analyzing this PHI node, handle its value symbolically.
2306         const SCEV *SymbolicName = getUnknown(PN);
2307         assert(Scalars.find(PN) == Scalars.end() &&
2308                "PHI node already processed?");
2309         Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2310 
2311         // Using this symbolic name for the PHI, analyze the value coming around
2312         // the back-edge.
2313         Value *BEValueV = PN->getIncomingValue(BackEdge);
2314         const SCEV *BEValue = getSCEV(BEValueV);
2315 
2316         // NOTE: If BEValue is loop invariant, we know that the PHI node just
2317         // has a special value for the first iteration of the loop.
2318 
2319         // If the value coming around the backedge is an add with the symbolic
2320         // value we just inserted, then we found a simple induction variable!
2321         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2322           // If there is a single occurrence of the symbolic value, replace it
2323           // with a recurrence.
2324           unsigned FoundIndex = Add->getNumOperands();
2325           for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2326             if (Add->getOperand(i) == SymbolicName)
2327               if (FoundIndex == e) {
2328                 FoundIndex = i;
2329                 break;
2330               }
2331 
2332           if (FoundIndex != Add->getNumOperands()) {
2333             // Create an add with everything but the specified operand.
2334             SmallVector<const SCEV *, 8> Ops;
2335             for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2336               if (i != FoundIndex)
2337                 Ops.push_back(Add->getOperand(i));
2338             const SCEV *Accum = getAddExpr(Ops);
2339 
2340             // This is not a valid addrec if the step amount is varying each
2341             // loop iteration, but is not itself an addrec in this loop.
2342             if (Accum->isLoopInvariant(L) ||
2343                 (isa<SCEVAddRecExpr>(Accum) &&
2344                  cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2345               const SCEV *StartVal =
2346                 getSCEV(PN->getIncomingValue(IncomingEdge));
2347               const SCEV *PHISCEV =
2348                 getAddRecExpr(StartVal, Accum, L);
2349 
2350               // Okay, for the entire analysis of this edge we assumed the PHI
2351               // to be symbolic.  We now need to go back and purge all of the
2352               // entries for the scalars that use the symbolic expression.
2353               ForgetSymbolicName(PN, SymbolicName);
2354               Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2355               return PHISCEV;
2356             }
2357           }
2358         } else if (const SCEVAddRecExpr *AddRec =
2359                      dyn_cast<SCEVAddRecExpr>(BEValue)) {
2360           // Otherwise, this could be a loop like this:
2361           //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2362           // In this case, j = {1,+,1}  and BEValue is j.
2363           // Because the other in-value of i (0) fits the evolution of BEValue,
2364           // i really is an addrec evolution.
2365           if (AddRec->getLoop() == L && AddRec->isAffine()) {
2366             const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2367 
2368             // If StartVal = j.start - j.stride, we can use StartVal as the
2369             // starting value of the addrec evolution.
2370             if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2371                                             AddRec->getOperand(1))) {
2372               const SCEV *PHISCEV =
2373                  getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2374 
2375               // Okay, for the entire analysis of this edge we assumed the PHI
2376               // to be symbolic.  We now need to go back and purge all of the
2377               // entries for the scalars that use the symbolic expression.
2378               ForgetSymbolicName(PN, SymbolicName);
2379               Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2380               return PHISCEV;
2381             }
2382           }
2383         }
2384 
2385         return SymbolicName;
2386       }
2387 
2388   // It's tempting to recognize PHIs with a unique incoming value; however,
2389   // this leads passes like indvars to break LCSSA form. Fortunately, such
2390   // PHIs are rare, as instcombine zaps them.
2391 
2392   // If it's not a loop phi, we can't handle it yet.
2393   return getUnknown(PN);
2394 }
2395 
2396 /// createNodeForGEP - Expand GEP instructions into add and multiply
2397 /// operations. This allows them to be analyzed by regular SCEV code.
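/// For example, the address &A[i].f becomes getSCEV(A) plus i scaled by the
/// element's allocation size, plus the byte offset of field f, all computed
/// in the target's pointer-sized integer type.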
2398 ///
2399 const SCEV *ScalarEvolution::createNodeForGEP(Operator *GEP) {
2400 
2401   const Type *IntPtrTy = TD->getIntPtrType();
2402   Value *Base = GEP->getOperand(0);
2403   // Don't attempt to analyze GEPs over unsized objects.
2404   if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2405     return getUnknown(GEP);
2406   const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2407   gep_type_iterator GTI = gep_type_begin(GEP);
2408   for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2409                                       E = GEP->op_end();
2410        I != E; ++I) {
2411     Value *Index = *I;
2412     // Compute the (potentially symbolic) offset in bytes for this index.
2413     if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2414       // For a struct, add the member offset.
2415       const StructLayout &SL = *TD->getStructLayout(STy);
2416       unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2417       uint64_t Offset = SL.getElementOffset(FieldNo);
2418       TotalOffset = getAddExpr(TotalOffset, getIntegerSCEV(Offset, IntPtrTy));
2419     } else {
2420       // For an array, add the element offset, explicitly scaled.
2421       const SCEV *LocalOffset = getSCEV(Index);
2422       if (!isa<PointerType>(LocalOffset->getType()))
2423         // Getelementptr indices are signed.
2424         LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
2425       LocalOffset =
2426         getMulExpr(LocalOffset,
2427                    getIntegerSCEV(TD->getTypeAllocSize(*GTI), IntPtrTy));
2428       TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2429     }
2430   }
2431   return getAddExpr(getSCEV(Base), TotalOffset);
2432 }
2433 
2434 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2435 /// guaranteed to end in (at every loop iteration).  It is, at the same time,
2436 /// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2437 /// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
2438 uint32_t
2439 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2440   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2441     return C->getValue()->getValue().countTrailingZeros();
2442 
2443   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2444     return std::min(GetMinTrailingZeros(T->getOperand()),
2445                     (uint32_t)getTypeSizeInBits(T->getType()));
2446 
2447   if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2448     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2449     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2450              getTypeSizeInBits(E->getType()) : OpRes;
2451   }
2452 
2453   if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2454     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2455     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2456              getTypeSizeInBits(E->getType()) : OpRes;
2457   }
2458 
2459   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2460     // The result is the min of all operands' results.
2461     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2462     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2463       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2464     return MinOpRes;
2465   }
2466 
2467   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2468     // The result is the sum of all operands' results.
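    // (For example, a multiple of 4 times a multiple of 8 is a multiple of
    // 32: trailing-zero counts 2 and 3 sum to 5, capped at the bitwidth.)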
2469     uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2470     uint32_t BitWidth = getTypeSizeInBits(M->getType());
2471     for (unsigned i = 1, e = M->getNumOperands();
2472          SumOpRes != BitWidth && i != e; ++i)
2473       SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2474                           BitWidth);
2475     return SumOpRes;
2476   }
2477 
2478   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2479     // The result is the min of all operands' results.
2480     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2481     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2482       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2483     return MinOpRes;
2484   }
2485 
2486   if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2487     // The result is the min of all operands' results.
2488     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2489     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2490       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2491     return MinOpRes;
2492   }
2493 
2494   if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2495     // The result is the min of all operands' results.
2496     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2497     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2498       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2499     return MinOpRes;
2500   }
2501 
2502   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2503     // For a SCEVUnknown, ask ValueTracking.
2504     unsigned BitWidth = getTypeSizeInBits(U->getType());
2505     APInt Mask = APInt::getAllOnesValue(BitWidth);
2506     APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2507     ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2508     return Zeros.countTrailingOnes();
2509   }
2510 
2511   // Unhandled (e.g. SCEVUDivExpr): assume no known trailing zeros.
2512   return 0;
2513 }
2514 
2515 /// getUnsignedRange - Determine the unsigned range for a particular SCEV.
2516 ///
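     /// For example (a sketch of the affine addrec case handled below): for
     /// {4,+,1}<L> whose max backedge-taken count is known to be 3, the
     /// possible values are 4..7, so the computed range is [4,8).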
2517 ConstantRange
2518 ScalarEvolution::getUnsignedRange(const SCEV *S) {
2519 
2520   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2521     return ConstantRange(C->getValue()->getValue());
2522 
2523   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2524     ConstantRange X = getUnsignedRange(Add->getOperand(0));
2525     for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2526       X = X.add(getUnsignedRange(Add->getOperand(i)));
2527     return X;
2528   }
2529 
2530   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2531     ConstantRange X = getUnsignedRange(Mul->getOperand(0));
2532     for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2533       X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
2534     return X;
2535   }
2536 
2537   if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2538     ConstantRange X = getUnsignedRange(SMax->getOperand(0));
2539     for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2540       X = X.smax(getUnsignedRange(SMax->getOperand(i)));
2541     return X;
2542   }
2543 
2544   if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2545     ConstantRange X = getUnsignedRange(UMax->getOperand(0));
2546     for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2547       X = X.umax(getUnsignedRange(UMax->getOperand(i)));
2548     return X;
2549   }
2550 
2551   if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2552     ConstantRange X = getUnsignedRange(UDiv->getLHS());
2553     ConstantRange Y = getUnsignedRange(UDiv->getRHS());
2554     return X.udiv(Y);
2555   }
2556 
2557   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2558     ConstantRange X = getUnsignedRange(ZExt->getOperand());
2559     return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
2560   }
2561 
2562   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2563     ConstantRange X = getUnsignedRange(SExt->getOperand());
2564     return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
2565   }
2566 
2567   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2568     ConstantRange X = getUnsignedRange(Trunc->getOperand());
2569     return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
2570   }
2571 
2572   ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
2573 
2574   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2575     const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
2576     const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
2577     if (!Trip) return FullSet;
2578 
2579     // TODO: non-affine addrec
2580     if (AddRec->isAffine()) {
2581       const Type *Ty = AddRec->getType();
2582       const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2583       if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
2584         MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2585 
2586         const SCEV *Start = AddRec->getStart();
2587         const SCEV *Step = AddRec->getStepRecurrence(*this);
2588         const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2589 
2590         // Check for overflow.
2591         // TODO: This is very conservative.
2592         if (!(Step->isOne() &&
2593               isKnownPredicate(ICmpInst::ICMP_ULT, Start, End)) &&
2594             !(Step->isAllOnesValue() &&
2595               isKnownPredicate(ICmpInst::ICMP_UGT, Start, End)))
2596           return FullSet;
2597 
2598         ConstantRange StartRange = getUnsignedRange(Start);
2599         ConstantRange EndRange = getUnsignedRange(End);
2600         APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
2601                                    EndRange.getUnsignedMin());
2602         APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
2603                                    EndRange.getUnsignedMax());
2604         if (Min.isMinValue() && Max.isMaxValue())
2605           return FullSet;
2606         return ConstantRange(Min, Max+1);
2607       }
2608     }
2609   }
2610 
2611   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2612     // For a SCEVUnknown, ask ValueTracking.
2613     unsigned BitWidth = getTypeSizeInBits(U->getType());
2614     APInt Mask = APInt::getAllOnesValue(BitWidth);
2615     APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2616     ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2617     if (Ones == ~Zeros + 1)
2618       return FullSet;
2619     return ConstantRange(Ones, ~Zeros + 1);
2620   }
2621 
2622   return FullSet;
2623 }
2624 
2625 /// getSignedRange - Determine the signed range for a particular SCEV.
2626 ///
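     /// For example (illustrative): for {-2,+,1}<L> whose max backedge-taken
     /// count is known to be 3, the possible values are -2..1, so the
     /// computed range is [-2,2).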
2627 ConstantRange
2628 ScalarEvolution::getSignedRange(const SCEV *S) {
2629 
2630   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2631     return ConstantRange(C->getValue()->getValue());
2632 
2633   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2634     ConstantRange X = getSignedRange(Add->getOperand(0));
2635     for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2636       X = X.add(getSignedRange(Add->getOperand(i)));
2637     return X;
2638   }
2639 
2640   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2641     ConstantRange X = getSignedRange(Mul->getOperand(0));
2642     for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2643       X = X.multiply(getSignedRange(Mul->getOperand(i)));
2644     return X;
2645   }
2646 
2647   if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2648     ConstantRange X = getSignedRange(SMax->getOperand(0));
2649     for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2650       X = X.smax(getSignedRange(SMax->getOperand(i)));
2651     return X;
2652   }
2653 
2654   if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2655     ConstantRange X = getSignedRange(UMax->getOperand(0));
2656     for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2657       X = X.umax(getSignedRange(UMax->getOperand(i)));
2658     return X;
2659   }
2660 
2661   if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2662     ConstantRange X = getSignedRange(UDiv->getLHS());
2663     ConstantRange Y = getSignedRange(UDiv->getRHS());
2664     return X.udiv(Y);
2665   }
2666 
2667   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2668     ConstantRange X = getSignedRange(ZExt->getOperand());
2669     return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
2670   }
2671 
2672   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2673     ConstantRange X = getSignedRange(SExt->getOperand());
2674     return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
2675   }
2676 
2677   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2678     ConstantRange X = getSignedRange(Trunc->getOperand());
2679     return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
2680   }
2681 
2682   ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
2683 
2684   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2685     const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
2686     const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
2687     if (!Trip) return FullSet;
2688 
2689     // TODO: non-affine addrec
2690     if (AddRec->isAffine()) {
2691       const Type *Ty = AddRec->getType();
2692       const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2693       if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
2694         MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2695 
2696         const SCEV *Start = AddRec->getStart();
2697         const SCEV *Step = AddRec->getStepRecurrence(*this);
2698         const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2699 
2700         // Check for overflow.
2701         // TODO: This is very conservative.
2702         if (!(Step->isOne() &&
2703               isKnownPredicate(ICmpInst::ICMP_SLT, Start, End)) &&
2704             !(Step->isAllOnesValue() &&
2705               isKnownPredicate(ICmpInst::ICMP_SGT, Start, End)))
2706           return FullSet;
2707 
2708         ConstantRange StartRange = getSignedRange(Start);
2709         ConstantRange EndRange = getSignedRange(End);
2710         APInt Min = APIntOps::smin(StartRange.getSignedMin(),
2711                                    EndRange.getSignedMin());
2712         APInt Max = APIntOps::smax(StartRange.getSignedMax(),
2713                                    EndRange.getSignedMax());
2714         if (Min.isMinSignedValue() && Max.isMaxSignedValue())
2715           return FullSet;
2716         return ConstantRange(Min, Max+1);
2717       }
2718     }
2719   }
2720 
2721   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2722     // For a SCEVUnknown, ask ValueTracking.
2723     unsigned BitWidth = getTypeSizeInBits(U->getType());
2724     unsigned NS = ComputeNumSignBits(U->getValue(), TD);
2725     if (NS == 1)
2726       return FullSet;
2727     return
2728       ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
2729                     APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1);
2730   }
2731 
2732   return FullSet;
2733 }
2734 
2735 /// createSCEV - We know that there is no SCEV for the specified value.
2736 /// Analyze the expression.
2737 ///
2738 const SCEV *ScalarEvolution::createSCEV(Value *V) {
2739   if (!isSCEVable(V->getType()))
2740     return getUnknown(V);
2741 
2742   unsigned Opcode = Instruction::UserOp1;
2743   if (Instruction *I = dyn_cast<Instruction>(V))
2744     Opcode = I->getOpcode();
2745   else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2746     Opcode = CE->getOpcode();
2747   else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
2748     return getConstant(CI);
2749   else if (isa<ConstantPointerNull>(V))
2750     return getIntegerSCEV(0, V->getType());
2751   else if (isa<UndefValue>(V))
2752     return getIntegerSCEV(0, V->getType());
2753   else
2754     return getUnknown(V);
2755 
2756   Operator *U = cast<Operator>(V);
2757   switch (Opcode) {
2758   case Instruction::Add:
2759     return getAddExpr(getSCEV(U->getOperand(0)),
2760                       getSCEV(U->getOperand(1)));
2761   case Instruction::Mul:
2762     return getMulExpr(getSCEV(U->getOperand(0)),
2763                       getSCEV(U->getOperand(1)));
2764   case Instruction::UDiv:
2765     return getUDivExpr(getSCEV(U->getOperand(0)),
2766                        getSCEV(U->getOperand(1)));
2767   case Instruction::Sub:
2768     return getMinusSCEV(getSCEV(U->getOperand(0)),
2769                         getSCEV(U->getOperand(1)));
2770   case Instruction::And:
2771     // For an expression like x&255 that merely masks off the high bits,
2772     // use zext(trunc(x)) as the SCEV expression.
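         // For example, in i32, (%x & 255) becomes
         // (zext (trunc %x to i8) to i32).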
2773     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2774       if (CI->isNullValue())
2775         return getSCEV(U->getOperand(1));
2776       if (CI->isAllOnesValue())
2777         return getSCEV(U->getOperand(0));
2778       const APInt &A = CI->getValue();
2779 
2780       // Instcombine's ShrinkDemandedConstant may strip bits out of
2781       // constants, obscuring what would otherwise be a low-bits mask.
2782       // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
2783       // knew about to reconstruct a low-bits mask value.
2784       unsigned LZ = A.countLeadingZeros();
2785       unsigned BitWidth = A.getBitWidth();
2786       APInt AllOnes = APInt::getAllOnesValue(BitWidth);
2787       APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2788       ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
2789 
2790       APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
2791 
2792       if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
2793         return
2794           getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
2795                                             IntegerType::get(BitWidth - LZ)),
2796                             U->getType());
2797     }
2798     break;
2799 
2800   case Instruction::Or:
2801     // If the RHS of the Or is a constant, we may have something like:
2802     // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
2803     // optimizations will transparently handle this case.
2804     //
2805     // In order for this transformation to be safe, the LHS must be of the
2806     // form X*(2^n) and the Or constant must be less than 2^n.
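         // For example, (%x*4 | 1) is modeled as (%x*4 + 1): %x*4 has at
         // least two trailing zero bits, and 1 fits within them.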
2807     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2808       const SCEV *LHS = getSCEV(U->getOperand(0));
2809       const APInt &CIVal = CI->getValue();
2810       if (GetMinTrailingZeros(LHS) >=
2811           (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
2812         return getAddExpr(LHS, getSCEV(U->getOperand(1)));
2813     }
2814     break;
2815   case Instruction::Xor:
2816     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2817       // If the RHS of the xor is a signbit, then this is just an add.
2818       // Instcombine turns add of signbit into xor as a strength reduction step.
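           // For example, in i32, (%x ^ 0x80000000) is (%x + 0x80000000).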
2819       if (CI->getValue().isSignBit())
2820         return getAddExpr(getSCEV(U->getOperand(0)),
2821                           getSCEV(U->getOperand(1)));
2822 
2823       // If the RHS of xor is -1, then this is a not operation.
2824       if (CI->isAllOnesValue())
2825         return getNotSCEV(getSCEV(U->getOperand(0)));
2826 
2827       // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
2828       // This is a variant of the check for xor with -1, and it handles
2829       // the case where instcombine has trimmed non-demanded bits out
2830       // of an xor with -1.
2831       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2832         if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2833           if (BO->getOpcode() == Instruction::And &&
2834               LCI->getValue() == CI->getValue())
2835             if (const SCEVZeroExtendExpr *Z =
2836                   dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2837               const Type *UTy = U->getType();
2838               const SCEV *Z0 = Z->getOperand();
2839               const Type *Z0Ty = Z0->getType();
2840               unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2841 
2842       // If C is a low-bits mask, the zero extend is serving to
2843               // mask off the high bits. Complement the operand and
2844               // re-apply the zext.
2845               if (APIntOps::isMask(Z0TySize, CI->getValue()))
2846                 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2847 
2848               // If C is a single bit, it may be in the sign-bit position
2849               // before the zero-extend. In this case, represent the xor
2850               // using an add, which is equivalent, and re-apply the zext.
2851               APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2852               if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2853                   Trunc.isSignBit())
2854                 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2855                                          UTy);
2856             }
2857     }
2858     break;
2859 
2860   case Instruction::Shl:
2861     // Turn shift left of a constant amount into a multiply.
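         // For example, (shl %x, 3) is modeled as (8 * %x).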
2862     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2863       uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2864       Constant *X = ConstantInt::get(getContext(),
2865         APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2866       return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2867     }
2868     break;
2869 
2870   case Instruction::LShr:
2871     // Turn logical shift right of a constant into an unsigned divide.
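         // For example, (lshr %x, 2) is modeled as (%x /u 4).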
2872     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2873       uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2874       Constant *X = ConstantInt::get(getContext(),
2875         APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2876       return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2877     }
2878     break;
2879 
2880   case Instruction::AShr:
2881     // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
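         // For example, in i32, (ashr (shl %x, 24), 24) is modeled as
         // (sext (trunc %x to i8) to i32).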
2882     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2883       if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2884         if (L->getOpcode() == Instruction::Shl &&
2885             L->getOperand(1) == U->getOperand(1)) {
2886           unsigned BitWidth = getTypeSizeInBits(U->getType());
2887           uint64_t Amt = BitWidth - CI->getZExtValue();
2888           if (Amt == BitWidth)
2889             return getSCEV(L->getOperand(0));       // shift by zero --> noop
2890           if (Amt > BitWidth)
2891             return getIntegerSCEV(0, U->getType()); // value is undefined
2892           return
2893             getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2894                                               IntegerType::get(Amt)),
2895                               U->getType());
2896         }
2897     break;
2898 
2899   case Instruction::Trunc:
2900     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2901 
2902   case Instruction::ZExt:
2903     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2904 
2905   case Instruction::SExt:
2906     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2907 
2908   case Instruction::BitCast:
2909     // BitCasts are no-op casts so we just eliminate the cast.
2910     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2911       return getSCEV(U->getOperand(0));
2912     break;
2913 
2914     // It's tempting to handle inttoptr and ptrtoint, however this can
2915     // lead to pointer expressions which cannot be expanded to GEPs
2916     // (because they may overflow). For now, the only pointer-typed
2917     // expressions we handle are GEPs and address literals.
2918 
2919   case Instruction::GetElementPtr:
2920     if (!TD) break; // Without TD we can't analyze pointers.
2921     return createNodeForGEP(U);
2922 
2923   case Instruction::PHI:
2924     return createNodeForPHI(cast<PHINode>(U));
2925 
2926   case Instruction::Select:
2927     // This could be a smax or umax that was lowered earlier.
2928     // Try to recover it.
2929     if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
2930       Value *LHS = ICI->getOperand(0);
2931       Value *RHS = ICI->getOperand(1);
2932       switch (ICI->getPredicate()) {
2933       case ICmpInst::ICMP_SLT:
2934       case ICmpInst::ICMP_SLE:
2935         std::swap(LHS, RHS);
2936         // fall through
2937       case ICmpInst::ICMP_SGT:
2938       case ICmpInst::ICMP_SGE:
2939         if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2940           return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
2941         else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2942           return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
2943         break;
2944       case ICmpInst::ICMP_ULT:
2945       case ICmpInst::ICMP_ULE:
2946         std::swap(LHS, RHS);
2947         // fall through
2948       case ICmpInst::ICMP_UGT:
2949       case ICmpInst::ICMP_UGE:
2950         if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2951           return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
2952         else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2953           return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
2954         break;
2955       case ICmpInst::ICMP_NE:
2956         // n != 0 ? n : 1  ->  umax(n, 1)
2957         if (LHS == U->getOperand(1) &&
2958             isa<ConstantInt>(U->getOperand(2)) &&
2959             cast<ConstantInt>(U->getOperand(2))->isOne() &&
2960             isa<ConstantInt>(RHS) &&
2961             cast<ConstantInt>(RHS)->isZero())
2962           return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
2963         break;
2964       case ICmpInst::ICMP_EQ:
2965         // n == 0 ? 1 : n  ->  umax(n, 1)
2966         if (LHS == U->getOperand(2) &&
2967             isa<ConstantInt>(U->getOperand(1)) &&
2968             cast<ConstantInt>(U->getOperand(1))->isOne() &&
2969             isa<ConstantInt>(RHS) &&
2970             cast<ConstantInt>(RHS)->isZero())
2971           return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
2972         break;
2973       default:
2974         break;
2975       }
2976     }
2977 
2978   default: // We cannot analyze this expression.
2979     break;
2980   }
2981 
2982   return getUnknown(V);
2983 }
2984 
2985 
2986 
2987 //===----------------------------------------------------------------------===//
2988 //                   Iteration Count Computation Code
2989 //
2990 
2991 /// getBackedgeTakenCount - If the specified loop has a predictable
2992 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
2993 /// object. The backedge-taken count is the number of times the loop header
2994 /// will be branched to from within the loop. This is one less than the
2995 /// trip count of the loop, since it doesn't count the first iteration,
2996 /// when the header is branched to from outside the loop.
2997 ///
2998 /// Note that it is not valid to call this method on a loop without a
2999 /// loop-invariant backedge-taken count (see
3000 /// hasLoopInvariantBackedgeTakenCount).
3001 ///
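     /// For example, a loop whose header executes exactly 4 times has a
     /// backedge-taken count of 3 (and a trip count of 4).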
3002 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3003   return getBackedgeTakenInfo(L).Exact;
3004 }
3005 
3006 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3007 /// return the least SCEV value that is known never to be less than the
3008 /// actual backedge taken count.
3009 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3010   return getBackedgeTakenInfo(L).Max;
3011 }
3012 
3013 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
3014 /// onto the given Worklist.
3015 static void
3016 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3017   BasicBlock *Header = L->getHeader();
3018 
3019   // Push all Loop-header PHIs onto the Worklist stack.
3020   for (BasicBlock::iterator I = Header->begin();
3021        PHINode *PN = dyn_cast<PHINode>(I); ++I)
3022     Worklist.push_back(PN);
3023 }
3024 
3025 const ScalarEvolution::BackedgeTakenInfo &
3026 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3027   // Initially insert a CouldNotCompute for this loop. If the insertion
3028   // succeeds, proceed to actually compute a backedge-taken count and
3029   // update the value. The temporary CouldNotCompute value tells SCEV
3030   // code elsewhere that it shouldn't attempt to request a new
3031   // backedge-taken count, which could result in infinite recursion.
3032   std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
3033     BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3034   if (Pair.second) {
3035     BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
3036     if (ItCount.Exact != getCouldNotCompute()) {
3037       assert(ItCount.Exact->isLoopInvariant(L) &&
3038              ItCount.Max->isLoopInvariant(L) &&
3039              "Computed trip count isn't loop invariant for loop!");
3040       ++NumTripCountsComputed;
3041 
3042       // Update the value in the map.
3043       Pair.first->second = ItCount;
3044     } else {
3045       if (ItCount.Max != getCouldNotCompute())
3046         // Update the value in the map.
3047         Pair.first->second = ItCount;
3048       if (isa<PHINode>(L->getHeader()->begin()))
3049         // Only count loops that have phi nodes as not being computable.
3050         ++NumTripCountsNotComputed;
3051     }
3052 
3053     // Now that we know more about the trip count for this loop, forget any
3054     // existing SCEV values for PHI nodes in this loop since they are only
3055     // conservative estimates made without the benefit of trip count
3056     // information. This is similar to the code in
3057     // forgetLoopBackedgeTakenCount, except that it handles SCEVUnknown PHI
3058     // nodes specially.
3059     if (ItCount.hasAnyInfo()) {
3060       SmallVector<Instruction *, 16> Worklist;
3061       PushLoopPHIs(L, Worklist);
3062 
3063       SmallPtrSet<Instruction *, 8> Visited;
3064       while (!Worklist.empty()) {
3065         Instruction *I = Worklist.pop_back_val();
3066         if (!Visited.insert(I)) continue;
3067 
3068         std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3069           Scalars.find(static_cast<Value *>(I));
3070         if (It != Scalars.end()) {
3071           // SCEVUnknown for a PHI either means that it has an unrecognized
3072           // structure, or it's a PHI that's in the progress of being computed
3073           // by createNodeForPHI.  In the former case, additional loop trip
3074           // count information isn't going to change anything. In the later
3075           // case, createNodeForPHI will perform the necessary updates on its
3076           // own when it gets to that point.
3077           if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
3078             Scalars.erase(It);
3079           ValuesAtScopes.erase(I);
3080           if (PHINode *PN = dyn_cast<PHINode>(I))
3081             ConstantEvolutionLoopExitValue.erase(PN);
3082         }
3083 
3084         PushDefUseChildren(I, Worklist);
3085       }
3086     }
3087   }
3088   return Pair.first->second;
3089 }
3090 
3091 /// forgetLoopBackedgeTakenCount - This method should be called by the
3092 /// client when it has changed a loop in a way that may effect
3093 /// ScalarEvolution's ability to compute a trip count, or if the loop
3094 /// is deleted.
3095 void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
3096   BackedgeTakenCounts.erase(L);
3097 
3098   SmallVector<Instruction *, 16> Worklist;
3099   PushLoopPHIs(L, Worklist);
3100 
3101   SmallPtrSet<Instruction *, 8> Visited;
3102   while (!Worklist.empty()) {
3103     Instruction *I = Worklist.pop_back_val();
3104     if (!Visited.insert(I)) continue;
3105 
3106     std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3107       Scalars.find(static_cast<Value *>(I));
3108     if (It != Scalars.end()) {
3109       Scalars.erase(It);
3110       ValuesAtScopes.erase(I);
3111       if (PHINode *PN = dyn_cast<PHINode>(I))
3112         ConstantEvolutionLoopExitValue.erase(PN);
3113     }
3114 
3115     PushDefUseChildren(I, Worklist);
3116   }
3117 }
3118 
3119 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
3120 /// of the specified loop will execute.
3121 ScalarEvolution::BackedgeTakenInfo
3122 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3123   SmallVector<BasicBlock*, 8> ExitingBlocks;
3124   L->getExitingBlocks(ExitingBlocks);
3125 
3126   // Examine all exits and pick the most conservative values.
3127   const SCEV *BECount = getCouldNotCompute();
3128   const SCEV *MaxBECount = getCouldNotCompute();
3129   bool CouldNotComputeBECount = false;
3130   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3131     BackedgeTakenInfo NewBTI =
3132       ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3133 
3134     if (NewBTI.Exact == getCouldNotCompute()) {
3135       // We couldn't compute an exact value for this exit, so
3136       // we won't be able to compute an exact value for the loop.
3137       CouldNotComputeBECount = true;
3138       BECount = getCouldNotCompute();
3139     } else if (!CouldNotComputeBECount) {
3140       if (BECount == getCouldNotCompute())
3141         BECount = NewBTI.Exact;
3142       else
3143         BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3144     }
3145     if (MaxBECount == getCouldNotCompute())
3146       MaxBECount = NewBTI.Max;
3147     else if (NewBTI.Max != getCouldNotCompute())
3148       MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3149   }
3150 
3151   return BackedgeTakenInfo(BECount, MaxBECount);
3152 }
3153 
3154 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3155 /// of the specified loop will execute if it exits via the specified block.
3156 ScalarEvolution::BackedgeTakenInfo
3157 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3158                                                    BasicBlock *ExitingBlock) {
3159 
3160   // Okay, we've chosen an exiting block.  See what condition causes us to
3161   // exit at this block.
3162   //
3163   // FIXME: we should be able to handle switch instructions (with a single exit)
3164   BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3165   if (ExitBr == 0) return getCouldNotCompute();
3166   assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3167 
3168   // At this point, we know we have a conditional branch that determines whether
3169   // the loop is exited.  However, we don't know if the branch is executed each
3170   // time through the loop.  If not, then the execution count of the branch will
3171   // not be equal to the trip count of the loop.
3172   //
3173   // Currently we check for this by checking to see if the Exit branch goes to
3174   // the loop header.  If so, we know it will always execute the same number of
3175   // times as the loop.  We also handle the case where the exit block *is* the
3176   // loop header.  This is common for un-rotated loops.
3177   //
3178   // If both of those tests fail, walk up the unique predecessor chain to the
3179   // header, stopping if there is an edge that doesn't exit the loop. If the
3180   // header is reached, the execution count of the branch will be equal to the
3181   // trip count of the loop.
3182   //
3183   //  More extensive analysis could be done to handle more cases here.
3184   //
3185   if (ExitBr->getSuccessor(0) != L->getHeader() &&
3186       ExitBr->getSuccessor(1) != L->getHeader() &&
3187       ExitBr->getParent() != L->getHeader()) {
3188     // The simple checks failed, try climbing the unique predecessor chain
3189     // up to the header.
3190     bool Ok = false;
3191     for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3192       BasicBlock *Pred = BB->getUniquePredecessor();
3193       if (!Pred)
3194         return getCouldNotCompute();
3195       TerminatorInst *PredTerm = Pred->getTerminator();
3196       for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3197         BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3198         if (PredSucc == BB)
3199           continue;
3200         // If the predecessor has a successor that isn't BB and isn't
3201         // outside the loop, assume the worst.
3202         if (L->contains(PredSucc))
3203           return getCouldNotCompute();
3204       }
3205       if (Pred == L->getHeader()) {
3206         Ok = true;
3207         break;
3208       }
3209       BB = Pred;
3210     }
3211     if (!Ok)
3212       return getCouldNotCompute();
3213   }
3214 
3215   // Proceed to the next level to examine the exit condition expression.
3216   return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3217                                                ExitBr->getSuccessor(0),
3218                                                ExitBr->getSuccessor(1));
3219 }
3220 
3221 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3222 /// backedge of the specified loop will execute if its exit condition
3223 /// were a conditional branch of ExitCond, TBB, and FBB.
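     /// For example (a sketch of the And handling below): for a loop that
     /// continues while (%a != %n) && (%a != %m), the exact backedge-taken
     /// count is the umin of the counts computed for the two subconditions.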
3224 ScalarEvolution::BackedgeTakenInfo
3225 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3226                                                        Value *ExitCond,
3227                                                        BasicBlock *TBB,
3228                                                        BasicBlock *FBB) {
3229   // Check if the controlling expression for this loop is an And or Or.
3230   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3231     if (BO->getOpcode() == Instruction::And) {
3232       // Recurse on the operands of the and.
3233       BackedgeTakenInfo BTI0 =
3234         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3235       BackedgeTakenInfo BTI1 =
3236         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3237       const SCEV *BECount = getCouldNotCompute();
3238       const SCEV *MaxBECount = getCouldNotCompute();
3239       if (L->contains(TBB)) {
3240         // Both conditions must be true for the loop to continue executing.
3241         // Choose the less conservative count.
3242         if (BTI0.Exact == getCouldNotCompute() ||
3243             BTI1.Exact == getCouldNotCompute())
3244           BECount = getCouldNotCompute();
3245         else
3246           BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3247         if (BTI0.Max == getCouldNotCompute())
3248           MaxBECount = BTI1.Max;
3249         else if (BTI1.Max == getCouldNotCompute())
3250           MaxBECount = BTI0.Max;
3251         else
3252           MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3253       } else {
3254         // Both conditions must be true for the loop to exit.
3255         assert(L->contains(FBB) && "Loop block has no successor in loop!");
3256         if (BTI0.Exact != getCouldNotCompute() &&
3257             BTI1.Exact != getCouldNotCompute())
3258           BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3259         if (BTI0.Max != getCouldNotCompute() &&
3260             BTI1.Max != getCouldNotCompute())
3261           MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3262       }
3263 
3264       return BackedgeTakenInfo(BECount, MaxBECount);
3265     }
3266     if (BO->getOpcode() == Instruction::Or) {
3267       // Recurse on the operands of the or.
3268       BackedgeTakenInfo BTI0 =
3269         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3270       BackedgeTakenInfo BTI1 =
3271         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3272       const SCEV *BECount = getCouldNotCompute();
3273       const SCEV *MaxBECount = getCouldNotCompute();
3274       if (L->contains(FBB)) {
3275         // Both conditions must be false for the loop to continue executing.
3276         // Choose the less conservative count.
3277         if (BTI0.Exact == getCouldNotCompute() ||
3278             BTI1.Exact == getCouldNotCompute())
3279           BECount = getCouldNotCompute();
3280         else
3281           BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3282         if (BTI0.Max == getCouldNotCompute())
3283           MaxBECount = BTI1.Max;
3284         else if (BTI1.Max == getCouldNotCompute())
3285           MaxBECount = BTI0.Max;
3286         else
3287           MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3288       } else {
3289         // Both conditions must be false for the loop to exit.
3290         assert(L->contains(TBB) && "Loop block has no successor in loop!");
3291         if (BTI0.Exact != getCouldNotCompute() &&
3292             BTI1.Exact != getCouldNotCompute())
3293           BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3294         if (BTI0.Max != getCouldNotCompute() &&
3295             BTI1.Max != getCouldNotCompute())
3296           MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3297       }
3298 
3299       return BackedgeTakenInfo(BECount, MaxBECount);
3300     }
3301   }
3302 
3303   // With an icmp, it may be feasible to compute an exact backedge-taken count.
3304   // Proceed to the next level to examine the icmp.
3305   if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3306     return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3307 
3308   // If it's not an integer or pointer comparison then compute it the hard way.
3309   return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3310 }
3311 
3312 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3313 /// backedge of the specified loop will execute if its exit condition
3314 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
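     /// For example (illustrative), for "for (i = 0; i != n; ++i)" the
     /// ICMP_NE case below computes HowFarToZero({0,+,1} - n), i.e. n.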
3315 ScalarEvolution::BackedgeTakenInfo
3316 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3317                                                            ICmpInst *ExitCond,
3318                                                            BasicBlock *TBB,
3319                                                            BasicBlock *FBB) {
3320 
3321   // If the condition was exit on true, convert the condition to exit on false
3322   ICmpInst::Predicate Cond;
3323   if (!L->contains(FBB))
3324     Cond = ExitCond->getPredicate();
3325   else
3326     Cond = ExitCond->getInversePredicate();
3327 
3328   // Handle common loops like: for (X = "string"; *X; ++X)
3329   if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3330     if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3331       const SCEV *ItCnt =
3332         ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3333       if (!isa<SCEVCouldNotCompute>(ItCnt)) {
3334         unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
3335         return BackedgeTakenInfo(ItCnt,
3336                                  isa<SCEVConstant>(ItCnt) ? ItCnt :
3337                                    getConstant(APInt::getMaxValue(BitWidth)-1));
3338       }
3339     }
3340 
3341   const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3342   const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3343 
3344   // Try to evaluate any dependencies out of the loop.
3345   LHS = getSCEVAtScope(LHS, L);
3346   RHS = getSCEVAtScope(RHS, L);
3347 
3348   // At this point, we would like to compute how many iterations of the
3349   // loop the predicate will return true for these inputs.
3350   if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3351     // If there is a loop-invariant, force it into the RHS.
3352     std::swap(LHS, RHS);
3353     Cond = ICmpInst::getSwappedPredicate(Cond);
3354   }
3355 
3356   // If we have a comparison of a chrec against a constant, try to use value
3357   // ranges to answer this query.
3358   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3359     if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3360       if (AddRec->getLoop() == L) {
3361         // Form the constant range.
3362         ConstantRange CompRange(
3363             ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3364 
3365         const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3366         if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3367       }
3368 
3369   switch (Cond) {
3370   case ICmpInst::ICMP_NE: {                     // while (X != Y)
3371     // Convert to: while (X-Y != 0)
3372     const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3373     if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3374     break;
3375   }
3376   case ICmpInst::ICMP_EQ: {                     // while (X == Y)
3377     // Convert to: while (X-Y == 0)
3378     const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3379     if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3380     break;
3381   }
3382   case ICmpInst::ICMP_SLT: {
3383     BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3384     if (BTI.hasAnyInfo()) return BTI;
3385     break;
3386   }
3387   case ICmpInst::ICMP_SGT: {
3388     BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3389                                              getNotSCEV(RHS), L, true);
3390     if (BTI.hasAnyInfo()) return BTI;
3391     break;
3392   }
3393   case ICmpInst::ICMP_ULT: {
3394     BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3395     if (BTI.hasAnyInfo()) return BTI;
3396     break;
3397   }
3398   case ICmpInst::ICMP_UGT: {
3399     BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3400                                              getNotSCEV(RHS), L, false);
3401     if (BTI.hasAnyInfo()) return BTI;
3402     break;
3403   }
3404   default:
3405 #if 0
3406     errs() << "ComputeBackedgeTakenCount ";
3407     if (ExitCond->getOperand(0)->getType()->isUnsigned())
3408       errs() << "[unsigned] ";
3409     errs() << *LHS << "   "
3410          << Instruction::getOpcodeName(Instruction::ICmp)
3411          << "   " << *RHS << "\n";
3412 #endif
3413     break;
3414   }
3415   return
3416     ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3417 }
3418 
3419 static ConstantInt *
3420 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3421                                 ScalarEvolution &SE) {
3422   const SCEV *InVal = SE.getConstant(C);
3423   const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3424   assert(isa<SCEVConstant>(Val) &&
3425          "Evaluation of SCEV at constant didn't fold correctly?");
3426   return cast<SCEVConstant>(Val)->getValue();
3427 }
3428 
3429 /// GetAddressedElementFromGlobal - Given a global variable with an initializer
3430 /// and a GEP expression (missing the pointer index) indexing into it, return
3431 /// the addressed element of the initializer or null if the index expression is
3432 /// invalid.
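     /// For example (illustrative), given the initializer
     /// [3 x i32] [i32 1, i32 2, i32 3] and the single index 1, this returns
     /// the i32 2 element; an out-of-range index returns null.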
3433 static Constant *
3434 GetAddressedElementFromGlobal(LLVMContext &Context, GlobalVariable *GV,
3435                               const std::vector<ConstantInt*> &Indices) {
3436   Constant *Init = GV->getInitializer();
3437   for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3438     uint64_t Idx = Indices[i]->getZExtValue();
3439     if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3440       assert(Idx < CS->getNumOperands() && "Bad struct index!");
3441       Init = cast<Constant>(CS->getOperand(Idx));
3442     } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3443       if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
3444       Init = cast<Constant>(CA->getOperand(Idx));
3445     } else if (isa<ConstantAggregateZero>(Init)) {
3446       if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3447         assert(Idx < STy->getNumElements() && "Bad struct index!");
3448         Init = Context.getNullValue(STy->getElementType(Idx));
3449       } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3450         if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
3451         Init = Context.getNullValue(ATy->getElementType());
3452       } else {
3453         llvm_unreachable("Unknown constant aggregate type!");
3454       }
3455       // Init now holds this element's null value; keep indexing into it.
3456     } else {
3457       return 0; // Unknown initializer type
3458     }
3459   }
3460   return Init;
3461 }
3462 
3463 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3464 /// 'icmp op load X, cst', try to see if we can compute the backedge
3465 /// execution count.
3466 const SCEV *
3467 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3468                                                 LoadInst *LI,
3469                                                 Constant *RHS,
3470                                                 const Loop *L,
3471                                                 ICmpInst::Predicate predicate) {
3472   if (LI->isVolatile()) return getCouldNotCompute();
3473 
3474   // Check to see if the loaded pointer is a getelementptr of a global.
3475   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3476   if (!GEP) return getCouldNotCompute();
3477 
3478   // Make sure that it is really a constant global we are gepping, with an
3479   // initializer, and make sure the first IDX is really 0.
3480   GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3481   if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
3482       GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3483       !cast<Constant>(GEP->getOperand(1))->isNullValue())
3484     return getCouldNotCompute();
3485 
3486   // Okay, we allow one non-constant index into the GEP instruction.
3487   Value *VarIdx = 0;
3488   std::vector<ConstantInt*> Indexes;
3489   unsigned VarIdxNum = 0;
3490   for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3491     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3492       Indexes.push_back(CI);
3493     } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
3494       if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
3495       VarIdx = GEP->getOperand(i);
3496       VarIdxNum = i-2;
3497       Indexes.push_back(0);
3498     }
3499 
3500   // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3501   // Check to see if X is a loop variant variable value now.
3502   const SCEV *Idx = getSCEV(VarIdx);
3503   Idx = getSCEVAtScope(Idx, L);
3504 
3505   // We can only recognize very limited forms of loop index expressions, in
3506   // particular, only affine AddRec's like {C1,+,C2}.
3507   const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3508   if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3509       !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3510       !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3511     return getCouldNotCompute();
3512 
3513   unsigned MaxSteps = MaxBruteForceIterations;
3514   for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3515     ConstantInt *ItCst = ConstantInt::get(
3516                            cast<IntegerType>(IdxExpr->getType()), IterationNum);
3517     ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3518 
3519     // Form the GEP offset.
3520     Indexes[VarIdxNum] = Val;
3521 
3522     Constant *Result = GetAddressedElementFromGlobal(getContext(), GV, Indexes);
3523     if (Result == 0) break;  // Cannot compute!
3524 
3525     // Evaluate the condition for this iteration.
3526     Result = ConstantExpr::getICmp(predicate, Result, RHS);
3527     if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
3528     if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3529 #if 0
3530       errs() << "\n***\n*** Computed loop count " << *ItCst
3531              << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3532              << "***\n";
3533 #endif
3534       ++NumArrayLenItCounts;
3535       return getConstant(ItCst);   // Found terminating iteration!
3536     }
3537   }
3538   return getCouldNotCompute();
3539 }
3540 
3541 
3542 /// CanConstantFold - Return true if we can constant fold an instruction of the
3543 /// specified type, assuming that all operands were constants.
3544 static bool CanConstantFold(const Instruction *I) {
3545   if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3546       isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3547     return true;
3548 
3549   if (const CallInst *CI = dyn_cast<CallInst>(I))
3550     if (const Function *F = CI->getCalledFunction())
3551       return canConstantFoldCallTo(F);
3552   return false;
3553 }
3554 
3555 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3556 /// in the loop that V is derived from.  We allow arbitrary operations along the
3557 /// way, but the operands of an operation must either be constants or a value
3558 /// derived from a constant PHI.  If this expression does not fit with these
3559 /// constraints, return null.
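     /// For example (illustrative), given %i = phi i32 [ 0, %entry ],
     /// [ %i.next, %latch ] and %i.next = add i32 %i, 1 in loop L, both %i
     /// and %i.next resolve to the %i PHI node.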
3560 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3561   // If this is not an instruction, or if this is an instruction outside of the
3562   // loop, it can't be derived from a loop PHI.
3563   Instruction *I = dyn_cast<Instruction>(V);
3564   if (I == 0 || !L->contains(I->getParent())) return 0;
3565 
3566   if (PHINode *PN = dyn_cast<PHINode>(I)) {
3567     if (L->getHeader() == I->getParent())
3568       return PN;
3569     else
3570       // We don't currently keep track of the control flow needed to evaluate
3571       // PHIs, so we cannot handle PHIs inside of loops.
3572       return 0;
3573   }
3574 
3575   // If we won't be able to constant fold this expression even if the operands
3576   // are constants, return early.
3577   if (!CanConstantFold(I)) return 0;
3578 
3579   // Otherwise, we can evaluate this instruction if all of its operands are
3580   // constant or derived from a PHI node themselves.
3581   PHINode *PHI = 0;
3582   for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3583     if (!(isa<Constant>(I->getOperand(Op)) ||
3584           isa<GlobalValue>(I->getOperand(Op)))) {
3585       PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3586       if (P == 0) return 0;  // Not evolving from PHI
3587       if (PHI == 0)
3588         PHI = P;
3589       else if (PHI != P)
3590         return 0;  // Evolving from multiple different PHIs.
3591     }
3592 
3593   // This is an expression evolving from a constant PHI!
3594   return PHI;
3595 }
3596 
3597 /// EvaluateExpression - Given an expression that passes the
3598 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3599 /// in the loop has the value PHIVal.  If we can't fold this expression for some
3600 /// reason, return null.
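     /// For example (illustrative), if V is ((%phi * 2) + 1) and PHIVal is
     /// i32 5, this folds to i32 11.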
3601 static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3602   if (isa<PHINode>(V)) return PHIVal;
3603   if (Constant *C = dyn_cast<Constant>(V)) return C;
3604   if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3605   Instruction *I = cast<Instruction>(V);
3606   LLVMContext &Context = I->getParent()->getContext();
3607 
3608   std::vector<Constant*> Operands;
3609   Operands.resize(I->getNumOperands());
3610 
3611   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3612     Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3613     if (Operands[i] == 0) return 0;
3614   }
3615 
3616   if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3617     return ConstantFoldCompareInstOperands(CI->getPredicate(),
3618                                            &Operands[0], Operands.size(),
3619                                            Context);
3620   else
3621     return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3622                                     &Operands[0], Operands.size(),
3623                                     Context);
3624 }
3625 
3626 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
3627 /// in the header of its containing loop, that the loop executes a
3628 /// constant number of times, and that the PHI node is just a recurrence
3629 /// involving constants, fold it.
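     /// For example (illustrative), for %i = phi [ 1, %pre ], [ %i * 2,
     /// %latch ] with a backedge-taken count of 4, the exit value folds
     /// to 16.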
3630 Constant *
3631 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
3632                                                    const APInt& BEs,
3633                                                    const Loop *L) {
3634   std::map<PHINode*, Constant*>::iterator I =
3635     ConstantEvolutionLoopExitValue.find(PN);
3636   if (I != ConstantEvolutionLoopExitValue.end())
3637     return I->second;
3638 
3639   if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3640     return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
3641 
3642   Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3643 
3644   // Since the loop is canonicalized, the PHI node must have two entries.  One
3645   // entry must be a constant (coming in from outside of the loop), and the
3646   // second must be derived from the same PHI.
3647   bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3648   Constant *StartCST =
3649     dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3650   if (StartCST == 0)
3651     return RetVal = 0;  // Must be a constant.
3652 
3653   Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3654   PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3655   if (PN2 != PN)
3656     return RetVal = 0;  // Not derived from same PHI.
3657 
3658   // Execute the loop symbolically to determine the exit value.
3659   if (BEs.getActiveBits() >= 32)
3660     return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3661 
3662   unsigned NumIterations = BEs.getZExtValue(); // must be in range
3663   unsigned IterationNum = 0;
3664   for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3665     if (IterationNum == NumIterations)
3666       return RetVal = PHIVal;  // Got exit value!
3667 
3668     // Compute the value of the PHI node for the next iteration.
3669     Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3670     if (NextPHI == PHIVal)
3671       return RetVal = NextPHI;  // Stopped evolving!
3672     if (NextPHI == 0)
3673       return 0;        // Couldn't evaluate!
3674     PHIVal = NextPHI;
3675   }
3676 }
3677 
3678 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
3679 /// constant number of times (the condition evolves only from constants),
3680 /// try to evaluate a few iterations of the loop until the exit
3681 /// condition gets a value of ExitWhen (true or false).  If we cannot
3682 /// evaluate the trip count of the loop, return getCouldNotCompute().
3683 const SCEV *
3684 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
3685                                                        Value *Cond,
3686                                                        bool ExitWhen) {
3687   PHINode *PN = getConstantEvolvingPHI(Cond, L);
3688   if (PN == 0) return getCouldNotCompute();
3689 
3690   // Since the loop is canonicalized, the PHI node must have two entries.  One
3691   // entry must be a constant (coming in from outside of the loop), and the
3692   // second must be derived from the same PHI.
3693   bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3694   Constant *StartCST =
3695     dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3696   if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
3697 
3698   Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3699   PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3700   if (PN2 != PN) return getCouldNotCompute();  // Not derived from same PHI.
3701 
3702   // Okay, we found a PHI node that defines the trip count of this loop.  Execute
3703   // the loop symbolically to determine when the condition gets a value of
3704   // "ExitWhen".
3705   unsigned IterationNum = 0;
3706   unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
3707   for (Constant *PHIVal = StartCST;
3708        IterationNum != MaxIterations; ++IterationNum) {
3709     ConstantInt *CondVal =
3710       dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3711 
3712     // Couldn't symbolically evaluate.
3713     if (!CondVal) return getCouldNotCompute();
3714 
3715     if (CondVal->getValue() == uint64_t(ExitWhen)) {
3716       ++NumBruteForceTripCountsComputed;
3717       return getConstant(Type::Int32Ty, IterationNum);
3718     }
3719 
3720     // Compute the value of the PHI node for the next iteration.
3721     Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3722     if (NextPHI == 0 || NextPHI == PHIVal)
3723       return getCouldNotCompute();// Couldn't evaluate or not making progress...
3724     PHIVal = NextPHI;
3725   }
3726 
3727   // Too many iterations were needed to evaluate.
3728   return getCouldNotCompute();
3729 }
3730 
3731 /// getSCEVAtScope - Return a SCEV expression handle for the specified value
3732 /// at the specified scope in the program.  The L value specifies a loop
3733 /// nest to evaluate the expression at: null means the top-level scope, and a
3734 /// non-null loop means the scope immediately inside that loop.
3735 ///
3736 /// This method can be used to compute the exit value for a variable defined
3737 /// in a loop by querying what the value will hold in the parent loop.
3738 ///
3739 /// In the case that a relevant loop exit value cannot be computed, the
3740 /// original value V is returned.
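///
/// For example, given the recurrence {0,+,2}<L> and a backedge-taken count of
/// 9 for L, evaluating at the scope of L's parent yields 2*9 = 18, the value
/// the recurrence takes on the loop's final iteration.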
3741 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3742   // FIXME: this should be turned into a virtual method on SCEV!
3743 
3744   if (isa<SCEVConstant>(V)) return V;
3745 
3746   // If this instruction is evolved from a constant-evolving PHI, compute the
3747   // exit value from the loop without using SCEVs.
3748   if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
3749     if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
3750       const Loop *LI = (*this->LI)[I->getParent()];
3751       if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
3752         if (PHINode *PN = dyn_cast<PHINode>(I))
3753           if (PN->getParent() == LI->getHeader()) {
3754             // Okay, there is no closed form solution for the PHI node.  Check
3755             // to see if the loop that contains it has a known backedge-taken
3756             // count.  If so, we may be able to force computation of the exit
3757             // value.
3758             const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
3759             if (const SCEVConstant *BTCC =
3760                   dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
3761               // Okay, we know how many times the containing loop executes.  If
3762               // this is a constant evolving PHI node, get the final value at
3763               // the specified iteration number.
3764               Constant *RV = getConstantEvolutionLoopExitValue(PN,
3765                                                    BTCC->getValue()->getValue(),
3766                                                                LI);
3767               if (RV) return getSCEV(RV);
3768             }
3769           }
3770 
3771       // Okay, this is an expression that we cannot symbolically evaluate
3772       // into a SCEV.  Check to see if it's possible to symbolically evaluate
3773       // the arguments into constants, and if so, try to constant propagate the
3774       // result.  This is particularly useful for computing loop exit values.
3775       if (CanConstantFold(I)) {
3776         // Check to see if we've folded this instruction at this loop before.
3777         std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
3778         std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
3779           Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
3780         if (!Pair.second)
3781           return Pair.first->second ? getSCEV(Pair.first->second) : V;
3782 
3783         std::vector<Constant*> Operands;
3784         Operands.reserve(I->getNumOperands());
3785         for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3786           Value *Op = I->getOperand(i);
3787           if (Constant *C = dyn_cast<Constant>(Op)) {
3788             Operands.push_back(C);
3789           } else {
3790             // If an operand is non-constant and its type is neither
3791             // integer nor pointer, don't even try to analyze it with
3792             // SCEV techniques.
3793             if (!isSCEVable(Op->getType()))
3794               return V;
3795 
3796             const SCEV *OpV = getSCEVAtScope(Op, L);
3797             if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
3798               Constant *C = SC->getValue();
3799               if (C->getType() != Op->getType())
3800                 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3801                                                                   Op->getType(),
3802                                                                   false),
3803                                           C, Op->getType());
3804               Operands.push_back(C);
3805             } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
3806               if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
3807                 if (C->getType() != Op->getType())
3808                   C =
3809                     ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3810                                                                   Op->getType(),
3811                                                                   false),
3812                                           C, Op->getType());
3813                 Operands.push_back(C);
3814               } else
3815                 return V;
3816             } else {
3817               return V;
3818             }
3819           }
3820         }
3821 
3822         Constant *C;
3823         if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3824           C = ConstantFoldCompareInstOperands(CI->getPredicate(),
3825                                               &Operands[0], Operands.size(),
3826                                               getContext());
3827         else
3828           C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3829                                        &Operands[0], Operands.size(),
3830                                        getContext());
3831         Pair.first->second = C;
3832         return getSCEV(C);
3833       }
3834     }
3835 
3836     // This is some other type of SCEVUnknown; just return it.
3837     return V;
3838   }
3839 
3840   if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
3841     // Avoid performing the look-up in the common case where the specified
3842     // expression has no loop-variant portions.
3843     for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
3844       const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3845       if (OpAtScope != Comm->getOperand(i)) {
3846         // Okay, at least one of these operands is loop variant but might be
3847         // foldable.  Build a new instance of the folded commutative expression.
3848         SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
3849                                             Comm->op_begin()+i);
3850         NewOps.push_back(OpAtScope);
3851 
3852         for (++i; i != e; ++i) {
3853           OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3854           NewOps.push_back(OpAtScope);
3855         }
3856         if (isa<SCEVAddExpr>(Comm))
3857           return getAddExpr(NewOps);
3858         if (isa<SCEVMulExpr>(Comm))
3859           return getMulExpr(NewOps);
3860         if (isa<SCEVSMaxExpr>(Comm))
3861           return getSMaxExpr(NewOps);
3862         if (isa<SCEVUMaxExpr>(Comm))
3863           return getUMaxExpr(NewOps);
3864         llvm_unreachable("Unknown commutative SCEV type!");
3865       }
3866     }
3867     // If we got here, all operands are loop invariant.
3868     return Comm;
3869   }
3870 
3871   if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
3872     const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
3873     const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
3874     if (LHS == Div->getLHS() && RHS == Div->getRHS())
3875       return Div;   // must be loop invariant
3876     return getUDivExpr(LHS, RHS);
3877   }
3878 
3879   // If this is a loop recurrence for a loop that does not contain L, then we
3880   // are dealing with the final value computed by the loop.
3881   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
3882     if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
3883       // To evaluate this recurrence, we need to know how many times the AddRec
3884       // loop iterates.  Compute this now.
3885       const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
3886       if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
3887 
3888       // Then, evaluate the AddRec.
3889       return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
3890     }
3891     return AddRec;
3892   }
3893 
3894   if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
3895     const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3896     if (Op == Cast->getOperand())
3897       return Cast;  // must be loop invariant
3898     return getZeroExtendExpr(Op, Cast->getType());
3899   }
3900 
3901   if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
3902     const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3903     if (Op == Cast->getOperand())
3904       return Cast;  // must be loop invariant
3905     return getSignExtendExpr(Op, Cast->getType());
3906   }
3907 
3908   if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
3909     const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3910     if (Op == Cast->getOperand())
3911       return Cast;  // must be loop invariant
3912     return getTruncateExpr(Op, Cast->getType());
3913   }
3914 
3915   llvm_unreachable("Unknown SCEV type!");
3916   return 0;
3917 }
3918 
3919 /// getSCEVAtScope - This is a convenience function which does
3920 /// getSCEVAtScope(getSCEV(V), L).
3921 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
3922   return getSCEVAtScope(getSCEV(V), L);
3923 }
3924 
3925 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
3926 /// following equation:
3927 ///
3928 ///     A * X = B (mod N)
3929 ///
3930 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
3931 /// A and B isn't important.
3932 ///
3933 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
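///
/// For example, with BW = 4, A = 6, B = 4: D = gcd(6, 16) = 2 divides B, the
/// inverse of A/D = 3 modulo N/D = 8 is 3, and X = (3 * (4/2)) mod 8 = 6.
/// Indeed 6 * 6 = 36 == 4 (mod 16), and no smaller X satisfies the equation.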
3934 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
3935                                                ScalarEvolution &SE) {
3936   uint32_t BW = A.getBitWidth();
3937   assert(BW == B.getBitWidth() && "Bit widths must be the same.");
3938   assert(A != 0 && "A must be non-zero.");
3939 
3940   // 1. D = gcd(A, N)
3941   //
3942   // The gcd of A and N may have only one prime factor: 2. The number of
3943   // trailing zeros in A is its multiplicity.
3944   uint32_t Mult2 = A.countTrailingZeros();
3945   // D = 2^Mult2
3946 
3947   // 2. Check if B is divisible by D.
3948   //
3949   // B is divisible by D if and only if the multiplicity of prime factor 2 for B
3950   // is not less than the multiplicity of this prime factor for D.
3951   if (B.countTrailingZeros() < Mult2)
3952     return SE.getCouldNotCompute();
3953 
3954   // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
3955   // modulo (N / D).
3956   //
3957   // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
3958   // bit width during computations.
3959   APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
3960   APInt Mod(BW + 1, 0);
3961   Mod.set(BW - Mult2);  // Mod = N / D
3962   APInt I = AD.multiplicativeInverse(Mod);
3963 
3964   // 4. Compute the minimum unsigned root of the equation:
3965   // I * (B / D) mod (N / D)
3966   APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
3967 
3968   // The result is guaranteed to be less than 2^BW so we may truncate it to BW
3969   // bits.
3970   return SE.getConstant(Result.trunc(BW));
3971 }
3972 
3973 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
3974 /// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
3975 /// might be the same) or two SCEVCouldNotCompute objects.
3976 ///
3977 static std::pair<const SCEV *,const SCEV *>
3978 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
3979   assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
3980   const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
3981   const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
3982   const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
3983 
3984   // We currently can only solve this if the coefficients are constants.
3985   if (!LC || !MC || !NC) {
3986     const SCEV *CNC = SE.getCouldNotCompute();
3987     return std::make_pair(CNC, CNC);
3988   }
3989 
3990   uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
3991   const APInt &L = LC->getValue()->getValue();
3992   const APInt &M = MC->getValue()->getValue();
3993   const APInt &N = NC->getValue()->getValue();
3994   APInt Two(BitWidth, 2);
3995   APInt Four(BitWidth, 4);
3996 
3997   {
3998     using namespace APIntOps;
3999     const APInt& C = L;
4000     // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
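    // (At iteration x the chrec's value is L + M*x + N*x*(x-1)/2, which
    //  expands to (N/2)x^2 + (M - N/2)x + L.)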
4001     // The B coefficient is M-N/2
4002     APInt B(M);
4003     B -= sdiv(N,Two);
4004 
4005     // The A coefficient is N/2
4006     APInt A(N.sdiv(Two));
4007 
4008     // Compute the B^2-4ac term.
4009     APInt SqrtTerm(B);
4010     SqrtTerm *= B;
4011     SqrtTerm -= Four * (A * C);
4012 
4013     // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4014     // integer value or else APInt::sqrt() will assert.
4015     APInt SqrtVal(SqrtTerm.sqrt());
4016 
4017     // Compute the two solutions for the quadratic formula.
4018     // The divisions must be performed as signed divisions.
4019     APInt NegB(-B);
4020     APInt TwoA(A << 1);
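    // If 2A is zero, the chrec is actually linear; bail out rather than
    // divide by zero below.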
4021     if (TwoA.isMinValue()) {
4022       const SCEV *CNC = SE.getCouldNotCompute();
4023       return std::make_pair(CNC, CNC);
4024     }
4025 
4026     LLVMContext &Context = SE.getContext();
4027 
4028     ConstantInt *Solution1 =
4029       ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
4030     ConstantInt *Solution2 =
4031       ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
4032 
4033     return std::make_pair(SE.getConstant(Solution1),
4034                           SE.getConstant(Solution2));
4035   } // end APIntOps scope
4036 }
4037 
4038 /// HowFarToZero - Return the number of times a backedge comparing the specified
4039 /// value to zero will execute.  If not computable, return CouldNotCompute.
4040 const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4041   // If the value is a constant:
4042   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4043     // If the value is already zero, the branch will execute zero times.
4044     if (C->getValue()->isZero()) return C;
4045     return getCouldNotCompute();  // Otherwise it will loop infinitely.
4046   }
4047 
4048   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4049   if (!AddRec || AddRec->getLoop() != L)
4050     return getCouldNotCompute();
4051 
4052   if (AddRec->isAffine()) {
4053     // If this is an affine expression, the execution count of this branch is
4054     // the minimum unsigned root of the following equation:
4055     //
4056     //     Start + Step*N = 0 (mod 2^BW)
4057     //
4058     // equivalent to:
4059     //
4060     //             Step*N = -Start (mod 2^BW)
4061     //
4062     // where BW is the common bit width of Start and Step.
4063 
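    // For example, {8,+,-2} reaches zero when -2*N == -8 (mod 2^BW); the
    // smallest unsigned solution is N = 4 (the values are 8, 6, 4, 2, 0).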
4064     // Get the initial value for the loop.
4065     const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4066                                        L->getParentLoop());
4067     const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4068                                       L->getParentLoop());
4069 
4070     if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4071       // For now we handle only constant steps.
4072 
4073       // First, handle unitary steps.
4074       if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4075         return getNegativeSCEV(Start);       //   N = -Start (as unsigned)
4076       if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4077         return Start;                           //    N = Start (as unsigned)
4078 
4079       // Then, try to solve the above equation provided that Start is constant.
4080       if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4081         return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4082                                             -StartC->getValue()->getValue(),
4083                                             *this);
4084     }
4085   } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
4086     // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4087     // the quadratic equation to solve it.
4088     std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4089                                                                     *this);
4090     const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4091     const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4092     if (R1) {
4093 #if 0
4094       errs() << "HFTZ: " << *V << " - sol#1: " << *R1
4095              << "  sol#2: " << *R2 << "\n";
4096 #endif
4097       // Pick the smallest positive root value.
4098       if (ConstantInt *CB =
4099           dyn_cast<ConstantInt>(getContext().getConstantExprICmp(ICmpInst::ICMP_ULT,
4100                                    R1->getValue(), R2->getValue()))) {
4101         if (!CB->getZExtValue())
4102           std::swap(R1, R2);   // R1 is the minimum root now.
4103 
4104         // We can only use this value if the chrec ends up with an exact zero
4105         // value at this index.  When solving for "X*X != 5", for example, we
4106         // should not accept a root of 2.
4107         const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4108         if (Val->isZero())
4109           return R1;  // We found a quadratic root!
4110       }
4111     }
4112   }
4113 
4114   return getCouldNotCompute();
4115 }
4116 
4117 /// HowFarToNonZero - Return the number of times a backedge checking the
4118 /// specified value for nonzero will execute.  If not computable, return
4119 /// CouldNotCompute.
4120 const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4121   // Loops that look like: while (X == 0) are very strange indeed.  We don't
4122   // handle them yet except for the trivial case.  This could be expanded in the
4123   // future as needed.
4124 
4125   // If the value is a constant, check to see if it is known to be non-zero
4126   // already.  If so, the backedge will execute zero times.
4127   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4128     if (!C->getValue()->isNullValue())
4129       return getIntegerSCEV(0, C->getType());
4130     return getCouldNotCompute();  // Otherwise it will loop infinitely.
4131   }
4132 
4133   // We could implement others, but I really doubt anyone writes loops like
4134   // this, and if they did, they would already be constant folded.
4135   return getCouldNotCompute();
4136 }
4137 
4138 /// getLoopPredecessor - If the given loop's header has exactly one unique
4139 /// predecessor outside the loop, return it. Otherwise return null.
4140 ///
4141 BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
4142   BasicBlock *Header = L->getHeader();
4143   BasicBlock *Pred = 0;
4144   for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4145        PI != E; ++PI)
4146     if (!L->contains(*PI)) {
4147       if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4148       Pred = *PI;
4149     }
4150   return Pred;
4151 }
4152 
4153 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4154 /// (which may not be an immediate predecessor) which has exactly one
4155 /// successor from which BB is reachable, or null if no such block is
4156 /// found.
4157 ///
4158 BasicBlock *
4159 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4160   // If the block has a unique predecessor, then there is no path from the
4161   // predecessor to the block that does not go through the direct edge
4162   // from the predecessor to the block.
4163   if (BasicBlock *Pred = BB->getSinglePredecessor())
4164     return Pred;
4165 
4166   // A loop's header is defined to be a block that dominates the loop.
4167   // If the header has a unique predecessor outside the loop, it must be
4168   // a block that has exactly one successor that can reach the loop.
4169   if (Loop *L = LI->getLoopFor(BB))
4170     return getLoopPredecessor(L);
4171 
4172   return 0;
4173 }
4174 
4175 /// HasSameValue - SCEV structural equivalence is usually sufficient for
4176 /// testing whether two expressions are equal, however for the purposes of
4177 /// looking for a condition guarding a loop, it can be useful to be a little
4178 /// more general, since a front-end may have replicated the controlling
4179 /// expression.
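///
/// For example, a front-end that replicates a controlling expression may emit
/// two separate but identical "icmp" instructions with the same operands;
/// they are distinct SCEVUnknowns, but Instruction::isIdenticalTo recognizes
/// them as computing the same value.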
4180 ///
4181 static bool HasSameValue(const SCEV *A, const SCEV *B) {
4182   // Quick check to see if they are the same SCEV.
4183   if (A == B) return true;
4184 
4185   // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4186   // two different instructions with the same value. Check for this case.
4187   if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4188     if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4189       if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4190         if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4191           if (AI->isIdenticalTo(BI))
4192             return true;
4193 
4194   // Otherwise assume they may have a different value.
4195   return false;
4196 }
4197 
4198 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
4199   return getSignedRange(S).getSignedMax().isNegative();
4200 }
4201 
4202 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
4203   return getSignedRange(S).getSignedMin().isStrictlyPositive();
4204 }
4205 
4206 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
4207   return !getSignedRange(S).getSignedMin().isNegative();
4208 }
4209 
4210 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
4211   return !getSignedRange(S).getSignedMax().isStrictlyPositive();
4212 }
4213 
4214 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
4215   return isKnownNegative(S) || isKnownPositive(S);
4216 }
4217 
4218 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
4219                                        const SCEV *LHS, const SCEV *RHS) {
4220 
4221   if (HasSameValue(LHS, RHS))
4222     return ICmpInst::isTrueWhenEqual(Pred);
4223 
4224   switch (Pred) {
4225   default:
4226     llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4227     break;
4228   case ICmpInst::ICMP_SGT:
4229     Pred = ICmpInst::ICMP_SLT;
4230     std::swap(LHS, RHS);
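    // FALL THROUGH: SGT with the operands swapped is SLT.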
4231   case ICmpInst::ICMP_SLT: {
4232     ConstantRange LHSRange = getSignedRange(LHS);
4233     ConstantRange RHSRange = getSignedRange(RHS);
4234     if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
4235       return true;
4236     if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
4237       return false;
4238     break;
4239   }
4240   case ICmpInst::ICMP_SGE:
4241     Pred = ICmpInst::ICMP_SLE;
4242     std::swap(LHS, RHS);
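    // FALL THROUGH: SGE with the operands swapped is SLE.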
4243   case ICmpInst::ICMP_SLE: {
4244     ConstantRange LHSRange = getSignedRange(LHS);
4245     ConstantRange RHSRange = getSignedRange(RHS);
4246     if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
4247       return true;
4248     if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
4249       return false;
4250     break;
4251   }
4252   case ICmpInst::ICMP_UGT:
4253     Pred = ICmpInst::ICMP_ULT;
4254     std::swap(LHS, RHS);
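    // FALL THROUGH: UGT with the operands swapped is ULT.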
4255   case ICmpInst::ICMP_ULT: {
4256     ConstantRange LHSRange = getUnsignedRange(LHS);
4257     ConstantRange RHSRange = getUnsignedRange(RHS);
4258     if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
4259       return true;
4260     if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
4261       return false;
4262     break;
4263   }
4264   case ICmpInst::ICMP_UGE:
4265     Pred = ICmpInst::ICMP_ULE;
4266     std::swap(LHS, RHS);
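    // FALL THROUGH: UGE with the operands swapped is ULE.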
4267   case ICmpInst::ICMP_ULE: {
4268     ConstantRange LHSRange = getUnsignedRange(LHS);
4269     ConstantRange RHSRange = getUnsignedRange(RHS);
4270     if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
4271       return true;
4272     if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
4273       return false;
4274     break;
4275   }
4276   case ICmpInst::ICMP_NE: {
4277     if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
4278       return true;
4279     if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
4280       return true;
4281 
4282     const SCEV *Diff = getMinusSCEV(LHS, RHS);
4283     if (isKnownNonZero(Diff))
4284       return true;
4285     break;
4286   }
4287   case ICmpInst::ICMP_EQ:
4288     // The check at the top of the function catches the case where
4289     // the values are known to be equal.
4290     break;
4291   }
4292   return false;
4293 }
4294 
4295 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
4296 /// protected by a conditional between LHS and RHS.  This is used to
4297 /// eliminate casts.
4298 bool
4299 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
4300                                              ICmpInst::Predicate Pred,
4301                                              const SCEV *LHS, const SCEV *RHS) {
4302   // Interpret a null as meaning no loop; with no backedge to guard, the
4303   // predicate holds vacuously (interprocedural conditions notwithstanding).
4304   if (!L) return true;
4305 
4306   BasicBlock *Latch = L->getLoopLatch();
4307   if (!Latch)
4308     return false;
4309 
4310   BranchInst *LoopContinuePredicate =
4311     dyn_cast<BranchInst>(Latch->getTerminator());
4312   if (!LoopContinuePredicate ||
4313       LoopContinuePredicate->isUnconditional())
4314     return false;
4315 
4316   return isImpliedCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
4317                        LoopContinuePredicate->getSuccessor(0) != L->getHeader());
4318 }
4319 
4320 /// isLoopGuardedByCond - Test whether entry to the loop is protected
4321 /// by a conditional between LHS and RHS.  This is used to help avoid max
4322 /// expressions in loop trip counts, and to eliminate casts.
4323 bool
4324 ScalarEvolution::isLoopGuardedByCond(const Loop *L,
4325                                      ICmpInst::Predicate Pred,
4326                                      const SCEV *LHS, const SCEV *RHS) {
4327   // Interpret a null as meaning no loop, where there is obviously no guard
4328   // (interprocedural conditions notwithstanding).
4329   if (!L) return false;
4330 
4331   BasicBlock *Predecessor = getLoopPredecessor(L);
4332   BasicBlock *PredecessorDest = L->getHeader();
4333 
4334   // Starting at the loop predecessor, climb up the predecessor chain, as long
4335   // as there are predecessors that can be found that have unique successors
4336   // leading to the original header.
4337   for (; Predecessor;
4338        PredecessorDest = Predecessor,
4339        Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
4340 
4341     BranchInst *LoopEntryPredicate =
4342       dyn_cast<BranchInst>(Predecessor->getTerminator());
4343     if (!LoopEntryPredicate ||
4344         LoopEntryPredicate->isUnconditional())
4345       continue;
4346 
4347     if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
4348                       LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
4349       return true;
4350   }
4351 
4352   return false;
4353 }
4354 
4355 /// isImpliedCond - Test whether the condition described by Pred, LHS,
4356 /// and RHS is true whenever the given Cond value evaluates to true.
4357 bool ScalarEvolution::isImpliedCond(Value *CondValue,
4358                                     ICmpInst::Predicate Pred,
4359                                     const SCEV *LHS, const SCEV *RHS,
4360                                     bool Inverse) {
4361   // Recursively handle And and Or conditions.
4362   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
4363     if (BO->getOpcode() == Instruction::And) {
4364       if (!Inverse)
4365         return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4366                isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4367     } else if (BO->getOpcode() == Instruction::Or) {
4368       if (Inverse)
4369         return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4370                isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4371     }
4372   }
4373 
4374   ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
4375   if (!ICI) return false;
4376 
4377   // Bail if the ICmp's operands' types are wider than the needed type
4378   // before attempting to call getSCEV on them. This avoids infinite
4379   // recursion, since the analysis of widening casts can require loop
4380   // exit condition information for overflow checking, which would
4381   // lead back here.
4382   if (getTypeSizeInBits(LHS->getType()) <
4383       getTypeSizeInBits(ICI->getOperand(0)->getType()))
4384     return false;
4385 
4386   // Now that we found a conditional branch that dominates the loop, check to
4387   // see if it is the comparison we are looking for.
4388   ICmpInst::Predicate FoundPred;
4389   if (Inverse)
4390     FoundPred = ICI->getInversePredicate();
4391   else
4392     FoundPred = ICI->getPredicate();
4393 
4394   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
4395   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
4396 
4397   // Balance the types. The case where FoundLHS' type is wider than
4398   // LHS' type is checked for above.
4399   if (getTypeSizeInBits(LHS->getType()) >
4400       getTypeSizeInBits(FoundLHS->getType())) {
4401     if (CmpInst::isSigned(Pred)) {
4402       FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
4403       FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
4404     } else {
4405       FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
4406       FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
4407     }
4408   }
4409 
4410   // Canonicalize the query to match the way instcombine will have
4411   // canonicalized the comparison.
4412   // First, put a constant operand on the right.
4413   if (isa<SCEVConstant>(LHS)) {
4414     std::swap(LHS, RHS);
4415     Pred = ICmpInst::getSwappedPredicate(Pred);
4416   }
4417   // Then, canonicalize comparisons with boundary cases.
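  // For example, "x u>= 1" becomes "x != 0" and "x u<= 0" becomes "x == 0".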
4418   if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
4419     const APInt &RA = RC->getValue()->getValue();
4420     switch (Pred) {
4421     default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4422     case ICmpInst::ICMP_EQ:
4423     case ICmpInst::ICMP_NE:
4424       break;
4425     case ICmpInst::ICMP_UGE:
4426       if ((RA - 1).isMinValue()) {
4427         Pred = ICmpInst::ICMP_NE;
4428         RHS = getConstant(RA - 1);
4429         break;
4430       }
4431       if (RA.isMaxValue()) {
4432         Pred = ICmpInst::ICMP_EQ;
4433         break;
4434       }
4435       if (RA.isMinValue()) return true;
4436       break;
4437     case ICmpInst::ICMP_ULE:
4438       if ((RA + 1).isMaxValue()) {
4439         Pred = ICmpInst::ICMP_NE;
4440         RHS = getConstant(RA + 1);
4441         break;
4442       }
4443       if (RA.isMinValue()) {
4444         Pred = ICmpInst::ICMP_EQ;
4445         break;
4446       }
4447       if (RA.isMaxValue()) return true;
4448       break;
4449     case ICmpInst::ICMP_SGE:
4450       if ((RA - 1).isMinSignedValue()) {
4451         Pred = ICmpInst::ICMP_NE;
4452         RHS = getConstant(RA - 1);
4453         break;
4454       }
4455       if (RA.isMaxSignedValue()) {
4456         Pred = ICmpInst::ICMP_EQ;
4457         break;
4458       }
4459       if (RA.isMinSignedValue()) return true;
4460       break;
4461     case ICmpInst::ICMP_SLE:
4462       if ((RA + 1).isMaxSignedValue()) {
4463         Pred = ICmpInst::ICMP_NE;
4464         RHS = getConstant(RA + 1);
4465         break;
4466       }
4467       if (RA.isMinSignedValue()) {
4468         Pred = ICmpInst::ICMP_EQ;
4469         break;
4470       }
4471       if (RA.isMaxSignedValue()) return true;
4472       break;
4473     case ICmpInst::ICMP_UGT:
4474       if (RA.isMinValue()) {
4475         Pred = ICmpInst::ICMP_NE;
4476         break;
4477       }
4478       if ((RA + 1).isMaxValue()) {
4479         Pred = ICmpInst::ICMP_EQ;
4480         RHS = getConstant(RA + 1);
4481         break;
4482       }
4483       if (RA.isMaxValue()) return false;
4484       break;
4485     case ICmpInst::ICMP_ULT:
4486       if (RA.isMaxValue()) {
4487         Pred = ICmpInst::ICMP_NE;
4488         break;
4489       }
4490       if ((RA - 1).isMinValue()) {
4491         Pred = ICmpInst::ICMP_EQ;
4492         RHS = getConstant(RA - 1);
4493         break;
4494       }
4495       if (RA.isMinValue()) return false;
4496       break;
4497     case ICmpInst::ICMP_SGT:
4498       if (RA.isMinSignedValue()) {
4499         Pred = ICmpInst::ICMP_NE;
4500         break;
4501       }
4502       if ((RA + 1).isMaxSignedValue()) {
4503         Pred = ICmpInst::ICMP_EQ;
4504         RHS = getConstant(RA + 1);
4505         break;
4506       }
4507       if (RA.isMaxSignedValue()) return false;
4508       break;
4509     case ICmpInst::ICMP_SLT:
4510       if (RA.isMaxSignedValue()) {
4511         Pred = ICmpInst::ICMP_NE;
4512         break;
4513       }
4514       if ((RA - 1).isMinSignedValue()) {
4515         Pred = ICmpInst::ICMP_EQ;
4516         RHS = getConstant(RA - 1);
4517         break;
4518       }
4519       if (RA.isMinSignedValue()) return false;
4520       break;
4521     }
4522   }
4523 
4524   // Check to see if we can make the LHS or RHS match.
4525   if (LHS == FoundRHS || RHS == FoundLHS) {
4526     if (isa<SCEVConstant>(RHS)) {
4527       std::swap(FoundLHS, FoundRHS);
4528       FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
4529     } else {
4530       std::swap(LHS, RHS);
4531       Pred = ICmpInst::getSwappedPredicate(Pred);
4532     }
4533   }
4534 
4535   // Check whether the found predicate is the same as the desired predicate.
4536   if (FoundPred == Pred)
4537     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
4538 
4539   // Check whether swapping the found predicate makes it the same as the
4540   // desired predicate.
4541   if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
4542     if (isa<SCEVConstant>(RHS))
4543       return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
4544     else
4545       return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
4546                                    RHS, LHS, FoundLHS, FoundRHS);
4547   }
4548 
4549   // Check whether the found condition is stronger than the one needed.
4550   if (FoundPred == ICmpInst::ICMP_EQ)
4551     if (ICmpInst::isTrueWhenEqual(Pred))
4552       if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
4553         return true;
4554   if (Pred == ICmpInst::ICMP_NE)
4555     if (!ICmpInst::isTrueWhenEqual(FoundPred))
4556       if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
4557         return true;
4558 
4559   // Otherwise assume the worst.
4560   return false;
4561 }
4562 
4563 /// isImpliedCondOperands - Test whether the condition described by Pred,
4564 /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
4565 /// and FoundRHS is true.
4566 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
4567                                             const SCEV *LHS, const SCEV *RHS,
4568                                             const SCEV *FoundLHS,
4569                                             const SCEV *FoundRHS) {
4570   return isImpliedCondOperandsHelper(Pred, LHS, RHS,
4571                                      FoundLHS, FoundRHS) ||
4572          // ~x < ~y --> x > y
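         //   (since ~x == -1 - x, the map x -> ~x reverses both the signed
         //   and unsigned orders, so the implication carries over to the
         //   not'd operands with the sides swapped)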
4573          isImpliedCondOperandsHelper(Pred, LHS, RHS,
4574                                      getNotSCEV(FoundRHS),
4575                                      getNotSCEV(FoundLHS));
4576 }
4577 
4578 /// isImpliedCondOperandsHelper - Test whether the condition described by
4579 /// Pred, LHS, and RHS is true whenever the condition described by Pred,
4580 /// FoundLHS, and FoundRHS is true.
4581 bool
4582 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
4583                                              const SCEV *LHS, const SCEV *RHS,
4584                                              const SCEV *FoundLHS,
4585                                              const SCEV *FoundRHS) {
4586   switch (Pred) {
4587   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4588   case ICmpInst::ICMP_EQ:
4589   case ICmpInst::ICMP_NE:
4590     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
4591       return true;
4592     break;
4593   case ICmpInst::ICMP_SLT:
4594   case ICmpInst::ICMP_SLE:
4595     if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
4596         isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
4597       return true;
4598     break;
4599   case ICmpInst::ICMP_SGT:
4600   case ICmpInst::ICMP_SGE:
4601     if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
4602         isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
4603       return true;
4604     break;
4605   case ICmpInst::ICMP_ULT:
4606   case ICmpInst::ICMP_ULE:
4607     if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
4608         isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
4609       return true;
4610     break;
4611   case ICmpInst::ICMP_UGT:
4612   case ICmpInst::ICMP_UGE:
4613     if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
4614         isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
4615       return true;
4616     break;
4617   }
4618 
4619   return false;
4620 }
4621 
4622 /// getBECount - Subtract the end and start values and divide by the step,
4623 /// rounding up, to get the number of times the backedge is executed. Return
4624 /// CouldNotCompute if an intermediate computation overflows.
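///
/// For example, Start = 0, End = 10, Step = 3 gives (10 - 0 + (3 - 1)) /u 3
/// = 4, counting the four values 0, 3, 6, and 9 below the limit.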
4625 const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
4626                                         const SCEV *End,
4627                                         const SCEV *Step) {
4628   const Type *Ty = Start->getType();
4629   const SCEV *NegOne = getIntegerSCEV(-1, Ty);
4630   const SCEV *Diff = getMinusSCEV(End, Start);
4631   const SCEV *RoundUp = getAddExpr(Step, NegOne);
4632 
4633   // Add an adjustment to the difference between End and Start so that
4634   // the division will effectively round up.
4635   const SCEV *Add = getAddExpr(Diff, RoundUp);
4636 
4637   // Check Add for unsigned overflow.
4638   // TODO: More sophisticated things could be done here.
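  // If the narrow add cannot wrap, zext(Diff + RoundUp) folds to
  // zext(Diff) + zext(RoundUp); proving the two wide expressions equal
  // therefore proves that the narrow addition does not overflow.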
4639   const Type *WideTy = getContext().getIntegerType(getTypeSizeInBits(Ty) + 1);
4640   const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
4641   const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
4642   const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
4643   if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
4644     return getCouldNotCompute();
4645 
4646   return getUDivExpr(Add, Step);
4647 }
4648 
4649 /// HowManyLessThans - Return the number of times a backedge containing the
4650 /// specified less-than comparison will execute.  If not computable, return
4651 /// CouldNotCompute.
4652 ScalarEvolution::BackedgeTakenInfo
4653 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
4654                                   const Loop *L, bool isSigned) {
4655   // Only handle:  "ADDREC < LoopInvariant".
4656   if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
4657 
4658   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
4659   if (!AddRec || AddRec->getLoop() != L)
4660     return getCouldNotCompute();
4661 
4662   if (AddRec->isAffine()) {
4663     // FORNOW: We only support constant positive strides.
4664     unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
4665     const SCEV *Step = AddRec->getStepRecurrence(*this);
4666 
4667     // TODO: handle non-constant strides.
4668     const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
4669     if (!CStep || CStep->isZero())
4670       return getCouldNotCompute();
4671     if (CStep->isOne()) {
4672       // With unit stride, the iteration never steps past the limit value.
4673     } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
4674       if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
4675         // Test whether a positive iteration can step past the limit
4676         // value and past the maximum value for its type in a single step.
4677         if (isSigned) {
4678           APInt Max = APInt::getSignedMaxValue(BitWidth);
4679           if ((Max - CStep->getValue()->getValue())
4680                 .slt(CLimit->getValue()->getValue()))
4681             return getCouldNotCompute();
4682         } else {
4683           APInt Max = APInt::getMaxValue(BitWidth);
4684           if ((Max - CStep->getValue()->getValue())
4685                 .ult(CLimit->getValue()->getValue()))
4686             return getCouldNotCompute();
4687         }
4688       } else
4689         // TODO: handle non-constant limit values below.
4690         return getCouldNotCompute();
4691     } else
4692       // TODO: handle negative strides below.
4693       return getCouldNotCompute();
4694 
4695     // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
4696     // m.  So, we count the number of iterations in which {n,+,s} < m is true.
4697     // Note that we cannot simply return max(m-n,0)/s because it's not safe to
4698     // treat m-n as either signed or unsigned due to the possibility of overflow.
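    // For example, with 8-bit operands, n = 200, m = 100, s = 1: the
    // comparison {200,+,1} <u 100 is never true, so the count is 0, yet
    // m-n wraps to 156 when treated as unsigned.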
4699 
4700     // First, we get the value of the LHS in the first iteration: n
4701     const SCEV *Start = AddRec->getOperand(0);
4702 
4703     // Determine the minimum constant start value.
4704     const SCEV *MinStart = getConstant(isSigned ?
4705       getSignedRange(Start).getSignedMin() :
4706       getUnsignedRange(Start).getUnsignedMin());
4707 
4708     // If we know that the condition is true in order to enter the loop,
4709     // then we know that it will run exactly (m-n)/s times. Otherwise, we
4710     // only know that it will execute (max(m,n)-n)/s times. In both cases,
4711     // the division must round up.
4712     const SCEV *End = RHS;
4713     if (!isLoopGuardedByCond(L,
4714                              isSigned ? ICmpInst::ICMP_SLT :
4715                                         ICmpInst::ICMP_ULT,
4716                              getMinusSCEV(Start, Step), RHS))
4717       End = isSigned ? getSMaxExpr(RHS, Start)
4718                      : getUMaxExpr(RHS, Start);
4719 
4720     // Determine the maximum constant end value.
4721     const SCEV *MaxEnd = getConstant(isSigned ?
4722       getSignedRange(End).getSignedMax() :
4723       getUnsignedRange(End).getUnsignedMax());
4724 
4725     // Finally, we subtract these two values and divide, rounding up, to get
4726     // the number of times the backedge is executed.
4727     const SCEV *BECount = getBECount(Start, End, Step);
4728 
4729     // The maximum backedge count is similar, except using the minimum start
4730     // value and the maximum end value.
4731     const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step);
4732 
4733     return BackedgeTakenInfo(BECount, MaxBECount);
4734   }
4735 
4736   return getCouldNotCompute();
4737 }
4738 
4739 /// getNumIterationsInRange - Return the number of iterations of this loop that
4740 /// produce values in the specified constant range.  Another way of looking at
4741 /// this is that it returns the first iteration number where the value is not in
4742 /// the condition, thus computing the exit count. If the iteration count can't
4743 /// be computed, an instance of SCEVCouldNotCompute is returned.
4744 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
4745                                                     ScalarEvolution &SE) const {
4746   if (Range.isFullSet())  // Infinite loop.
4747     return SE.getCouldNotCompute();
4748 
4749   // If the start is a non-zero constant, shift the range to simplify things.
4750   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
4751     if (!SC->getValue()->isZero()) {
4752       SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
4753       Operands[0] = SE.getIntegerSCEV(0, SC->getType());
4754       const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
4755       if (const SCEVAddRecExpr *ShiftedAddRec =
4756             dyn_cast<SCEVAddRecExpr>(Shifted))
4757         return ShiftedAddRec->getNumIterationsInRange(
4758                            Range.subtract(SC->getValue()->getValue()), SE);
4759       // This is strange and shouldn't happen.
4760       return SE.getCouldNotCompute();
4761     }
4762 
4763   // The only time we can solve this is when we have all constant indices.
4764   // Otherwise, we cannot determine the overflow conditions.
4765   for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
4766     if (!isa<SCEVConstant>(getOperand(i)))
4767       return SE.getCouldNotCompute();
4768 
4770   // Okay, at this point we know that all elements of the chrec are constants
4771   // and that the start element is zero.
4772 
4773   // First check to see if the range contains zero.  If not, the first
4774   // iteration exits.
4775   unsigned BitWidth = SE.getTypeSizeInBits(getType());
4776   if (!Range.contains(APInt(BitWidth, 0)))
4777     return SE.getIntegerSCEV(0, getType());
4778 
4779   if (isAffine()) {
4780     // If this is an affine expression then we have this situation:
4781     //   Solve {0,+,A} in Range  ===  Ax in Range
4782 
4783     // We know that zero is in the range.  If A is positive then we know that
4784     // the upper value of the range must be the first possible exit value.
4785     // If A is negative then the lower of the range is the last possible loop
4786     // value.  Also note that we already checked for a full range.
4787     APInt One(BitWidth,1);
4788     APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
4789     APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
4790 
4791     // The exit value should be (End+A)/A.
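    // For example, for Range = [0,10) and A = 3: End = 9 and ExitVal =
    // (9+3)/3 = 4; iteration 4 produces 12 (outside the range) while
    // iteration 3 produces 9 (still inside it).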
4792     APInt ExitVal = (End + A).udiv(A);
4793     ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
4794 
4795     // Evaluate at the exit value.  If we really did fall out of the valid
4796     // range, then we computed our trip count; otherwise wraparound or other
4797     // strange things must have happened.
4798     ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
4799     if (Range.contains(Val->getValue()))
4800       return SE.getCouldNotCompute();  // Something strange happened
4801 
4802     // Ensure that the previous value is in the range.  This is a sanity check.
4803     assert(Range.contains(
4804            EvaluateConstantChrecAtConstant(this,
4805            ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
4806            "Linear scev computation is off in a bad way!");
4807     return SE.getConstant(ExitValue);
4808   } else if (isQuadratic()) {
4809     // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
4810     // quadratic equation to solve it.  To do this, we must frame our problem in
4811     // terms of figuring out when zero is crossed, instead of when
4812     // Range.getUpper() is crossed.
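    // Subtracting Range.getUpper() from the start value yields a chrec that
    // crosses zero exactly when the original chrec crosses Range.getUpper().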
4813     SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
4814     NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
4815     const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
4816 
4817     // Next, solve the constructed addrec
4818     std::pair<const SCEV *,const SCEV *> Roots =
4819       SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
4820     const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4821     const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4822     if (R1) {
4823       // Pick the smallest positive root value.
4824       if (ConstantInt *CB =
4825           dyn_cast<ConstantInt>(
4826                        SE.getContext().getConstantExprICmp(ICmpInst::ICMP_ULT,
4827                          R1->getValue(), R2->getValue()))) {
4828         if (!CB->getZExtValue())
4829           std::swap(R1, R2);   // R1 is the minimum root now.
4830 
4831         // Make sure the root is not off by one.  The returned iteration should
4832         // not be in the range, but the previous one should be.  When solving
4833         // for "X*X < 5", for example, we should not return a root of 2.
4834         ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
4835                                                              R1->getValue(),
4836                                                              SE);
4837         if (Range.contains(R1Val->getValue())) {
4838           // The next iteration must be out of the range...
4839           ConstantInt *NextVal =
4840                 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
4841 
4842           R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4843           if (!Range.contains(R1Val->getValue()))
4844             return SE.getConstant(NextVal);
4845           return SE.getCouldNotCompute();  // Something strange happened
4846         }
4847 
4848         // If R1 was not in the range, then it is a good return value.  Make
4849         // sure that R1-1 WAS in the range though, just in case.
4850         ConstantInt *NextVal =
4851                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
4852         R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4853         if (Range.contains(R1Val->getValue()))
4854           return R1;
4855         return SE.getCouldNotCompute();  // Something strange happened
4856       }
4857     }
4858   }
4859 
4860   return SE.getCouldNotCompute();
4861 }
4862 
4865 //===----------------------------------------------------------------------===//
4866 //                   SCEVCallbackVH Class Implementation
4867 //===----------------------------------------------------------------------===//
4868 
4869 void ScalarEvolution::SCEVCallbackVH::deleted() {
4870   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4871   if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
4872     SE->ConstantEvolutionLoopExitValue.erase(PN);
4873   if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
4874     SE->ValuesAtScopes.erase(I);
4875   SE->Scalars.erase(getValPtr());
4876   // this now dangles!
4877 }
4878 
4879 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
4880   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4881 
4882   // Forget all the expressions associated with users of the old value,
4883   // so that future queries will recompute the expressions using the new
4884   // value.
4885   SmallVector<User *, 16> Worklist;
4886   SmallPtrSet<User *, 8> Visited;
4887   Value *Old = getValPtr();
4888   bool DeleteOld = false;
4889   for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
4890        UI != UE; ++UI)
4891     Worklist.push_back(*UI);
4892   while (!Worklist.empty()) {
4893     User *U = Worklist.pop_back_val();
4894     // Deleting the Old value will cause this to dangle. Postpone
4895     // that until everything else is done.
4896     if (U == Old) {
4897       DeleteOld = true;
4898       continue;
4899     }
4900     if (!Visited.insert(U))
4901       continue;
4902     if (PHINode *PN = dyn_cast<PHINode>(U))
4903       SE->ConstantEvolutionLoopExitValue.erase(PN);
4904     if (Instruction *I = dyn_cast<Instruction>(U))
4905       SE->ValuesAtScopes.erase(I);
4906     SE->Scalars.erase(U);
4907     for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
4908          UI != UE; ++UI)
4909       Worklist.push_back(*UI);
4910   }
4911   // Delete the Old value if it (indirectly) references itself.
4912   if (DeleteOld) {
4913     if (PHINode *PN = dyn_cast<PHINode>(Old))
4914       SE->ConstantEvolutionLoopExitValue.erase(PN);
4915     if (Instruction *I = dyn_cast<Instruction>(Old))
4916       SE->ValuesAtScopes.erase(I);
4917     SE->Scalars.erase(Old);
4918     // this now dangles!
4919   }
4920   // this may dangle!
4921 }
4922 
4923 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
4924   : CallbackVH(V), SE(se) {}
4925 
4926 //===----------------------------------------------------------------------===//
4927 //                   ScalarEvolution Class Implementation
4928 //===----------------------------------------------------------------------===//
4929 
4930 ScalarEvolution::ScalarEvolution()
4931   : FunctionPass(&ID) {
4932 }
4933 
4934 bool ScalarEvolution::runOnFunction(Function &F) {
4935   this->F = &F;
4936   LI = &getAnalysis<LoopInfo>();
4937   TD = getAnalysisIfAvailable<TargetData>();
4938   return false;
4939 }
4940 
4941 void ScalarEvolution::releaseMemory() {
4942   Scalars.clear();
4943   BackedgeTakenCounts.clear();
4944   ConstantEvolutionLoopExitValue.clear();
4945   ValuesAtScopes.clear();
4946   UniqueSCEVs.clear();
4947   SCEVAllocator.Reset();
4948 }
4949 
4950 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
4951   AU.setPreservesAll();
4952   AU.addRequiredTransitive<LoopInfo>();
4953 }
4954 
4955 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
4956   return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
4957 }
4958 
4959 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
4960                           const Loop *L) {
4961   // Print all inner loops first
4962   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4963     PrintLoopInfo(OS, SE, *I);
4964 
4965   OS << "Loop " << L->getHeader()->getName() << ": ";
4966 
4967   SmallVector<BasicBlock*, 8> ExitBlocks;
4968   L->getExitBlocks(ExitBlocks);
4969   if (ExitBlocks.size() != 1)
4970     OS << "<multiple exits> ";
4971 
4972   if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
4973     OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
4974   } else {
4975     OS << "Unpredictable backedge-taken count. ";
4976   }
4977 
4978   OS << "\n";
4979   OS << "Loop " << L->getHeader()->getName() << ": ";
4980 
4981   if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
4982     OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
4983   } else {
4984     OS << "Unpredictable max backedge-taken count. ";
4985   }
4986 
4987   OS << "\n";
4988 }
4989 
4990 void ScalarEvolution::print(raw_ostream &OS, const Module* ) const {
4991   // ScalarEvolution's implementation of the print method is to print
4992   // out SCEV values of all instructions that are interesting. Doing
4993   // this potentially causes it to create new SCEV objects though,
4994   // which technically conflicts with the const qualifier. This isn't
4995   // observable from outside the class though, so casting away the
4996   // const isn't dangerous.
4997   ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);
4998 
4999   OS << "Classifying expressions for: " << F->getName() << "\n";
5000   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
5001     if (isSCEVable(I->getType())) {
5002       OS << *I << '\n';
5003       OS << "  -->  ";
5004       const SCEV *SV = SE.getSCEV(&*I);
5005       SV->print(OS);
5006 
5007       const Loop *L = LI->getLoopFor((*I).getParent());
5008 
5009       const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
5010       if (AtUse != SV) {
5011         OS << "  -->  ";
5012         AtUse->print(OS);
5013       }
5014 
5015       if (L) {
5016         OS << "\t\t" "Exits: ";
5017         const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
5018         if (!ExitValue->isLoopInvariant(L)) {
5019           OS << "<<Unknown>>";
5020         } else {
5021           OS << *ExitValue;
5022         }
5023       }
5024 
5025       OS << "\n";
5026     }
5027 
5028   OS << "Determining loop execution counts for: " << F->getName() << "\n";
5029   for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
5030     PrintLoopInfo(OS, &SE, *I);
5031 }
5032 
5033 void ScalarEvolution::print(std::ostream &o, const Module *M) const {
5034   raw_os_ostream OS(o);
5035   print(OS, M);
5036 }
5037