xref: /llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp (revision afd6db99322ca8a6556b1e93699b4b4c9ebf5080)
1 //===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the scalar evolution analysis
11 // engine, which is used primarily to analyze expressions involving induction
12 // variables in loops.
13 //
14 // There are several aspects to this library.  First is the representation of
15 // scalar expressions, which are represented as subclasses of the SCEV class.
16 // These classes are used to represent certain types of subexpressions that we
17 // can handle. We only create one SCEV of a particular shape, so
18 // pointer-comparisons for equality are legal.
19 //
20 // One important aspect of the SCEV objects is that they are never cyclic, even
21 // if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
22 // the PHI node is one of the idioms that we can represent (e.g., a polynomial
23 // recurrence) then we represent it directly as a recurrence node, otherwise we
24 // represent it as a SCEVUnknown node.
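//
// For example (an illustrative sketch, not drawn from any particular input),
// the induction variable of a simple loop such as
//   for (int i = 0; i != n; ++i) { ... }
// is one of the idioms we handle: it is represented directly as the
// polynomial recurrence {0,+,1}<loop>, while a PHI whose update we cannot
// model is wrapped in a SCEVUnknown node.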
25 //
26 // In addition to being able to represent expressions of various types, we also
27 // have folders that are used to build the *canonical* representation for a
28 // particular expression.  These folders are capable of using a variety of
29 // rewrite rules to simplify the expressions.
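//
// For instance (a hedged illustration of the folders defined later in this
// file), getAddExpr folds the operand list (x, 2, 3) into the canonical form
// (5 + x), and getTruncateExpr folds trunc(trunc(x)) into a single trunc(x).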
30 //
31 // Once the folders are defined, we can implement the more interesting
32 // higher-level code, such as the code that recognizes PHI nodes of various
33 // types, computes the execution count of a loop, etc.
34 //
35 // TODO: We should use these routines and value representations to implement
36 // dependence analysis!
37 //
38 //===----------------------------------------------------------------------===//
39 //
40 // There are several good references for the techniques used in this analysis.
41 //
42 //  Chains of recurrences -- a method to expedite the evaluation
43 //  of closed-form functions
44 //  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
45 //
46 //  On computational properties of chains of recurrences
47 //  Eugene V. Zima
48 //
49 //  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
50 //  Robert A. van Engelen
51 //
52 //  Efficient Symbolic Analysis for Optimizing Compilers
53 //  Robert A. van Engelen
54 //
55 //  Using the chains of recurrences algebra for data dependence testing and
56 //  induction variable substitution
57 //  MS Thesis, Johnie Birch
58 //
59 //===----------------------------------------------------------------------===//
60 
61 #define DEBUG_TYPE "scalar-evolution"
62 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
63 #include "llvm/Constants.h"
64 #include "llvm/DerivedTypes.h"
65 #include "llvm/GlobalVariable.h"
66 #include "llvm/GlobalAlias.h"
67 #include "llvm/Instructions.h"
68 #include "llvm/LLVMContext.h"
69 #include "llvm/Operator.h"
70 #include "llvm/Analysis/ConstantFolding.h"
71 #include "llvm/Analysis/Dominators.h"
72 #include "llvm/Analysis/InstructionSimplify.h"
73 #include "llvm/Analysis/LoopInfo.h"
74 #include "llvm/Analysis/ValueTracking.h"
75 #include "llvm/Assembly/Writer.h"
76 #include "llvm/Target/TargetData.h"
77 #include "llvm/Support/CommandLine.h"
78 #include "llvm/Support/ConstantRange.h"
79 #include "llvm/Support/Debug.h"
80 #include "llvm/Support/ErrorHandling.h"
81 #include "llvm/Support/GetElementPtrTypeIterator.h"
82 #include "llvm/Support/InstIterator.h"
83 #include "llvm/Support/MathExtras.h"
84 #include "llvm/Support/raw_ostream.h"
85 #include "llvm/ADT/Statistic.h"
86 #include "llvm/ADT/STLExtras.h"
87 #include "llvm/ADT/SmallPtrSet.h"
88 #include <algorithm>
89 using namespace llvm;
90 
91 STATISTIC(NumArrayLenItCounts,
92           "Number of trip counts computed with array length");
93 STATISTIC(NumTripCountsComputed,
94           "Number of loops with predictable loop counts");
95 STATISTIC(NumTripCountsNotComputed,
96           "Number of loops without predictable loop counts");
97 STATISTIC(NumBruteForceTripCountsComputed,
98           "Number of loops with trip counts computed by force");
99 
100 static cl::opt<unsigned>
101 MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
102                         cl::desc("Maximum number of iterations SCEV will "
103                                  "symbolically execute a constant-"
104                                  "derived loop"),
105                         cl::init(100));
106 
107 INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
108                 "Scalar Evolution Analysis", false, true)
109 INITIALIZE_PASS_DEPENDENCY(LoopInfo)
110 INITIALIZE_PASS_DEPENDENCY(DominatorTree)
111 INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
112                 "Scalar Evolution Analysis", false, true)
113 char ScalarEvolution::ID = 0;
114 
115 //===----------------------------------------------------------------------===//
116 //                           SCEV class definitions
117 //===----------------------------------------------------------------------===//
118 
119 //===----------------------------------------------------------------------===//
120 // Implementation of the SCEV class.
121 //
122 
123 SCEV::~SCEV() {}
124 
125 void SCEV::dump() const {
126   print(dbgs());
127   dbgs() << '\n';
128 }
129 
130 bool SCEV::isZero() const {
131   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
132     return SC->getValue()->isZero();
133   return false;
134 }
135 
136 bool SCEV::isOne() const {
137   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
138     return SC->getValue()->isOne();
139   return false;
140 }
141 
142 bool SCEV::isAllOnesValue() const {
143   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
144     return SC->getValue()->isAllOnesValue();
145   return false;
146 }
147 
148 SCEVCouldNotCompute::SCEVCouldNotCompute() :
149   SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
150 
151 const Type *SCEVCouldNotCompute::getType() const {
152   llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
153   return 0;
154 }
155 
156 bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
157   llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
158   return false;
159 }
160 
161 void SCEVCouldNotCompute::print(raw_ostream &OS) const {
162   OS << "***COULDNOTCOMPUTE***";
163 }
164 
165 bool SCEVCouldNotCompute::classof(const SCEV *S) {
166   return S->getSCEVType() == scCouldNotCompute;
167 }
168 
169 const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
170   FoldingSetNodeID ID;
171   ID.AddInteger(scConstant);
172   ID.AddPointer(V);
173   void *IP = 0;
174   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
175   SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
176   UniqueSCEVs.InsertNode(S, IP);
177   return S;
178 }
179 
180 const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
181   return getConstant(ConstantInt::get(getContext(), Val));
182 }
183 
184 const SCEV *
185 ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
186   const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
187   return getConstant(ConstantInt::get(ITy, V, isSigned));
188 }
189 
190 const Type *SCEVConstant::getType() const { return V->getType(); }
191 
192 void SCEVConstant::print(raw_ostream &OS) const {
193   WriteAsOperand(OS, V, false);
194 }
195 
196 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
197                            unsigned SCEVTy, const SCEV *op, const Type *ty)
198   : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
199 
200 bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
201   return Op->dominates(BB, DT);
202 }
203 
204 bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
205   return Op->properlyDominates(BB, DT);
206 }
207 
208 SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
209                                    const SCEV *op, const Type *ty)
210   : SCEVCastExpr(ID, scTruncate, op, ty) {
211   assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
212          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
213          "Cannot truncate non-integer value!");
214 }
215 
216 void SCEVTruncateExpr::print(raw_ostream &OS) const {
217   OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
218 }
219 
220 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
221                                        const SCEV *op, const Type *ty)
222   : SCEVCastExpr(ID, scZeroExtend, op, ty) {
223   assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
224          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
225          "Cannot zero extend non-integer value!");
226 }
227 
228 void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
229   OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
230 }
231 
232 SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
233                                        const SCEV *op, const Type *ty)
234   : SCEVCastExpr(ID, scSignExtend, op, ty) {
235   assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
236          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
237          "Cannot sign extend non-integer value!");
238 }
239 
240 void SCEVSignExtendExpr::print(raw_ostream &OS) const {
241   OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
242 }
243 
244 void SCEVCommutativeExpr::print(raw_ostream &OS) const {
245   const char *OpStr = getOperationStr();
246   OS << "(";
247   for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
248     OS << **I;
249     if (llvm::next(I) != E)
250       OS << OpStr;
251   }
252   OS << ")";
253 }
254 
255 bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
256   for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
257     if (!(*I)->dominates(BB, DT))
258       return false;
259   return true;
260 }
261 
262 bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
263   for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
264     if (!(*I)->properlyDominates(BB, DT))
265       return false;
266   return true;
267 }
268 
269 bool SCEVNAryExpr::hasOperand(const SCEV *O) const {
270   for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
271     const SCEV *S = *I;
272     if (O == S || S->hasOperand(O))
273       return true;
274   }
275   return false;
276 }
277 
278 bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
279   return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
280 }
281 
282 bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
283   return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
284 }
285 
286 void SCEVUDivExpr::print(raw_ostream &OS) const {
287   OS << "(" << *LHS << " /u " << *RHS << ")";
288 }
289 
290 const Type *SCEVUDivExpr::getType() const {
291   // In most cases the types of LHS and RHS will be the same, but in some
292   // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
293   // depend on the type for correctness, but handling types carefully can
294   // avoid extra casts in the SCEVExpander. The LHS is more likely to be
295   // a pointer type than the RHS, so use the RHS' type here.
296   return RHS->getType();
297 }
298 
299 bool
300 SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
301   return DT->dominates(L->getHeader(), BB) &&
302          SCEVNAryExpr::dominates(BB, DT);
303 }
304 
305 bool
306 SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
307   // This uses a "dominates" query instead of a "properly dominates" query because
308   // the instruction which produces the addrec's value is a PHI, and a PHI
309   // effectively properly dominates its entire containing block.
310   return DT->dominates(L->getHeader(), BB) &&
311          SCEVNAryExpr::properlyDominates(BB, DT);
312 }
313 
314 void SCEVAddRecExpr::print(raw_ostream &OS) const {
315   OS << "{" << *Operands[0];
316   for (unsigned i = 1, e = NumOperands; i != e; ++i)
317     OS << ",+," << *Operands[i];
318   OS << "}<";
319   WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
320   OS << ">";
321 }
322 
323 void SCEVUnknown::deleted() {
324   // Clear this SCEVUnknown from various maps.
325   SE->ValuesAtScopes.erase(this);
326   SE->UnsignedRanges.erase(this);
327   SE->SignedRanges.erase(this);
328 
329   // Remove this SCEVUnknown from the uniquing map.
330   SE->UniqueSCEVs.RemoveNode(this);
331 
332   // Release the value.
333   setValPtr(0);
334 }
335 
336 void SCEVUnknown::allUsesReplacedWith(Value *New) {
337   // Clear this SCEVUnknown from various maps.
338   SE->ValuesAtScopes.erase(this);
339   SE->UnsignedRanges.erase(this);
340   SE->SignedRanges.erase(this);
341 
342   // Remove this SCEVUnknown from the uniquing map.
343   SE->UniqueSCEVs.RemoveNode(this);
344 
345   // Update this SCEVUnknown to point to the new value. This is needed
346   // because there may still be outstanding SCEVs which point to
347   // this SCEVUnknown.
348   setValPtr(New);
349 }
350 
351 bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
352   if (Instruction *I = dyn_cast<Instruction>(getValue()))
353     return DT->dominates(I->getParent(), BB);
354   return true;
355 }
356 
357 bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
358   if (Instruction *I = dyn_cast<Instruction>(getValue()))
359     return DT->properlyDominates(I->getParent(), BB);
360   return true;
361 }
362 
363 const Type *SCEVUnknown::getType() const {
364   return getValue()->getType();
365 }
366 
367 bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
368   if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
369     if (VCE->getOpcode() == Instruction::PtrToInt)
370       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
371         if (CE->getOpcode() == Instruction::GetElementPtr &&
372             CE->getOperand(0)->isNullValue() &&
373             CE->getNumOperands() == 2)
374           if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
375             if (CI->isOne()) {
376               AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
377                                  ->getElementType();
378               return true;
379             }
380 
381   return false;
382 }
383 
384 bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
385   if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
386     if (VCE->getOpcode() == Instruction::PtrToInt)
387       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
388         if (CE->getOpcode() == Instruction::GetElementPtr &&
389             CE->getOperand(0)->isNullValue()) {
390           const Type *Ty =
391             cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
392           if (const StructType *STy = dyn_cast<StructType>(Ty))
393             if (!STy->isPacked() &&
394                 CE->getNumOperands() == 3 &&
395                 CE->getOperand(1)->isNullValue()) {
396               if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
397                 if (CI->isOne() &&
398                     STy->getNumElements() == 2 &&
399                     STy->getElementType(0)->isIntegerTy(1)) {
400                   AllocTy = STy->getElementType(1);
401                   return true;
402                 }
403             }
404         }
405 
406   return false;
407 }
408 
409 bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
410   if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
411     if (VCE->getOpcode() == Instruction::PtrToInt)
412       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
413         if (CE->getOpcode() == Instruction::GetElementPtr &&
414             CE->getNumOperands() == 3 &&
415             CE->getOperand(0)->isNullValue() &&
416             CE->getOperand(1)->isNullValue()) {
417           const Type *Ty =
418             cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
419           // Ignore vector types here so that ScalarEvolutionExpander doesn't
420           // emit getelementptrs that index into vectors.
421           if (Ty->isStructTy() || Ty->isArrayTy()) {
422             CTy = Ty;
423             FieldNo = CE->getOperand(2);
424             return true;
425           }
426         }
427 
428   return false;
429 }
430 
431 void SCEVUnknown::print(raw_ostream &OS) const {
432   const Type *AllocTy;
433   if (isSizeOf(AllocTy)) {
434     OS << "sizeof(" << *AllocTy << ")";
435     return;
436   }
437   if (isAlignOf(AllocTy)) {
438     OS << "alignof(" << *AllocTy << ")";
439     return;
440   }
441 
442   const Type *CTy;
443   Constant *FieldNo;
444   if (isOffsetOf(CTy, FieldNo)) {
445     OS << "offsetof(" << *CTy << ", ";
446     WriteAsOperand(OS, FieldNo, false);
447     OS << ")";
448     return;
449   }
450 
451   // Otherwise just print it normally.
452   WriteAsOperand(OS, getValue(), false);
453 }
454 
455 //===----------------------------------------------------------------------===//
456 //                               SCEV Utilities
457 //===----------------------------------------------------------------------===//
458 
459 namespace {
460   /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
461   /// than the complexity of the RHS.  This comparator is used to canonicalize
462   /// expressions.
463   class SCEVComplexityCompare {
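  ///
  /// For example (illustrative only): getSCEVType() is the primary sort key,
  /// so a SCEVConstant always compares less than a SCEVUnknown; getAddExpr
  /// relies on this to find any constant operands at the front of its sorted
  /// operand list.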
464     const LoopInfo *const LI;
465   public:
466     explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
467 
468     // Return true if LHS is less complex than RHS, and false otherwise.
469     bool operator()(const SCEV *LHS, const SCEV *RHS) const {
470       return compare(LHS, RHS) < 0;
471     }
472 
473     // Return negative, zero, or positive, if LHS is less than, equal to, or
474     // greater than RHS, respectively. A three-way result allows recursive
475     // comparisons to be more efficient.
476     int compare(const SCEV *LHS, const SCEV *RHS) const {
477       // Fast-path: SCEVs are uniqued so we can do a quick equality check.
478       if (LHS == RHS)
479         return 0;
480 
481       // Primarily, sort the SCEVs by their getSCEVType().
482       unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
483       if (LType != RType)
484         return (int)LType - (int)RType;
485 
486       // Aside from the getSCEVType() ordering, the particular ordering
487       // isn't very important except that it's beneficial to be consistent,
488       // so that (a + b) and (b + a) don't end up as different expressions.
489       switch (LType) {
490       case scUnknown: {
491         const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
492         const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
493 
494         // Sort SCEVUnknown values with some loose heuristics. TODO: This is
495         // not as complete as it could be.
496         const Value *LV = LU->getValue(), *RV = RU->getValue();
497 
498         // Order pointer values after integer values. This helps SCEVExpander
499         // form GEPs.
500         bool LIsPointer = LV->getType()->isPointerTy(),
501              RIsPointer = RV->getType()->isPointerTy();
502         if (LIsPointer != RIsPointer)
503           return (int)LIsPointer - (int)RIsPointer;
504 
505         // Compare getValueID values.
506         unsigned LID = LV->getValueID(),
507                  RID = RV->getValueID();
508         if (LID != RID)
509           return (int)LID - (int)RID;
510 
511         // Sort arguments by their position.
512         if (const Argument *LA = dyn_cast<Argument>(LV)) {
513           const Argument *RA = cast<Argument>(RV);
514           unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
515           return (int)LArgNo - (int)RArgNo;
516         }
517 
518         // For instructions, compare their loop depth, and their operand
519         // count.  This is pretty loose.
520         if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
521           const Instruction *RInst = cast<Instruction>(RV);
522 
523           // Compare loop depths.
524           const BasicBlock *LParent = LInst->getParent(),
525                            *RParent = RInst->getParent();
526           if (LParent != RParent) {
527             unsigned LDepth = LI->getLoopDepth(LParent),
528                      RDepth = LI->getLoopDepth(RParent);
529             if (LDepth != RDepth)
530               return (int)LDepth - (int)RDepth;
531           }
532 
533           // Compare the number of operands.
534           unsigned LNumOps = LInst->getNumOperands(),
535                    RNumOps = RInst->getNumOperands();
536           return (int)LNumOps - (int)RNumOps;
537         }
538 
539         return 0;
540       }
541 
542       case scConstant: {
543         const SCEVConstant *LC = cast<SCEVConstant>(LHS);
544         const SCEVConstant *RC = cast<SCEVConstant>(RHS);
545 
546         // Compare constant values.
547         const APInt &LA = LC->getValue()->getValue();
548         const APInt &RA = RC->getValue()->getValue();
549         unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
550         if (LBitWidth != RBitWidth)
551           return (int)LBitWidth - (int)RBitWidth;
552         return LA.ult(RA) ? -1 : 1;
553       }
554 
555       case scAddRecExpr: {
556         const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
557         const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
558 
559         // Compare addrec loop depths.
560         const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
561         if (LLoop != RLoop) {
562           unsigned LDepth = LLoop->getLoopDepth(),
563                    RDepth = RLoop->getLoopDepth();
564           if (LDepth != RDepth)
565             return (int)LDepth - (int)RDepth;
566         }
567 
568         // Addrec complexity grows with operand count.
569         unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
570         if (LNumOps != RNumOps)
571           return (int)LNumOps - (int)RNumOps;
572 
573         // Lexicographically compare.
574         for (unsigned i = 0; i != LNumOps; ++i) {
575           long X = compare(LA->getOperand(i), RA->getOperand(i));
576           if (X != 0)
577             return X;
578         }
579 
580         return 0;
581       }
582 
583       case scAddExpr:
584       case scMulExpr:
585       case scSMaxExpr:
586       case scUMaxExpr: {
587         const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
588         const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
589 
590         // Lexicographically compare n-ary expressions.
591         unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
592         for (unsigned i = 0; i != LNumOps; ++i) {
593           if (i >= RNumOps)
594             return 1;
595           long X = compare(LC->getOperand(i), RC->getOperand(i));
596           if (X != 0)
597             return X;
598         }
599         return (int)LNumOps - (int)RNumOps;
600       }
601 
602       case scUDivExpr: {
603         const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
604         const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
605 
606         // Lexicographically compare udiv expressions.
607         long X = compare(LC->getLHS(), RC->getLHS());
608         if (X != 0)
609           return X;
610         return compare(LC->getRHS(), RC->getRHS());
611       }
612 
613       case scTruncate:
614       case scZeroExtend:
615       case scSignExtend: {
616         const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
617         const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
618 
619         // Compare cast expressions by operand.
620         return compare(LC->getOperand(), RC->getOperand());
621       }
622 
623       default:
624         break;
625       }
626 
627       llvm_unreachable("Unknown SCEV kind!");
628       return 0;
629     }
630   };
631 }
632 
633 /// GroupByComplexity - Given a list of SCEV objects, order them by their
634 /// complexity, and group objects of the same complexity together by value.
635 /// When this routine is finished, we know that any duplicates in the vector are
636 /// consecutive and that complexity is monotonically increasing.
637 ///
638 /// Note that we take special precautions to ensure that we get deterministic
639 /// results from this routine.  In other words, we don't want the results of
640 /// this to depend on where the addresses of various SCEV objects happened to
641 /// land in memory.
642 ///
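/// For example (a sketch of the intended post-condition rather than an exact
/// trace), an operand list such as (x, 3, y, x) is reordered so that the
/// constant 3 comes first and the two copies of x become adjacent, e.g.
/// (3, x, x, y), so that callers such as getAddExpr can detect repeated
/// operands cheaply.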
643 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
644                               LoopInfo *LI) {
645   if (Ops.size() < 2) return;  // Noop
646   if (Ops.size() == 2) {
647     // This is the common case, which also happens to be trivially simple.
648     // Special case it.
649     const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
650     if (SCEVComplexityCompare(LI)(RHS, LHS))
651       std::swap(LHS, RHS);
652     return;
653   }
654 
655   // Do the rough sort by complexity.
656   std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));
657 
658   // Now that we are sorted by complexity, group elements of the same
659   // complexity.  Note that this is, at worst, N^2, but the vector is likely to
660   // be extremely short in practice.  Note that we take this approach because we
661   // do not want to depend on the addresses of the objects we are grouping.
662   for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
663     const SCEV *S = Ops[i];
664     unsigned Complexity = S->getSCEVType();
665 
666     // If there are any objects of the same complexity and same value as this
667     // one, group them.
668     for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
669       if (Ops[j] == S) { // Found a duplicate.
670         // Move it to immediately after i'th element.
671         std::swap(Ops[i+1], Ops[j]);
672         ++i;   // no need to rescan it.
673         if (i == e-2) return;  // Done!
674       }
675     }
676   }
677 }
678 
679 
680 
681 //===----------------------------------------------------------------------===//
682 //                      Simple SCEV method implementations
683 //===----------------------------------------------------------------------===//
684 
685 /// BinomialCoefficient - Compute BC(It, K).  The result has width W.
686 /// Assumes K > 0.
687 static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
688                                        ScalarEvolution &SE,
689                                        const Type* ResultTy) {
690   // Handle the simplest case efficiently.
691   if (K == 1)
692     return SE.getTruncateOrZeroExtend(It, ResultTy);
693 
694   // We are using the following formula for BC(It, K):
695   //
696   //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
697   //
698   // Suppose W is the bitwidth of the return value.  We must be prepared for
699   // overflow.  Hence, we must ensure that the result of our computation is
700   // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
701   // safe in modular arithmetic.
702   //
703   // However, this code doesn't use exactly that formula; the formula it uses
704   // is something like the following, where T is the number of factors of 2 in
705   // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
706   // exponentiation:
707   //
708   //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
709   //
710   // This formula is trivially equivalent to the previous formula.  However,
711   // this formula can be implemented much more efficiently.  The trick is that
712   // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
713   // arithmetic.  To do exact division in modular arithmetic, all we have
714   // to do is multiply by the inverse.  Therefore, this step can be done at
715   // width W.
716   //
717   // The next issue is how to safely do the division by 2^T.  The way this
718   // is done is by doing the multiplication step at a width of at least W + T
719   // bits.  This way, the bottom W+T bits of the product are accurate. Then,
720   // when we perform the division by 2^T (which is equivalent to a right shift
721   // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
722   // truncated out after the division by 2^T.
723   //
724   // In comparison to just directly using the first formula, this technique
725   // is much more efficient; using the first formula requires W * K bits,
726   // but this formula uses less than W + K bits. Also, the first formula requires
727   // a division step, whereas this formula only requires multiplies and shifts.
728   //
729   // It doesn't matter whether the subtraction step is done in the calculation
730   // width or the input iteration count's width; if the subtraction overflows,
731   // the result must be zero anyway.  We prefer here to do it in the width of
732   // the induction variable because it helps a lot for certain cases; CodeGen
733   // isn't smart enough to ignore the overflow, which leads to much less
734   // efficient code if the width of the subtraction is wider than the native
735   // register width.
736   //
737   // (It's possible to not widen at all by pulling out factors of 2 before
738   // the multiplication; for example, K=2 can be calculated as
739   // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
740   // extra arithmetic, so it's not an obvious win, and it gets
741   // much more complicated for K > 3.)
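  //
  // (A small hand-worked instance of the scheme above, for illustration: for
  // K = 2 we have K! = 2, T = 1, and K!/2^T = 1.  The code below therefore
  // computes It*(It-1) at width W+1, divides by 2^T = 2, truncates back to
  // W bits, and multiplies by the inverse of 1, which is a no-op; this
  // recovers the familiar BC(It, 2) = It*(It-1)/2.)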
742 
743   // Protection from insane SCEVs; this bound is conservative,
744   // but it probably doesn't matter.
745   if (K > 1000)
746     return SE.getCouldNotCompute();
747 
748   unsigned W = SE.getTypeSizeInBits(ResultTy);
749 
750   // Calculate K! / 2^T and T; we divide out the factors of two before
751   // multiplying for calculating K! / 2^T to avoid overflow.
752   // Other overflow doesn't matter because we only care about the bottom
753   // W bits of the result.
754   APInt OddFactorial(W, 1);
755   unsigned T = 1;
756   for (unsigned i = 3; i <= K; ++i) {
757     APInt Mult(W, i);
758     unsigned TwoFactors = Mult.countTrailingZeros();
759     T += TwoFactors;
760     Mult = Mult.lshr(TwoFactors);
761     OddFactorial *= Mult;
762   }
763 
764   // We need at least W + T bits for the multiplication step
765   unsigned CalculationBits = W + T;
766 
767   // Calculate 2^T, at width T+W.
768   APInt DivFactor = APInt(CalculationBits, 1).shl(T);
769 
770   // Calculate the multiplicative inverse of K! / 2^T;
771   // this multiplication factor will perform the exact division by
772   // K! / 2^T.
773   APInt Mod = APInt::getSignedMinValue(W+1);
774   APInt MultiplyFactor = OddFactorial.zext(W+1);
775   MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
776   MultiplyFactor = MultiplyFactor.trunc(W);
777 
778   // Calculate the product, at width T+W
779   const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
780                                                       CalculationBits);
781   const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
782   for (unsigned i = 1; i != K; ++i) {
783     const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
784     Dividend = SE.getMulExpr(Dividend,
785                              SE.getTruncateOrZeroExtend(S, CalculationTy));
786   }
787 
788   // Divide by 2^T
789   const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
790 
791   // Truncate the result, and divide by K! / 2^T.
792 
793   return SE.getMulExpr(SE.getConstant(MultiplyFactor),
794                        SE.getTruncateOrZeroExtend(DivResult, ResultTy));
795 }
796 
797 /// evaluateAtIteration - Return the value of this chain of recurrences at
798 /// the specified iteration number.  We can evaluate this recurrence by
799 /// multiplying each element in the chain by the binomial coefficient
800 /// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
801 ///
802 ///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
803 ///
804 /// where BC(It, k) stands for binomial coefficient.
805 ///
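/// For example (expanded by hand for illustration), {5,+,3,+,2} evaluated at
/// iteration It is
///
///   5*BC(It, 0) + 3*BC(It, 1) + 2*BC(It, 2) = 5 + 3*It + 2*(It*(It-1)/2)
///
/// which simplifies to It^2 + 2*It + 5.
///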
806 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
807                                                 ScalarEvolution &SE) const {
808   const SCEV *Result = getStart();
809   for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
810     // The computation is correct in the face of overflow provided that the
811     // multiplication is performed _after_ the evaluation of the binomial
812     // coefficient.
813     const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
814     if (isa<SCEVCouldNotCompute>(Coeff))
815       return Coeff;
816 
817     Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
818   }
819   return Result;
820 }
821 
822 //===----------------------------------------------------------------------===//
823 //                    SCEV Expression folder implementations
824 //===----------------------------------------------------------------------===//
825 
826 const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
827                                              const Type *Ty) {
828   assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
829          "This is not a truncating conversion!");
830   assert(isSCEVable(Ty) &&
831          "This is not a conversion to a SCEVable type!");
832   Ty = getEffectiveSCEVType(Ty);
833 
834   FoldingSetNodeID ID;
835   ID.AddInteger(scTruncate);
836   ID.AddPointer(Op);
837   ID.AddPointer(Ty);
838   void *IP = 0;
839   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
840 
841   // Fold if the operand is constant.
842   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
843     return getConstant(
844       cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
845                                                getEffectiveSCEVType(Ty))));
846 
847   // trunc(trunc(x)) --> trunc(x)
848   if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
849     return getTruncateExpr(ST->getOperand(), Ty);
850 
851   // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
852   if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
853     return getTruncateOrSignExtend(SS->getOperand(), Ty);
854 
855   // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
856   if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
857     return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
858 
859   // If the input value is a chrec scev, truncate the chrec's operands.
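  // (For example, trunc({X,+,Y}) becomes {trunc(X),+,trunc(Y)} over the same
  // loop; this is just an informal restatement of the loop below.)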
860   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
861     SmallVector<const SCEV *, 4> Operands;
862     for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
863       Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
864     return getAddRecExpr(Operands, AddRec->getLoop());
865   }
866 
867   // As a special case, fold trunc(undef) to undef. We don't want to
868   // know too much about SCEVUnknowns, but this special case is handy
869   // and harmless.
870   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
871     if (isa<UndefValue>(U->getValue()))
872       return getSCEV(UndefValue::get(Ty));
873 
874   // The cast wasn't folded; create an explicit cast node. We can reuse
875   // the existing insert position since if we get here, we won't have
876   // made any changes which would invalidate it.
877   SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
878                                                  Op, Ty);
879   UniqueSCEVs.InsertNode(S, IP);
880   return S;
881 }
882 
883 const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
884                                                const Type *Ty) {
885   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
886          "This is not an extending conversion!");
887   assert(isSCEVable(Ty) &&
888          "This is not a conversion to a SCEVable type!");
889   Ty = getEffectiveSCEVType(Ty);
890 
891   // Fold if the operand is constant.
892   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
893     return getConstant(
894       cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
895                                               getEffectiveSCEVType(Ty))));
896 
897   // zext(zext(x)) --> zext(x)
898   if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
899     return getZeroExtendExpr(SZ->getOperand(), Ty);
900 
901   // Before doing any expensive analysis, check to see if we've already
902   // computed a SCEV for this Op and Ty.
903   FoldingSetNodeID ID;
904   ID.AddInteger(scZeroExtend);
905   ID.AddPointer(Op);
906   ID.AddPointer(Ty);
907   void *IP = 0;
908   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
909 
910   // If the input value is a chrec scev, and we can prove that the value
911   // did not overflow the old, smaller, value, we can zero extend all of the
912   // operands (often constants).  This allows analysis of something like
913   // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
914   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
915     if (AR->isAffine()) {
916       const SCEV *Start = AR->getStart();
917       const SCEV *Step = AR->getStepRecurrence(*this);
918       unsigned BitWidth = getTypeSizeInBits(AR->getType());
919       const Loop *L = AR->getLoop();
920 
921       // If we have special knowledge that this addrec won't overflow,
922       // we don't need to do any further analysis.
923       if (AR->hasNoUnsignedWrap())
924         return getAddRecExpr(getZeroExtendExpr(Start, Ty),
925                              getZeroExtendExpr(Step, Ty),
926                              L);
927 
928       // Check whether the backedge-taken count is SCEVCouldNotCompute.
929       // Note that this serves two purposes: It filters out loops that are
930       // simply not analyzable, and it covers the case where this code is
931       // being called from within backedge-taken count analysis, such that
932       // attempting to ask for the backedge-taken count would likely result
933       // in infinite recursion. In the latter case, the analysis code will
934       // cope with a conservative value, and it will take care to purge
935       // that value once it has finished.
936       const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
937       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
938         // Manually compute the final value for AR, checking for
939         // overflow.
940 
941         // Check whether the backedge-taken count can be losslessly cast to
942         // the addrec's type. The count is always unsigned.
943         const SCEV *CastedMaxBECount =
944           getTruncateOrZeroExtend(MaxBECount, Start->getType());
945         const SCEV *RecastedMaxBECount =
946           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
947         if (MaxBECount == RecastedMaxBECount) {
948           const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
949           // Check whether Start+Step*MaxBECount has no unsigned overflow.
950           const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
951           const SCEV *Add = getAddExpr(Start, ZMul);
952           const SCEV *OperandExtendedAdd =
953             getAddExpr(getZeroExtendExpr(Start, WideTy),
954                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
955                                   getZeroExtendExpr(Step, WideTy)));
956           if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
957             // Return the expression with the addrec on the outside.
958             return getAddRecExpr(getZeroExtendExpr(Start, Ty),
959                                  getZeroExtendExpr(Step, Ty),
960                                  L);
961 
962           // Similar to above, only this time treat the step value as signed.
963           // This covers loops that count down.
964           const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
965           Add = getAddExpr(Start, SMul);
966           OperandExtendedAdd =
967             getAddExpr(getZeroExtendExpr(Start, WideTy),
968                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
969                                   getSignExtendExpr(Step, WideTy)));
970           if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
971             // Return the expression with the addrec on the outside.
972             return getAddRecExpr(getZeroExtendExpr(Start, Ty),
973                                  getSignExtendExpr(Step, Ty),
974                                  L);
975         }
976 
977         // If the backedge is guarded by a comparison with the pre-inc value
978         // the addrec is safe. Also, if the entry is guarded by a comparison
979         // with the start value and the backedge is guarded by a comparison
980         // with the post-inc value, the addrec is safe.
981         if (isKnownPositive(Step)) {
982           const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
983                                       getUnsignedRange(Step).getUnsignedMax());
984           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
985               (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
986                isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
987                                            AR->getPostIncExpr(*this), N)))
988             // Return the expression with the addrec on the outside.
989             return getAddRecExpr(getZeroExtendExpr(Start, Ty),
990                                  getZeroExtendExpr(Step, Ty),
991                                  L);
992         } else if (isKnownNegative(Step)) {
993           const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
994                                       getSignedRange(Step).getSignedMin());
995           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
996               (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
997                isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
998                                            AR->getPostIncExpr(*this), N)))
999             // Return the expression with the addrec on the outside.
1000             return getAddRecExpr(getZeroExtendExpr(Start, Ty),
1001                                  getSignExtendExpr(Step, Ty),
1002                                  L);
1003         }
1004       }
1005     }
1006 
1007   // The cast wasn't folded; create an explicit cast node.
1008   // Recompute the insert position, as it may have been invalidated.
1009   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1010   SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1011                                                    Op, Ty);
1012   UniqueSCEVs.InsertNode(S, IP);
1013   return S;
1014 }
1015 
1016 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
1017                                                const Type *Ty) {
1018   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1019          "This is not an extending conversion!");
1020   assert(isSCEVable(Ty) &&
1021          "This is not a conversion to a SCEVable type!");
1022   Ty = getEffectiveSCEVType(Ty);
1023 
1024   // Fold if the operand is constant.
1025   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1026     return getConstant(
1027       cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
1028                                               getEffectiveSCEVType(Ty))));
1029 
1030   // sext(sext(x)) --> sext(x)
1031   if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1032     return getSignExtendExpr(SS->getOperand(), Ty);
1033 
1034   // Before doing any expensive analysis, check to see if we've already
1035   // computed a SCEV for this Op and Ty.
1036   FoldingSetNodeID ID;
1037   ID.AddInteger(scSignExtend);
1038   ID.AddPointer(Op);
1039   ID.AddPointer(Ty);
1040   void *IP = 0;
1041   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1042 
1043   // If the input value is a chrec scev, and we can prove that the value
1044   // did not overflow the old, smaller, value, we can sign extend all of the
1045   // operands (often constants).  This allows analysis of something like
1046   // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
1047   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1048     if (AR->isAffine()) {
1049       const SCEV *Start = AR->getStart();
1050       const SCEV *Step = AR->getStepRecurrence(*this);
1051       unsigned BitWidth = getTypeSizeInBits(AR->getType());
1052       const Loop *L = AR->getLoop();
1053 
1054       // If we have special knowledge that this addrec won't overflow,
1055       // we don't need to do any further analysis.
1056       if (AR->hasNoSignedWrap())
1057         return getAddRecExpr(getSignExtendExpr(Start, Ty),
1058                              getSignExtendExpr(Step, Ty),
1059                              L);
1060 
1061       // Check whether the backedge-taken count is SCEVCouldNotCompute.
1062       // Note that this serves two purposes: It filters out loops that are
1063       // simply not analyzable, and it covers the case where this code is
1064       // being called from within backedge-taken count analysis, such that
1065       // attempting to ask for the backedge-taken count would likely result
1066       // in infinite recursion. In the latter case, the analysis code will
1067       // cope with a conservative value, and it will take care to purge
1068       // that value once it has finished.
1069       const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1070       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1071         // Manually compute the final value for AR, checking for
1072         // overflow.
1073 
1074         // Check whether the backedge-taken count can be losslessly cast to
1075         // the addrec's type. The count is always unsigned.
1076         const SCEV *CastedMaxBECount =
1077           getTruncateOrZeroExtend(MaxBECount, Start->getType());
1078         const SCEV *RecastedMaxBECount =
1079           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1080         if (MaxBECount == RecastedMaxBECount) {
1081           const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1082           // Check whether Start+Step*MaxBECount has no signed overflow.
1083           const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
1084           const SCEV *Add = getAddExpr(Start, SMul);
1085           const SCEV *OperandExtendedAdd =
1086             getAddExpr(getSignExtendExpr(Start, WideTy),
1087                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
1088                                   getSignExtendExpr(Step, WideTy)));
1089           if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
1090             // Return the expression with the addrec on the outside.
1091             return getAddRecExpr(getSignExtendExpr(Start, Ty),
1092                                  getSignExtendExpr(Step, Ty),
1093                                  L);
1094 
1095           // Similar to above, only this time treat the step value as unsigned.
1096           // This covers loops that count up with an unsigned step.
1097           const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
1098           Add = getAddExpr(Start, UMul);
1099           OperandExtendedAdd =
1100             getAddExpr(getSignExtendExpr(Start, WideTy),
1101                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
1102                                   getZeroExtendExpr(Step, WideTy)));
1103           if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
1104             // Return the expression with the addrec on the outside.
1105             return getAddRecExpr(getSignExtendExpr(Start, Ty),
1106                                  getZeroExtendExpr(Step, Ty),
1107                                  L);
1108         }
1109 
1110         // If the backedge is guarded by a comparison with the pre-inc value
1111         // the addrec is safe. Also, if the entry is guarded by a comparison
1112         // with the start value and the backedge is guarded by a comparison
1113         // with the post-inc value, the addrec is safe.
1114         if (isKnownPositive(Step)) {
1115           const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
1116                                       getSignedRange(Step).getSignedMax());
1117           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
1118               (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
1119                isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
1120                                            AR->getPostIncExpr(*this), N)))
1121             // Return the expression with the addrec on the outside.
1122             return getAddRecExpr(getSignExtendExpr(Start, Ty),
1123                                  getSignExtendExpr(Step, Ty),
1124                                  L);
1125         } else if (isKnownNegative(Step)) {
1126           const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
1127                                       getSignedRange(Step).getSignedMin());
1128           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
1129               (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
1130                isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
1131                                            AR->getPostIncExpr(*this), N)))
1132             // Return the expression with the addrec on the outside.
1133             return getAddRecExpr(getSignExtendExpr(Start, Ty),
1134                                  getSignExtendExpr(Step, Ty),
1135                                  L);
1136         }
1137       }
1138     }
1139 
1140   // The cast wasn't folded; create an explicit cast node.
1141   // Recompute the insert position, as it may have been invalidated.
1142   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1143   SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1144                                                    Op, Ty);
1145   UniqueSCEVs.InsertNode(S, IP);
1146   return S;
1147 }
1148 
1149 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
1150 /// unspecified bits out to the given type.
1151 ///
1152 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1153                                               const Type *Ty) {
1154   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1155          "This is not an extending conversion!");
1156   assert(isSCEVable(Ty) &&
1157          "This is not a conversion to a SCEVable type!");
1158   Ty = getEffectiveSCEVType(Ty);
1159 
1160   // Sign-extend negative constants.
1161   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1162     if (SC->getValue()->getValue().isNegative())
1163       return getSignExtendExpr(Op, Ty);
1164 
1165   // Peel off a truncate cast.
1166   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1167     const SCEV *NewOp = T->getOperand();
1168     if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1169       return getAnyExtendExpr(NewOp, Ty);
1170     return getTruncateOrNoop(NewOp, Ty);
1171   }
1172 
1173   // Next try a zext cast. If the cast is folded, use it.
1174   const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1175   if (!isa<SCEVZeroExtendExpr>(ZExt))
1176     return ZExt;
1177 
1178   // Next try a sext cast. If the cast is folded, use it.
1179   const SCEV *SExt = getSignExtendExpr(Op, Ty);
1180   if (!isa<SCEVSignExtendExpr>(SExt))
1181     return SExt;
1182 
1183   // Force the cast to be folded into the operands of an addrec.
1184   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1185     SmallVector<const SCEV *, 4> Ops;
1186     for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
1187          I != E; ++I)
1188       Ops.push_back(getAnyExtendExpr(*I, Ty));
1189     return getAddRecExpr(Ops, AR->getLoop());
1190   }
1191 
1192   // As a special case, fold anyext(undef) to undef. We don't want to
1193   // know too much about SCEVUnknowns, but this special case is handy
1194   // and harmless.
1195   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
1196     if (isa<UndefValue>(U->getValue()))
1197       return getSCEV(UndefValue::get(Ty));
1198 
1199   // If the expression is obviously signed, use the sext cast value.
1200   if (isa<SCEVSMaxExpr>(Op))
1201     return SExt;
1202 
1203   // Absent any other information, use the zext cast value.
1204   return ZExt;
1205 }
1206 
1207 /// CollectAddOperandsWithScales - Process the given Ops list, which is
1208 /// a list of operands to be added under the given scale, and update the given
1209 /// map. This is a helper function for getAddExpr. As an example of
1210 /// what it does, given a sequence of operands that would form an add
1211 /// expression like this:
1212 ///
1213 ///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
1214 ///
1215 /// where A and B are constants, update the map with these values:
1216 ///
1217 ///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1218 ///
1219 /// and add 13 + A*B*29 to AccumulatedConstant.
1220 /// This will allow getAddRecExpr to produce this:
1221 ///
1222 ///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1223 ///
1224 /// This form often exposes folding opportunities that are hidden in
1225 /// the original operand list.
1226 ///
1227 /// Return true iff it appears that any interesting folding opportunities
1228 /// may be exposed. This helps getAddExpr short-circuit extra work in
1229 /// the common case where no interesting opportunities are present, and
1230 /// is also used as a check to avoid infinite recursion.
1231 ///
1232 static bool
1233 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1234                              SmallVector<const SCEV *, 8> &NewOps,
1235                              APInt &AccumulatedConstant,
1236                              const SCEV *const *Ops, size_t NumOperands,
1237                              const APInt &Scale,
1238                              ScalarEvolution &SE) {
1239   bool Interesting = false;
1240 
1241   // Iterate over the add operands. They are sorted, with constants first.
1242   unsigned i = 0;
1243   while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1244     ++i;
1245     // Pull a buried constant out to the outside.
1246     if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
1247       Interesting = true;
1248     AccumulatedConstant += Scale * C->getValue()->getValue();
1249   }
1250 
1251   // Next comes everything else. We're especially interested in multiplies
1252   // here, but they're in the middle, so just visit the rest with one loop.
1253   for (; i != NumOperands; ++i) {
1254     const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1255     if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1256       APInt NewScale =
1257         Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
1258       if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1259         // A multiplication of a constant with another add; recurse.
1260         const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
1261         Interesting |=
1262           CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1263                                        Add->op_begin(), Add->getNumOperands(),
1264                                        NewScale, SE);
1265       } else {
1266         // A multiplication of a constant with some other value. Update
1267         // the map.
1268         SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1269         const SCEV *Key = SE.getMulExpr(MulOps);
1270         std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1271           M.insert(std::make_pair(Key, NewScale));
1272         if (Pair.second) {
1273           NewOps.push_back(Pair.first->first);
1274         } else {
1275           Pair.first->second += NewScale;
1276           // The map already had an entry for this value, which may indicate
1277           // a folding opportunity.
1278           Interesting = true;
1279         }
1280       }
1281     } else {
1282       // An ordinary operand. Update the map.
1283       std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1284         M.insert(std::make_pair(Ops[i], Scale));
1285       if (Pair.second) {
1286         NewOps.push_back(Pair.first->first);
1287       } else {
1288         Pair.first->second += Scale;
1289         // The map already had an entry for this value, which may indicate
1290         // a folding opportunity.
1291         Interesting = true;
1292       }
1293     }
1294   }
1295 
1296   return Interesting;
1297 }
1298 
1299 namespace {
1300   struct APIntCompare {
1301     bool operator()(const APInt &LHS, const APInt &RHS) const {
1302       return LHS.ult(RHS);
1303     }
1304   };
1305 }
1306 
1307 /// getAddExpr - Get a canonical add expression, or something simpler if
1308 /// possible.
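     ///
     /// A minimal usage sketch (illustrative only; SE is assumed to be a
     /// ScalarEvolution instance and X a non-constant SCEV such as a
     /// SCEVUnknown):
     ///
     ///   SmallVector<const SCEV *, 4> Ops;
     ///   Ops.push_back(X);
     ///   Ops.push_back(X);
     ///   const SCEV *S = SE.getAddExpr(Ops);
     ///
     /// Here S is the SCEVMulExpr 2*X rather than an add: the duplicate-operand
     /// step below folds X + X, illustrating the "something simpler" case. Note
     /// that Ops may be modified in place.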
1309 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
1310                                         bool HasNUW, bool HasNSW) {
1311   assert(!Ops.empty() && "Cannot get empty add!");
1312   if (Ops.size() == 1) return Ops[0];
1313 #ifndef NDEBUG
1314   const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1315   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1316     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1317            "SCEVAddExpr operand types don't match!");
1318 #endif
1319 
1320   // If HasNSW is true and all the operands are non-negative, infer HasNUW.
1321   if (!HasNUW && HasNSW) {
1322     bool All = true;
1323     for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
1324          E = Ops.end(); I != E; ++I)
1325       if (!isKnownNonNegative(*I)) {
1326         All = false;
1327         break;
1328       }
1329     if (All) HasNUW = true;
1330   }
1331 
1332   // Sort by complexity, this groups all similar expression types together.
1333   GroupByComplexity(Ops, LI);
1334 
1335   // If there are any constants, fold them together.
1336   unsigned Idx = 0;
1337   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1338     ++Idx;
1339     assert(Idx < Ops.size());
1340     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1341       // We found two constants, fold them together!
1342       Ops[0] = getConstant(LHSC->getValue()->getValue() +
1343                            RHSC->getValue()->getValue());
1344       if (Ops.size() == 2) return Ops[0];
1345       Ops.erase(Ops.begin()+1);  // Erase the folded element
1346       LHSC = cast<SCEVConstant>(Ops[0]);
1347     }
1348 
1349     // If we are left with a constant zero being added, strip it off.
1350     if (LHSC->getValue()->isZero()) {
1351       Ops.erase(Ops.begin());
1352       --Idx;
1353     }
1354 
1355     if (Ops.size() == 1) return Ops[0];
1356   }
1357 
1358   // Okay, check to see if the same value occurs in the operand list more than
1359   // once.  If so, merge them together into a multiply expression.  Since we
1360   // sorted the list, these values are required to be adjacent.
1361   const Type *Ty = Ops[0]->getType();
1362   bool FoundMatch = false;
1363   for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
1364     if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
1365       // Scan ahead to count how many equal operands there are.
1366       unsigned Count = 2;
1367       while (i+Count != e && Ops[i+Count] == Ops[i])
1368         ++Count;
1369       // Merge the values into a multiply.
1370       const SCEV *Scale = getConstant(Ty, Count);
1371       const SCEV *Mul = getMulExpr(Scale, Ops[i]);
1372       if (Ops.size() == Count)
1373         return Mul;
1374       Ops[i] = Mul;
1375       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
1376       --i; e -= Count - 1;
1377       FoundMatch = true;
1378     }
1379   if (FoundMatch)
1380     return getAddExpr(Ops, HasNUW, HasNSW);
1381 
1382   // Check for truncates. If all the operands are truncated from the same
1383   // type, see if factoring out the truncate would permit the result to be
1384   // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1385   // if the contents of the resulting outer trunc fold to something simple.
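       // For example (illustrative): trunc(%a to i32) + trunc(%b to i32), with
       // both truncates from i64, is rewritten as trunc((%a + %b) to i32) only
       // when %a + %b folds to a constant or a single SCEVUnknown; otherwise the
       // wide form is discarded and the original operands are kept.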
1386   for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1387     const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
1388     const Type *DstType = Trunc->getType();
1389     const Type *SrcType = Trunc->getOperand()->getType();
1390     SmallVector<const SCEV *, 8> LargeOps;
1391     bool Ok = true;
1392     // Check all the operands to see if they can be represented in the
1393     // source type of the truncate.
1394     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1395       if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1396         if (T->getOperand()->getType() != SrcType) {
1397           Ok = false;
1398           break;
1399         }
1400         LargeOps.push_back(T->getOperand());
1401       } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1402         LargeOps.push_back(getAnyExtendExpr(C, SrcType));
1403       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
1404         SmallVector<const SCEV *, 8> LargeMulOps;
1405         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1406           if (const SCEVTruncateExpr *T =
1407                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1408             if (T->getOperand()->getType() != SrcType) {
1409               Ok = false;
1410               break;
1411             }
1412             LargeMulOps.push_back(T->getOperand());
1413           } else if (const SCEVConstant *C =
1414                        dyn_cast<SCEVConstant>(M->getOperand(j))) {
1415             LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
1416           } else {
1417             Ok = false;
1418             break;
1419           }
1420         }
1421         if (Ok)
1422           LargeOps.push_back(getMulExpr(LargeMulOps));
1423       } else {
1424         Ok = false;
1425         break;
1426       }
1427     }
1428     if (Ok) {
1429       // Evaluate the expression in the larger type.
1430       const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW);
1431       // If it folds to something simple, use it. Otherwise, don't.
1432       if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1433         return getTruncateExpr(Fold, DstType);
1434     }
1435   }
1436 
1437   // Skip past any other cast SCEVs.
1438   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1439     ++Idx;
1440 
1441   // If there are add operands, they would be next.
1442   if (Idx < Ops.size()) {
1443     bool DeletedAdd = false;
1444     while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
1445       // If we have an add, expand the add operands onto the end of the operands
1446       // list.
1447       Ops.erase(Ops.begin()+Idx);
1448       Ops.append(Add->op_begin(), Add->op_end());
1449       DeletedAdd = true;
1450     }
1451 
1452     // If we deleted at least one add, we added operands to the end of the list,
1453     // and they are not necessarily sorted.  Recurse to resort and resimplify
1454     // any operands we just acquired.
1455     if (DeletedAdd)
1456       return getAddExpr(Ops);
1457   }
1458 
1459   // Skip over the add expression until we get to a multiply.
1460   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1461     ++Idx;
1462 
1463   // Check to see if there are any folding opportunities present with
1464   // operands multiplied by constant values.
1465   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1466     uint64_t BitWidth = getTypeSizeInBits(Ty);
1467     DenseMap<const SCEV *, APInt> M;
1468     SmallVector<const SCEV *, 8> NewOps;
1469     APInt AccumulatedConstant(BitWidth, 0);
1470     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1471                                      Ops.data(), Ops.size(),
1472                                      APInt(BitWidth, 1), *this)) {
1473       // Some interesting folding opportunity is present, so it's worthwhile to
1474       // re-generate the operands list. Group the operands by constant scale,
1475       // to avoid multiplying by the same constant scale multiple times.
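           // For instance (illustrative), 2*x + 2*y + 3*z + 5 is regenerated here
           // as 5 + 2*(x + y) + 3*z, multiplying by each distinct scale only once.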
1476       std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1477       for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(),
1478            E = NewOps.end(); I != E; ++I)
1479         MulOpLists[M.find(*I)->second].push_back(*I);
1480       // Re-generate the operands list.
1481       Ops.clear();
1482       if (AccumulatedConstant != 0)
1483         Ops.push_back(getConstant(AccumulatedConstant));
1484       for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1485            I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1486         if (I->first != 0)
1487           Ops.push_back(getMulExpr(getConstant(I->first),
1488                                    getAddExpr(I->second)));
1489       if (Ops.empty())
1490         return getConstant(Ty, 0);
1491       if (Ops.size() == 1)
1492         return Ops[0];
1493       return getAddExpr(Ops);
1494     }
1495   }
1496 
1497   // If we are adding something to a multiply expression, make sure the
1498   // something is not already an operand of the multiply.  If so, merge it into
1499   // the multiply.
1500   for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1501     const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1502     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1503       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1504       if (isa<SCEVConstant>(MulOpSCEV))
1505         continue;
1506       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1507         if (MulOpSCEV == Ops[AddOp]) {
1508           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
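               // Note that (MulOp == 0) is used as an operand index: it selects
               // the multiply's other operand (operand 1 when MulOp is 0,
               // operand 0 otherwise), which is the whole Y*Z term only when the
               // multiply has exactly two operands; the general case is handled
               // just below.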
1509           const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1510           if (Mul->getNumOperands() != 2) {
1511             // If the multiply has more than two operands, we must get the
1512             // Y*Z term.
1513             SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1514                                                 Mul->op_begin()+MulOp);
1515             MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
1516             InnerMul = getMulExpr(MulOps);
1517           }
1518           const SCEV *One = getConstant(Ty, 1);
1519           const SCEV *AddOne = getAddExpr(One, InnerMul);
1520           const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
1521           if (Ops.size() == 2) return OuterMul;
1522           if (AddOp < Idx) {
1523             Ops.erase(Ops.begin()+AddOp);
1524             Ops.erase(Ops.begin()+Idx-1);
1525           } else {
1526             Ops.erase(Ops.begin()+Idx);
1527             Ops.erase(Ops.begin()+AddOp-1);
1528           }
1529           Ops.push_back(OuterMul);
1530           return getAddExpr(Ops);
1531         }
1532 
1533       // Check this multiply against other multiplies being added together.
1534       for (unsigned OtherMulIdx = Idx+1;
1535            OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1536            ++OtherMulIdx) {
1537         const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1538         // If MulOp occurs in OtherMul, we can fold the two multiplies
1539         // together.
1540         for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1541              OMulOp != e; ++OMulOp)
1542           if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1543             // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1544             const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1545             if (Mul->getNumOperands() != 2) {
1546               SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1547                                                   Mul->op_begin()+MulOp);
1548               MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
1549               InnerMul1 = getMulExpr(MulOps);
1550             }
1551             const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1552             if (OtherMul->getNumOperands() != 2) {
1553               SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1554                                                   OtherMul->op_begin()+OMulOp);
1555               MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
1556               InnerMul2 = getMulExpr(MulOps);
1557             }
1558             const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1559             const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1560             if (Ops.size() == 2) return OuterMul;
1561             Ops.erase(Ops.begin()+Idx);
1562             Ops.erase(Ops.begin()+OtherMulIdx-1);
1563             Ops.push_back(OuterMul);
1564             return getAddExpr(Ops);
1565           }
1566       }
1567     }
1568   }
1569 
1570   // If there are any add recurrences in the operands list, see if any other
1571   // added values are loop invariant.  If so, we can fold them into the
1572   // recurrence.
1573   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1574     ++Idx;
1575 
1576   // Scan over all recurrences, trying to fold loop invariants into them.
1577   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1578     // Scan all of the other operands to this add and add them to the vector if
1579     // they are loop invariant w.r.t. the recurrence.
1580     SmallVector<const SCEV *, 8> LIOps;
1581     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1582     const Loop *AddRecLoop = AddRec->getLoop();
1583     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1584       if (isLoopInvariant(Ops[i], AddRecLoop)) {
1585         LIOps.push_back(Ops[i]);
1586         Ops.erase(Ops.begin()+i);
1587         --i; --e;
1588       }
1589 
1590     // If we found some loop invariants, fold them into the recurrence.
1591     if (!LIOps.empty()) {
1592       //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1593       LIOps.push_back(AddRec->getStart());
1594 
1595       SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1596                                              AddRec->op_end());
1597       AddRecOps[0] = getAddExpr(LIOps);
1598 
1599       // Build the new addrec. Propagate the NUW and NSW flags if both the
1600       // outer add and the inner addrec are guaranteed to have no overflow.
1601       const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
1602                                          HasNUW && AddRec->hasNoUnsignedWrap(),
1603                                          HasNSW && AddRec->hasNoSignedWrap());
1604 
1605       // If all of the other operands were loop invariant, we are done.
1606       if (Ops.size() == 1) return NewRec;
1607 
1608       // Otherwise, add the folded AddRec to the non-loop-invariant parts.
1609       for (unsigned i = 0;; ++i)
1610         if (Ops[i] == AddRec) {
1611           Ops[i] = NewRec;
1612           break;
1613         }
1614       return getAddExpr(Ops);
1615     }
1616 
1617     // Okay, if there weren't any loop invariants to be folded, check to see if
1618     // there are multiple AddRec's with the same loop induction variable being
1619     // added together.  If so, we can fold them.
1620     for (unsigned OtherIdx = Idx+1;
1621          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1622          ++OtherIdx)
1623       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
1624         // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
1625         SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1626                                                AddRec->op_end());
1627         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1628              ++OtherIdx)
1629           if (const SCEVAddRecExpr *OtherAddRec =
1630                 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
1631             if (OtherAddRec->getLoop() == AddRecLoop) {
1632               for (unsigned i = 0, e = OtherAddRec->getNumOperands();
1633                    i != e; ++i) {
1634                 if (i >= AddRecOps.size()) {
1635                   AddRecOps.append(OtherAddRec->op_begin()+i,
1636                                    OtherAddRec->op_end());
1637                   break;
1638                 }
1639                 AddRecOps[i] = getAddExpr(AddRecOps[i],
1640                                           OtherAddRec->getOperand(i));
1641               }
1642               Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
1643             }
1644         Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop);
1645         return getAddExpr(Ops);
1646       }
1647 
1648     // Otherwise couldn't fold anything into this recurrence.  Move on to the
1649     // next one.
1650   }
1651 
1652   // Okay, it looks like we really DO need an add expr.  Check to see if we
1653   // already have one, otherwise create a new one.
1654   FoldingSetNodeID ID;
1655   ID.AddInteger(scAddExpr);
1656   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1657     ID.AddPointer(Ops[i]);
1658   void *IP = 0;
1659   SCEVAddExpr *S =
1660     static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1661   if (!S) {
1662     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1663     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1664     S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
1665                                         O, Ops.size());
1666     UniqueSCEVs.InsertNode(S, IP);
1667   }
1668   if (HasNUW) S->setHasNoUnsignedWrap(true);
1669   if (HasNSW) S->setHasNoSignedWrap(true);
1670   return S;
1671 }
1672 
1673 /// getMulExpr - Get a canonical multiply expression, or something simpler if
1674 /// possible.
1675 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
1676                                         bool HasNUW, bool HasNSW) {
1677   assert(!Ops.empty() && "Cannot get empty mul!");
1678   if (Ops.size() == 1) return Ops[0];
1679 #ifndef NDEBUG
1680   const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1681   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1682     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1683            "SCEVMulExpr operand types don't match!");
1684 #endif
1685 
1686   // If HasNSW is true and all the operands are non-negative, infer HasNUW.
1687   if (!HasNUW && HasNSW) {
1688     bool All = true;
1689     for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
1690          E = Ops.end(); I != E; ++I)
1691       if (!isKnownNonNegative(*I)) {
1692         All = false;
1693         break;
1694       }
1695     if (All) HasNUW = true;
1696   }
1697 
1698   // Sort by complexity, this groups all similar expression types together.
1699   GroupByComplexity(Ops, LI);
1700 
1701   // If there are any constants, fold them together.
1702   unsigned Idx = 0;
1703   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1704 
1705     // C1*(C2+V) -> C1*C2 + C1*V
1706     if (Ops.size() == 2)
1707       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1708         if (Add->getNumOperands() == 2 &&
1709             isa<SCEVConstant>(Add->getOperand(0)))
1710           return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1711                             getMulExpr(LHSC, Add->getOperand(1)));
1712 
1713     ++Idx;
1714     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1715       // We found two constants, fold them together!
1716       ConstantInt *Fold = ConstantInt::get(getContext(),
1717                                            LHSC->getValue()->getValue() *
1718                                            RHSC->getValue()->getValue());
1719       Ops[0] = getConstant(Fold);
1720       Ops.erase(Ops.begin()+1);  // Erase the folded element
1721       if (Ops.size() == 1) return Ops[0];
1722       LHSC = cast<SCEVConstant>(Ops[0]);
1723     }
1724 
1725     // If we are left with a constant one being multiplied, strip it off.
1726     if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1727       Ops.erase(Ops.begin());
1728       --Idx;
1729     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1730       // If we have a multiply of zero, it will always be zero.
1731       return Ops[0];
1732     } else if (Ops[0]->isAllOnesValue()) {
1733       // If we have a mul by -1 of an add, try distributing the -1 among the
1734       // add operands.
1735       if (Ops.size() == 2)
1736         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
1737           SmallVector<const SCEV *, 4> NewOps;
1738           bool AnyFolded = false;
1739           for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
1740                I != E; ++I) {
1741             const SCEV *Mul = getMulExpr(Ops[0], *I);
1742             if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
1743             NewOps.push_back(Mul);
1744           }
1745           if (AnyFolded)
1746             return getAddExpr(NewOps);
1747         }
1748     }
1749 
1750     if (Ops.size() == 1)
1751       return Ops[0];
1752   }
1753 
1754   // Skip over the add expression until we get to a multiply.
1755   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1756     ++Idx;
1757 
1758   // If there are mul operands, inline them all into this expression.
1759   if (Idx < Ops.size()) {
1760     bool DeletedMul = false;
1761     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1762       // If we have a mul, expand the mul operands onto the end of the operands
1763       // list.
1764       Ops.erase(Ops.begin()+Idx);
1765       Ops.append(Mul->op_begin(), Mul->op_end());
1766       DeletedMul = true;
1767     }
1768 
1769     // If we deleted at least one mul, we added operands to the end of the list,
1770     // and they are not necessarily sorted.  Recurse to resort and resimplify
1771     // any operands we just acquired.
1772     if (DeletedMul)
1773       return getMulExpr(Ops);
1774   }
1775 
1776   // If there are any add recurrences in the operands list, see if any other
1777   // added values are loop invariant.  If so, we can fold them into the
1778   // recurrence.
1779   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1780     ++Idx;
1781 
1782   // Scan over all recurrences, trying to fold loop invariants into them.
1783   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1784     // Scan all of the other operands to this mul and add them to the vector if
1785     // they are loop invariant w.r.t. the recurrence.
1786     SmallVector<const SCEV *, 8> LIOps;
1787     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1788     const Loop *AddRecLoop = AddRec->getLoop();
1789     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1790       if (isLoopInvariant(Ops[i], AddRecLoop)) {
1791         LIOps.push_back(Ops[i]);
1792         Ops.erase(Ops.begin()+i);
1793         --i; --e;
1794       }
1795 
1796     // If we found some loop invariants, fold them into the recurrence.
1797     if (!LIOps.empty()) {
1798       //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1799       SmallVector<const SCEV *, 4> NewOps;
1800       NewOps.reserve(AddRec->getNumOperands());
1801       const SCEV *Scale = getMulExpr(LIOps);
1802       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1803         NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1804 
1805       // Build the new addrec. Propagate the NUW and NSW flags if both the
1806       // outer mul and the inner addrec are guaranteed to have no overflow.
1807       const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop,
1808                                          HasNUW && AddRec->hasNoUnsignedWrap(),
1809                                          HasNSW && AddRec->hasNoSignedWrap());
1810 
1811       // If all of the other operands were loop invariant, we are done.
1812       if (Ops.size() == 1) return NewRec;
1813 
1814       // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
1815       for (unsigned i = 0;; ++i)
1816         if (Ops[i] == AddRec) {
1817           Ops[i] = NewRec;
1818           break;
1819         }
1820       return getMulExpr(Ops);
1821     }
1822 
1823     // Okay, if there weren't any loop invariants to be folded, check to see if
1824     // there are multiple AddRec's with the same loop induction variable being
1825     // multiplied together.  If so, we can fold them.
1826     for (unsigned OtherIdx = Idx+1;
1827          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1828          ++OtherIdx)
1829       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
1830         // F * G, where F = {A,+,B}<L> and G = {C,+,D}<L>  -->
1831         // {A*C,+,F*D + G*B + B*D}<L>
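             // This is the usual chrec multiplication rule: per iteration F
             // advances by B and G by D, so the product advances by
             // (F+B)*(G+D) - F*G = F*D + G*B + B*D, which becomes the new step.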
1832         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1833              ++OtherIdx)
1834           if (const SCEVAddRecExpr *OtherAddRec =
1835                 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
1836             if (OtherAddRec->getLoop() == AddRecLoop) {
1837               const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1838               const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart());
1839               const SCEV *B = F->getStepRecurrence(*this);
1840               const SCEV *D = G->getStepRecurrence(*this);
1841               const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
1842                                                getMulExpr(G, B),
1843                                                getMulExpr(B, D));
1844               const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
1845                                                     F->getLoop());
1846               if (Ops.size() == 2) return NewAddRec;
1847               Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
1848               Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
1849             }
1850         return getMulExpr(Ops);
1851       }
1852 
1853     // Otherwise couldn't fold anything into this recurrence.  Move on to the
1854     // next one.
1855   }
1856 
1857   // Okay, it looks like we really DO need a mul expr.  Check to see if we
1858   // already have one, otherwise create a new one.
1859   FoldingSetNodeID ID;
1860   ID.AddInteger(scMulExpr);
1861   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1862     ID.AddPointer(Ops[i]);
1863   void *IP = 0;
1864   SCEVMulExpr *S =
1865     static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1866   if (!S) {
1867     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1868     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1869     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
1870                                         O, Ops.size());
1871     UniqueSCEVs.InsertNode(S, IP);
1872   }
1873   if (HasNUW) S->setHasNoUnsignedWrap(true);
1874   if (HasNSW) S->setHasNoSignedWrap(true);
1875   return S;
1876 }
1877 
1878 /// getUDivExpr - Get a canonical unsigned division expression, or something
1879 /// simpler if possible.
1880 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1881                                          const SCEV *RHS) {
1882   assert(getEffectiveSCEVType(LHS->getType()) ==
1883          getEffectiveSCEVType(RHS->getType()) &&
1884          "SCEVUDivExpr operand types don't match!");
1885 
1886   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1887     if (RHSC->getValue()->equalsInt(1))
1888       return LHS;                               // X udiv 1 --> X
1889     // If the denominator is zero, the result of the udiv is undefined. Don't
1890     // try to analyze it, because the resolution chosen here may differ from
1891     // the resolution chosen in other parts of the compiler.
1892     if (!RHSC->getValue()->isZero()) {
1893       // Determine if the division can be folded into the operands of
1894       // the dividend.
1895       // TODO: Generalize this to non-constants by using known-bits information.
1896       const Type *Ty = LHS->getType();
1897       unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1898       unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
1899       // For non-power-of-two values, effectively round the value up to the
1900       // nearest power of two.
1901       if (!RHSC->getValue()->getValue().isPowerOf2())
1902         ++MaxShiftAmt;
1903       const IntegerType *ExtTy =
1904         IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
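           // ExtTy is Ty widened by roughly log2(RHS) bits; each fold below is
           // attempted only when zero-extending the whole expression into ExtTy
           // matches the same expression rebuilt from zero-extended operands,
           // which rules out wrapping that would make per-operand division
           // unsound.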
1905       // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
1906       if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1907         if (const SCEVConstant *Step =
1908               dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1909           if (!Step->getValue()->getValue()
1910                 .urem(RHSC->getValue()->getValue()) &&
1911               getZeroExtendExpr(AR, ExtTy) ==
1912               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1913                             getZeroExtendExpr(Step, ExtTy),
1914                             AR->getLoop())) {
1915             SmallVector<const SCEV *, 4> Operands;
1916             for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1917               Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1918             return getAddRecExpr(Operands, AR->getLoop());
1919           }
1920       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1921       if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1922         SmallVector<const SCEV *, 4> Operands;
1923         for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1924           Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1925         if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1926           // Find an operand that's safely divisible.
1927           for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1928             const SCEV *Op = M->getOperand(i);
1929             const SCEV *Div = getUDivExpr(Op, RHSC);
1930             if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1931               Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
1932                                                       M->op_end());
1933               Operands[i] = Div;
1934               return getMulExpr(Operands);
1935             }
1936           }
1937       }
1938       // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1939       if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
1940         SmallVector<const SCEV *, 4> Operands;
1941         for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1942           Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1943         if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1944           Operands.clear();
1945           for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1946             const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1947             if (isa<SCEVUDivExpr>(Op) ||
1948                 getMulExpr(Op, RHS) != A->getOperand(i))
1949               break;
1950             Operands.push_back(Op);
1951           }
1952           if (Operands.size() == A->getNumOperands())
1953             return getAddExpr(Operands);
1954         }
1955       }
1956 
1957       // Fold if both operands are constant.
1958       if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1959         Constant *LHSCV = LHSC->getValue();
1960         Constant *RHSCV = RHSC->getValue();
1961         return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
1962                                                                    RHSCV)));
1963       }
1964     }
1965   }
1966 
1967   FoldingSetNodeID ID;
1968   ID.AddInteger(scUDivExpr);
1969   ID.AddPointer(LHS);
1970   ID.AddPointer(RHS);
1971   void *IP = 0;
1972   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1973   SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
1974                                              LHS, RHS);
1975   UniqueSCEVs.InsertNode(S, IP);
1976   return S;
1977 }
1978 
1979 
1980 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1981 /// Simplify the expression as much as possible.
1982 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1983                                            const SCEV *Step, const Loop *L,
1984                                            bool HasNUW, bool HasNSW) {
1985   SmallVector<const SCEV *, 4> Operands;
1986   Operands.push_back(Start);
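       // If the step is itself an addrec over the same loop, flatten it into a
       // higher-order chrec: {X,+,{A,+,B}<L>}<L> becomes {X,+,A,+,B}<L>.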
1987   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1988     if (StepChrec->getLoop() == L) {
1989       Operands.append(StepChrec->op_begin(), StepChrec->op_end());
1990       return getAddRecExpr(Operands, L);
1991     }
1992 
1993   Operands.push_back(Step);
1994   return getAddRecExpr(Operands, L, HasNUW, HasNSW);
1995 }
1996 
1997 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1998 /// Simplify the expression as much as possible.
1999 const SCEV *
2000 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2001                                const Loop *L,
2002                                bool HasNUW, bool HasNSW) {
2003   if (Operands.size() == 1) return Operands[0];
2004 #ifndef NDEBUG
2005   const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2006   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2007     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2008            "SCEVAddRecExpr operand types don't match!");
2009   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2010     assert(isLoopInvariant(Operands[i], L) &&
2011            "SCEVAddRecExpr operand is not loop-invariant!");
2012 #endif
2013 
2014   if (Operands.back()->isZero()) {
2015     Operands.pop_back();
2016     return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0}  -->  X
2017   }
2018 
2019   // It's tempting to call getMaxBackedgeTakenCount here and
2020   // use that information to infer NUW and NSW flags. However, computing a
2021   // BE count requires calling getAddRecExpr, so we may not yet have a
2022   // meaningful BE count at this point (and if we don't, we'd be stuck
2023   // with a SCEVCouldNotCompute as the cached BE count).
2024 
2025   // If HasNSW is true and all the operands are non-negative, infer HasNUW.
2026   if (!HasNUW && HasNSW) {
2027     bool All = true;
2028     for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
2029          E = Operands.end(); I != E; ++I)
2030       if (!isKnownNonNegative(*I)) {
2031         All = false;
2032         break;
2033       }
2034     if (All) HasNUW = true;
2035   }
2036 
2037   // Canonicalize nested AddRecs by nesting them in order of loop depth.
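       // For example (illustrative), {{S,+,X}<Inner>,+,Y}<Outer>, where Outer
       // encloses Inner, is rewritten as {{S,+,Y}<Outer>,+,X}<Inner>: the outer
       // loop's recurrence becomes the start of the inner loop's recurrence.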
2038   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2039     const Loop *NestedLoop = NestedAR->getLoop();
2040     if (L->contains(NestedLoop) ?
2041         (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
2042         (!NestedLoop->contains(L) &&
2043          DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
2044       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2045                                                   NestedAR->op_end());
2046       Operands[0] = NestedAR->getStart();
2047       // AddRecs require their operands be loop-invariant with respect to their
2048       // loops. Don't perform this transformation if it would break this
2049       // requirement.
2050       bool AllInvariant = true;
2051       for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2052         if (!isLoopInvariant(Operands[i], L)) {
2053           AllInvariant = false;
2054           break;
2055         }
2056       if (AllInvariant) {
2057         NestedOperands[0] = getAddRecExpr(Operands, L);
2058         AllInvariant = true;
2059         for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
2060           if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
2061             AllInvariant = false;
2062             break;
2063           }
2064         if (AllInvariant)
2065           // Ok, both add recurrences are valid after the transformation.
2066           return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW);
2067       }
2068       // Reset Operands to its original state.
2069       Operands[0] = NestedAR;
2070     }
2071   }
2072 
2073   // Okay, it looks like we really DO need an addrec expr.  Check to see if we
2074   // already have one, otherwise create a new one.
2075   FoldingSetNodeID ID;
2076   ID.AddInteger(scAddRecExpr);
2077   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2078     ID.AddPointer(Operands[i]);
2079   ID.AddPointer(L);
2080   void *IP = 0;
2081   SCEVAddRecExpr *S =
2082     static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2083   if (!S) {
2084     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2085     std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2086     S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2087                                            O, Operands.size(), L);
2088     UniqueSCEVs.InsertNode(S, IP);
2089   }
2090   if (HasNUW) S->setHasNoUnsignedWrap(true);
2091   if (HasNSW) S->setHasNoSignedWrap(true);
2092   return S;
2093 }
2094 
2095 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2096                                          const SCEV *RHS) {
2097   SmallVector<const SCEV *, 2> Ops;
2098   Ops.push_back(LHS);
2099   Ops.push_back(RHS);
2100   return getSMaxExpr(Ops);
2101 }
2102 
2103 const SCEV *
2104 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2105   assert(!Ops.empty() && "Cannot get empty smax!");
2106   if (Ops.size() == 1) return Ops[0];
2107 #ifndef NDEBUG
2108   const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2109   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2110     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2111            "SCEVSMaxExpr operand types don't match!");
2112 #endif
2113 
2114   // Sort by complexity, this groups all similar expression types together.
2115   GroupByComplexity(Ops, LI);
2116 
2117   // If there are any constants, fold them together.
2118   unsigned Idx = 0;
2119   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2120     ++Idx;
2121     assert(Idx < Ops.size());
2122     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2123       // We found two constants, fold them together!
2124       ConstantInt *Fold = ConstantInt::get(getContext(),
2125                               APIntOps::smax(LHSC->getValue()->getValue(),
2126                                              RHSC->getValue()->getValue()));
2127       Ops[0] = getConstant(Fold);
2128       Ops.erase(Ops.begin()+1);  // Erase the folded element
2129       if (Ops.size() == 1) return Ops[0];
2130       LHSC = cast<SCEVConstant>(Ops[0]);
2131     }
2132 
2133     // If we are left with a constant minimum-int, strip it off.
2134     if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
2135       Ops.erase(Ops.begin());
2136       --Idx;
2137     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
2138       // If we have an smax with a constant maximum-int, it will always be
2139       // maximum-int.
2140       return Ops[0];
2141     }
2142 
2143     if (Ops.size() == 1) return Ops[0];
2144   }
2145 
2146   // Find the first SMax
2147   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
2148     ++Idx;
2149 
2150   // Check to see if one of the operands is an SMax. If so, expand its operands
2151   // onto our operand list, and recurse to simplify.
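       // e.g. (illustrative) smax(smax(a, b), c) is flattened into smax(a, b, c).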
2152   if (Idx < Ops.size()) {
2153     bool DeletedSMax = false;
2154     while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
2155       Ops.erase(Ops.begin()+Idx);
2156       Ops.append(SMax->op_begin(), SMax->op_end());
2157       DeletedSMax = true;
2158     }
2159 
2160     if (DeletedSMax)
2161       return getSMaxExpr(Ops);
2162   }
2163 
2164   // Okay, check to see if the same value occurs in the operand list twice.  If
2165   // so, delete one.  Since we sorted the list, these values are required to
2166   // be adjacent.
2167   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2168     //  X smax Y smax Y  -->  X smax Y
2169     //  X smax Y         -->  X, if X is always greater than or equal to Y
2170     if (Ops[i] == Ops[i+1] ||
2171         isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
2172       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2173       --i; --e;
2174     } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
2175       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2176       --i; --e;
2177     }
2178 
2179   if (Ops.size() == 1) return Ops[0];
2180 
2181   assert(!Ops.empty() && "Reduced smax down to nothing!");
2182 
2183   // Okay, it looks like we really DO need an smax expr.  Check to see if we
2184   // already have one, otherwise create a new one.
2185   FoldingSetNodeID ID;
2186   ID.AddInteger(scSMaxExpr);
2187   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2188     ID.AddPointer(Ops[i]);
2189   void *IP = 0;
2190   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2191   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2192   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2193   SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
2194                                              O, Ops.size());
2195   UniqueSCEVs.InsertNode(S, IP);
2196   return S;
2197 }
2198 
2199 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
2200                                          const SCEV *RHS) {
2201   SmallVector<const SCEV *, 2> Ops;
2202   Ops.push_back(LHS);
2203   Ops.push_back(RHS);
2204   return getUMaxExpr(Ops);
2205 }
2206 
2207 const SCEV *
2208 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2209   assert(!Ops.empty() && "Cannot get empty umax!");
2210   if (Ops.size() == 1) return Ops[0];
2211 #ifndef NDEBUG
2212   const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2213   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2214     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2215            "SCEVUMaxExpr operand types don't match!");
2216 #endif
2217 
2218   // Sort by complexity, this groups all similar expression types together.
2219   GroupByComplexity(Ops, LI);
2220 
2221   // If there are any constants, fold them together.
2222   unsigned Idx = 0;
2223   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2224     ++Idx;
2225     assert(Idx < Ops.size());
2226     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2227       // We found two constants, fold them together!
2228       ConstantInt *Fold = ConstantInt::get(getContext(),
2229                               APIntOps::umax(LHSC->getValue()->getValue(),
2230                                              RHSC->getValue()->getValue()));
2231       Ops[0] = getConstant(Fold);
2232       Ops.erase(Ops.begin()+1);  // Erase the folded element
2233       if (Ops.size() == 1) return Ops[0];
2234       LHSC = cast<SCEVConstant>(Ops[0]);
2235     }
2236 
2237     // If we are left with a constant minimum-int, strip it off.
2238     if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
2239       Ops.erase(Ops.begin());
2240       --Idx;
2241     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
2242       // If we have an umax with a constant maximum-int, it will always be
2243       // maximum-int.
2244       return Ops[0];
2245     }
2246 
2247     if (Ops.size() == 1) return Ops[0];
2248   }
2249 
2250   // Find the first UMax
2251   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
2252     ++Idx;
2253 
2254   // Check to see if one of the operands is a UMax. If so, expand its operands
2255   // onto our operand list, and recurse to simplify.
2256   if (Idx < Ops.size()) {
2257     bool DeletedUMax = false;
2258     while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
2259       Ops.erase(Ops.begin()+Idx);
2260       Ops.append(UMax->op_begin(), UMax->op_end());
2261       DeletedUMax = true;
2262     }
2263 
2264     if (DeletedUMax)
2265       return getUMaxExpr(Ops);
2266   }
2267 
2268   // Okay, check to see if the same value occurs in the operand list twice.  If
2269   // so, delete one.  Since we sorted the list, these values are required to
2270   // be adjacent.
2271   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2272     //  X umax Y umax Y  -->  X umax Y
2273     //  X umax Y         -->  X, if X is always greater than or equal to Y
2274     if (Ops[i] == Ops[i+1] ||
2275         isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
2276       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2277       --i; --e;
2278     } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
2279       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2280       --i; --e;
2281     }
2282 
2283   if (Ops.size() == 1) return Ops[0];
2284 
2285   assert(!Ops.empty() && "Reduced umax down to nothing!");
2286 
2287   // Okay, it looks like we really DO need a umax expr.  Check to see if we
2288   // already have one, otherwise create a new one.
2289   FoldingSetNodeID ID;
2290   ID.AddInteger(scUMaxExpr);
2291   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2292     ID.AddPointer(Ops[i]);
2293   void *IP = 0;
2294   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2295   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2296   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2297   SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
2298                                              O, Ops.size());
2299   UniqueSCEVs.InsertNode(S, IP);
2300   return S;
2301 }
2302 
2303 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2304                                          const SCEV *RHS) {
2305   // ~smax(~x, ~y) == smin(x, y).
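       // This holds because ~z = -1 - z reverses the signed order (x <= y iff
       // ~y <= ~x), so the complement of the max of the complements is the min.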
2306   return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2307 }
2308 
2309 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2310                                          const SCEV *RHS) {
2311   // ~umax(~x, ~y) == umin(x, y)
2312   return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2313 }
2314 
2315 const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
2316   // If we have TargetData, we can bypass creating a target-independent
2317   // constant expression and then folding it back into a ConstantInt.
2318   // This is just a compile-time optimization.
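       // (For instance, on a typical target sizeof({i32, i8}) folds directly to
       // the ConstantInt 8, padding included, rather than going through a
       // ConstantExpr::getSizeOf node first.)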
2319   if (TD)
2320     return getConstant(TD->getIntPtrType(getContext()),
2321                        TD->getTypeAllocSize(AllocTy));
2322 
2323   Constant *C = ConstantExpr::getSizeOf(AllocTy);
2324   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2325     if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2326       C = Folded;
2327   const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2328   return getTruncateOrZeroExtend(getSCEV(C), Ty);
2329 }
2330 
2331 const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
2332   Constant *C = ConstantExpr::getAlignOf(AllocTy);
2333   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2334     if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2335       C = Folded;
2336   const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2337   return getTruncateOrZeroExtend(getSCEV(C), Ty);
2338 }
2339 
2340 const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
2341                                              unsigned FieldNo) {
2342   // If we have TargetData, we can bypass creating a target-independent
2343   // constant expression and then folding it back into a ConstantInt.
2344   // This is just a compile-time optimization.
2345   if (TD)
2346     return getConstant(TD->getIntPtrType(getContext()),
2347                        TD->getStructLayout(STy)->getElementOffset(FieldNo));
2348 
2349   Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
2350   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2351     if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2352       C = Folded;
2353   const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
2354   return getTruncateOrZeroExtend(getSCEV(C), Ty);
2355 }
2356 
2357 const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
2358                                              Constant *FieldNo) {
2359   Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
2360   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2361     if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2362       C = Folded;
2363   const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
2364   return getTruncateOrZeroExtend(getSCEV(C), Ty);
2365 }
2366 
2367 const SCEV *ScalarEvolution::getUnknown(Value *V) {
2368   // Don't attempt to do anything other than create a SCEVUnknown object
2369   // here.  createSCEV only calls getUnknown after checking for all other
2370   // interesting possibilities, and any other code that calls getUnknown
2371   // is doing so in order to hide a value from SCEV canonicalization.
2372 
2373   FoldingSetNodeID ID;
2374   ID.AddInteger(scUnknown);
2375   ID.AddPointer(V);
2376   void *IP = 0;
2377   if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
2378     assert(cast<SCEVUnknown>(S)->getValue() == V &&
2379            "Stale SCEVUnknown in uniquing map!");
2380     return S;
2381   }
2382   SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
2383                                             FirstUnknown);
2384   FirstUnknown = cast<SCEVUnknown>(S);
2385   UniqueSCEVs.InsertNode(S, IP);
2386   return S;
2387 }
2388 
2389 //===----------------------------------------------------------------------===//
2390 //            Basic SCEV Analysis and PHI Idiom Recognition Code
2391 //
2392 
2393 /// isSCEVable - Test if values of the given type are analyzable within
2394 /// the SCEV framework. This includes integer types and pointer types;
2395 /// pointer types are analyzable even without target-specific information,
2396 /// in which case a conservative default pointer size is assumed.
2397 bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2398   // Integers and pointers are always SCEVable.
2399   return Ty->isIntegerTy() || Ty->isPointerTy();
2400 }
2401 
2402 /// getTypeSizeInBits - Return the size in bits of the specified type,
2403 /// for which isSCEVable must return true.
2404 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2405   assert(isSCEVable(Ty) && "Type is not SCEVable!");
2406 
2407   // If we have a TargetData, use it!
2408   if (TD)
2409     return TD->getTypeSizeInBits(Ty);
2410 
2411   // Integer types have fixed sizes.
2412   if (Ty->isIntegerTy())
2413     return Ty->getPrimitiveSizeInBits();
2414 
2415   // The only other supported type is pointer. Without TargetData, conservatively
2416   // assume pointers are 64-bit.
2417   assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
2418   return 64;
2419 }
2420 
2421 /// getEffectiveSCEVType - Return a type with the same bitwidth as
2422 /// the given type and which represents how SCEV will treat the given
2423 /// type, for which isSCEVable must return true. For pointer types,
2424 /// this is the pointer-sized integer type.
2425 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2426   assert(isSCEVable(Ty) && "Type is not SCEVable!");
2427 
2428   if (Ty->isIntegerTy())
2429     return Ty;
2430 
2431   // The only other supported type is pointer.
2432   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
2433   if (TD) return TD->getIntPtrType(getContext());
2434 
2435   // Without TargetData, conservatively assume pointers are 64-bit.
2436   return Type::getInt64Ty(getContext());
2437 }
2438 
2439 const SCEV *ScalarEvolution::getCouldNotCompute() {
2440   return &CouldNotCompute;
2441 }
2442 
2443 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2444 /// expression and create a new one.
2445 const SCEV *ScalarEvolution::getSCEV(Value *V) {
2446   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2447 
2448   ValueExprMapType::const_iterator I = ValueExprMap.find(V);
2449   if (I != ValueExprMap.end()) return I->second;
2450   const SCEV *S = createSCEV(V);
2451 
2452   // The process of creating a SCEV for V may have caused other SCEVs
2453   // to have been created, so it's necessary to insert the new entry
2454   // from scratch, rather than trying to remember the insert position
2455   // above.
2456   ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2457   return S;
2458 }
2459 
2460 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2461 ///
2462 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2463   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2464     return getConstant(
2465                cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2466 
2467   const Type *Ty = V->getType();
2468   Ty = getEffectiveSCEVType(Ty);
2469   return getMulExpr(V,
2470                   getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
2471 }
2472 
2473 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2474 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2475   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2476     return getConstant(
2477                 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2478 
2479   const Type *Ty = V->getType();
2480   Ty = getEffectiveSCEVType(Ty);
2481   const SCEV *AllOnes =
2482     getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
2483   return getMinusSCEV(AllOnes, V);
2484 }
2485 
2486 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2487 ///
2488 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2489                                           const SCEV *RHS) {
2490   // Fast path: X - X --> 0.
2491   if (LHS == RHS)
2492     return getConstant(LHS->getType(), 0);
2493 
2494   // X - Y --> X + -Y
2495   return getAddExpr(LHS, getNegativeSCEV(RHS));
2496 }
2497 
2498 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2499 /// input value to the specified type.  If the type must be extended, it is zero
2500 /// extended.
2501 const SCEV *
2502 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2503                                          const Type *Ty) {
2504   const Type *SrcTy = V->getType();
2505   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2506          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2507          "Cannot truncate or zero extend with non-integer arguments!");
2508   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2509     return V;  // No conversion
2510   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2511     return getTruncateExpr(V, Ty);
2512   return getZeroExtendExpr(V, Ty);
2513 }
2514 
2515 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2516 /// input value to the specified type.  If the type must be extended, it is sign
2517 /// extended.
2518 const SCEV *
2519 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2520                                          const Type *Ty) {
2521   const Type *SrcTy = V->getType();
2522   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2523          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2524          "Cannot truncate or sign extend with non-integer arguments!");
2525   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2526     return V;  // No conversion
2527   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2528     return getTruncateExpr(V, Ty);
2529   return getSignExtendExpr(V, Ty);
2530 }
2531 
2532 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2533 /// input value to the specified type.  If the type must be extended, it is zero
2534 /// extended.  The conversion must not be narrowing.
2535 const SCEV *
2536 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2537   const Type *SrcTy = V->getType();
2538   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2539          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2540          "Cannot noop or zero extend with non-integer arguments!");
2541   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2542          "getNoopOrZeroExtend cannot truncate!");
2543   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2544     return V;  // No conversion
2545   return getZeroExtendExpr(V, Ty);
2546 }
2547 
2548 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2549 /// input value to the specified type.  If the type must be extended, it is sign
2550 /// extended.  The conversion must not be narrowing.
2551 const SCEV *
2552 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2553   const Type *SrcTy = V->getType();
2554   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2555          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2556          "Cannot noop or sign extend with non-integer arguments!");
2557   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2558          "getNoopOrSignExtend cannot truncate!");
2559   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2560     return V;  // No conversion
2561   return getSignExtendExpr(V, Ty);
2562 }
2563 
2564 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2565 /// the input value to the specified type. If the type must be extended,
2566 /// it is extended with unspecified bits. The conversion must not be
2567 /// narrowing.
2568 const SCEV *
2569 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2570   const Type *SrcTy = V->getType();
2571   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2572          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2573          "Cannot noop or any extend with non-integer arguments!");
2574   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2575          "getNoopOrAnyExtend cannot truncate!");
2576   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2577     return V;  // No conversion
2578   return getAnyExtendExpr(V, Ty);
2579 }
2580 
2581 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2582 /// input value to the specified type.  The conversion must not be widening.
2583 const SCEV *
2584 ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2585   const Type *SrcTy = V->getType();
2586   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2587          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2588          "Cannot truncate or noop with non-integer arguments!");
2589   assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2590          "getTruncateOrNoop cannot extend!");
2591   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2592     return V;  // No conversion
2593   return getTruncateExpr(V, Ty);
2594 }
2595 
2596 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2597 /// the types using zero-extension, and then perform a umax operation
2598 /// with them.
2599 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2600                                                         const SCEV *RHS) {
2601   const SCEV *PromotedLHS = LHS;
2602   const SCEV *PromotedRHS = RHS;
2603 
2604   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2605     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2606   else
2607     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2608 
2609   return getUMaxExpr(PromotedLHS, PromotedRHS);
2610 }
2611 
2612 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
2613 /// the types using zero-extension, and then perform a umin operation
2614 /// with them.
2615 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2616                                                         const SCEV *RHS) {
2617   const SCEV *PromotedLHS = LHS;
2618   const SCEV *PromotedRHS = RHS;
2619 
2620   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2621     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2622   else
2623     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2624 
2625   return getUMinExpr(PromotedLHS, PromotedRHS);
2626 }
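
// For example, given an i32 LHS and an i64 RHS, both helpers above first
// zero-extend the i32 operand to i64 and then build the umax/umin on the
// common i64 type.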
2627 
2628 /// PushDefUseChildren - Push users of the given Instruction
2629 /// onto the given Worklist.
2630 static void
2631 PushDefUseChildren(Instruction *I,
2632                    SmallVectorImpl<Instruction *> &Worklist) {
2633   // Push the def-use children onto the Worklist stack.
2634   for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2635        UI != UE; ++UI)
2636     Worklist.push_back(cast<Instruction>(*UI));
2637 }
2638 
2639 /// ForgetSymbolicName - This looks up computed SCEV values for all
2640 /// instructions that depend on the given instruction and removes them from
2641 /// the ValueExprMap if they reference SymName. This is used during PHI
2642 /// resolution.
2643 void
2644 ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
2645   SmallVector<Instruction *, 16> Worklist;
2646   PushDefUseChildren(PN, Worklist);
2647 
2648   SmallPtrSet<Instruction *, 8> Visited;
2649   Visited.insert(PN);
2650   while (!Worklist.empty()) {
2651     Instruction *I = Worklist.pop_back_val();
2652     if (!Visited.insert(I)) continue;
2653 
2654     ValueExprMapType::iterator It =
2655       ValueExprMap.find(static_cast<Value *>(I));
2656     if (It != ValueExprMap.end()) {
2657       const SCEV *Old = It->second;
2658 
2659       // Short-circuit the def-use traversal if the symbolic name
2660       // ceases to appear in expressions.
2661       if (Old != SymName && !Old->hasOperand(SymName))
2662         continue;
2663 
2664       // SCEVUnknown for a PHI either means that it has an unrecognized
2665       // structure, it's a PHI that's in the process of being computed
2666       // by createNodeForPHI, or it's a single-value PHI. In the first case,
2667       // additional loop trip count information isn't going to change anything.
2668       // In the second case, createNodeForPHI will perform the necessary
2669       // updates on its own when it gets to that point. In the third, we do
2670       // want to forget the SCEVUnknown.
2671       if (!isa<PHINode>(I) ||
2672           !isa<SCEVUnknown>(Old) ||
2673           (I != PN && Old == SymName)) {
2674         ValuesAtScopes.erase(Old);
2675         UnsignedRanges.erase(Old);
2676         SignedRanges.erase(Old);
2677         ValueExprMap.erase(It);
2678       }
2679     }
2680 
2681     PushDefUseChildren(I, Worklist);
2682   }
2683 }
2684 
2685 /// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2686 /// a loop header, making it a potential recurrence, or it doesn't.
2687 ///
2688 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2689   if (const Loop *L = LI->getLoopFor(PN->getParent()))
2690     if (L->getHeader() == PN->getParent()) {
2691       // The loop may have multiple entrances or multiple exits; we can analyze
2692       // this phi as an addrec if it has a unique entry value and a unique
2693       // backedge value.
2694       Value *BEValueV = 0, *StartValueV = 0;
2695       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
2696         Value *V = PN->getIncomingValue(i);
2697         if (L->contains(PN->getIncomingBlock(i))) {
2698           if (!BEValueV) {
2699             BEValueV = V;
2700           } else if (BEValueV != V) {
2701             BEValueV = 0;
2702             break;
2703           }
2704         } else if (!StartValueV) {
2705           StartValueV = V;
2706         } else if (StartValueV != V) {
2707           StartValueV = 0;
2708           break;
2709         }
2710       }
2711       if (BEValueV && StartValueV) {
2712         // While we are analyzing this PHI node, handle its value symbolically.
2713         const SCEV *SymbolicName = getUnknown(PN);
2714         assert(ValueExprMap.find(PN) == ValueExprMap.end() &&
2715                "PHI node already processed?");
2716         ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2717 
2718         // Using this symbolic name for the PHI, analyze the value coming around
2719         // the back-edge.
2720         const SCEV *BEValue = getSCEV(BEValueV);
2721 
2722         // NOTE: If BEValue is loop invariant, we know that the PHI node just
2723         // has a special value for the first iteration of the loop.
2724 
2725         // If the value coming around the backedge is an add with the symbolic
2726         // value we just inserted, then we found a simple induction variable!
2727         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2728           // If there is a single occurrence of the symbolic value, replace it
2729           // with a recurrence.
2730           unsigned FoundIndex = Add->getNumOperands();
2731           for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2732             if (Add->getOperand(i) == SymbolicName)
2733               if (FoundIndex == e) {
2734                 FoundIndex = i;
2735                 break;
2736               }
2737 
2738           if (FoundIndex != Add->getNumOperands()) {
2739             // Create an add with everything but the specified operand.
2740             SmallVector<const SCEV *, 8> Ops;
2741             for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2742               if (i != FoundIndex)
2743                 Ops.push_back(Add->getOperand(i));
2744             const SCEV *Accum = getAddExpr(Ops);
2745 
2746             // This is not a valid addrec if the step amount varies from one
2747             // loop iteration to the next but is not itself an addrec in this loop.
2748             if (isLoopInvariant(Accum, L) ||
2749                 (isa<SCEVAddRecExpr>(Accum) &&
2750                  cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2751               bool HasNUW = false;
2752               bool HasNSW = false;
2753 
2754               // If the increment doesn't overflow, then neither the addrec nor
2755               // the post-increment will overflow.
2756               if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
2757                 if (OBO->hasNoUnsignedWrap())
2758                   HasNUW = true;
2759                 if (OBO->hasNoSignedWrap())
2760                   HasNSW = true;
2761               }
2762 
2763               const SCEV *StartVal = getSCEV(StartValueV);
2764               const SCEV *PHISCEV =
2765                 getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
2766 
2767               // Since the no-wrap flags are on the increment, they apply to the
2768               // post-incremented value as well.
2769               if (isLoopInvariant(Accum, L))
2770                 (void)getAddRecExpr(getAddExpr(StartVal, Accum),
2771                                     Accum, L, HasNUW, HasNSW);
2772 
2773               // Okay, for the entire analysis of this edge we assumed the PHI
2774               // to be symbolic.  We now need to go back and purge all of the
2775               // entries for the scalars that use the symbolic expression.
2776               ForgetSymbolicName(PN, SymbolicName);
2777               ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
2778               return PHISCEV;
2779             }
2780           }
2781         } else if (const SCEVAddRecExpr *AddRec =
2782                      dyn_cast<SCEVAddRecExpr>(BEValue)) {
2783           // Otherwise, this could be a loop like this:
2784           //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2785           // In this case, j = {1,+,1}  and BEValue is j.
2786           // Because the other in-value of i (0) fits the evolution of BEValue,
2787           // i really is an addrec evolution.
2788           if (AddRec->getLoop() == L && AddRec->isAffine()) {
2789             const SCEV *StartVal = getSCEV(StartValueV);
2790 
2791             // If StartVal = j.start - j.stride, we can use StartVal as the
2792             // starting value of the addrec evolution.
2793             if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2794                                          AddRec->getOperand(1))) {
2795               const SCEV *PHISCEV =
2796                  getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2797 
2798               // Okay, for the entire analysis of this edge we assumed the PHI
2799               // to be symbolic.  We now need to go back and purge all of the
2800               // entries for the scalars that use the symbolic expression.
2801               ForgetSymbolicName(PN, SymbolicName);
2802               ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
2803               return PHISCEV;
2804             }
2805           }
2806         }
2807       }
2808     }
2809 
2810   // If the PHI has a single incoming value, follow that value, unless the
2811   // PHI's incoming blocks are in a different loop, in which case doing so
2812   // risks breaking LCSSA form. Instcombine would normally zap these, but
2813   // it doesn't have DominatorTree information, so it may miss cases.
2814   if (Value *V = SimplifyInstruction(PN, TD, DT)) {
2815     Instruction *I = dyn_cast<Instruction>(V);
2816     // Only instructions are problematic for preserving LCSSA form.
2817     if (!I)
2818       return getSCEV(V);
2819 
2820     // If the instruction is not defined in a loop, then it can be used freely.
2821     Loop *ILoop = LI->getLoopFor(I->getParent());
2822     if (!ILoop)
2823       return getSCEV(I);
2824 
2825     // If the instruction is defined in the same loop as the phi node, or in a
2826     // loop that contains the phi node loop as an inner loop, then using it as
2827     // a replacement for the phi node will not break LCSSA form.
2828     Loop *PNLoop = LI->getLoopFor(PN->getParent());
2829     if (ILoop->contains(PNLoop))
2830       return getSCEV(I);
2831   }
2832 
2833   // If it's not a loop phi, we can't handle it yet.
2834   return getUnknown(PN);
2835 }
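
// Illustrative example, assuming IR of the form
//
//   loop:
//     %i      = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//     %i.next = add nsw i32 %i, 1
//
// The backedge value %i.next is an add of the symbolic PHI and 1, so the
// SCEVAddExpr case above recognizes the recurrence and returns
// {0,+,1}<%loop>, carrying the nsw flag over from the increment.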
2836 
2837 /// createNodeForGEP - Expand GEP instructions into add and multiply
2838 /// operations. This allows them to be analyzed by regular SCEV code.
2839 ///
2840 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
2841 
2842   // Don't blindly transfer the inbounds flag from the GEP instruction to the
2843   // Add expression, because the Instruction may be guarded by control flow
2844   // and the no-overflow bits may not be valid for the expression in any
2845   // context.
2846 
2847   const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
2848   Value *Base = GEP->getOperand(0);
2849   // Don't attempt to analyze GEPs over unsized objects.
2850   if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2851     return getUnknown(GEP);
2852   const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
2853   gep_type_iterator GTI = gep_type_begin(GEP);
2854   for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
2855                                       E = GEP->op_end();
2856        I != E; ++I) {
2857     Value *Index = *I;
2858     // Compute the (potentially symbolic) offset in bytes for this index.
2859     if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2860       // For a struct, add the member offset.
2861       unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2862       const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
2863 
2864       // Add the field offset to the running total offset.
2865       TotalOffset = getAddExpr(TotalOffset, FieldOffset);
2866     } else {
2867       // For an array, add the element offset, explicitly scaled.
2868       const SCEV *ElementSize = getSizeOfExpr(*GTI);
2869       const SCEV *IndexS = getSCEV(Index);
2870       // Getelementptr indices are signed.
2871       IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
2872 
2873       // Multiply the index by the element size to compute the element offset.
2874       const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize);
2875 
2876       // Add the element offset to the running total offset.
2877       TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2878     }
2879   }
2880 
2881   // Get the SCEV for the GEP base.
2882   const SCEV *BaseS = getSCEV(Base);
2883 
2884   // Add the total offset from all the GEP indices to the base.
2885   return getAddExpr(BaseS, TotalOffset);
2886 }
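
// Illustrative example, assuming IR of the form
//
//   %p = getelementptr [10 x i32]* %a, i64 0, i64 %i
//
// With TargetData available, the loop above yields roughly
// (%a + 0 * 40 + 4 * (sext %i)); that is, the GEP is modeled as plain
// integer pointer arithmetic so the usual add/mul folders can simplify it.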
2887 
2888 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2889 /// guaranteed to end in (at every loop iteration).  It is, at the same time,
2890 /// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2891 /// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
2892 uint32_t
2893 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2894   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2895     return C->getValue()->getValue().countTrailingZeros();
2896 
2897   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2898     return std::min(GetMinTrailingZeros(T->getOperand()),
2899                     (uint32_t)getTypeSizeInBits(T->getType()));
2900 
2901   if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2902     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2903     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2904              getTypeSizeInBits(E->getType()) : OpRes;
2905   }
2906 
2907   if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2908     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2909     return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2910              getTypeSizeInBits(E->getType()) : OpRes;
2911   }
2912 
2913   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2914     // The result is the min of all the operands' results.
2915     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2916     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2917       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2918     return MinOpRes;
2919   }
2920 
2921   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2922     // The result is the sum of all the operands' results.
2923     uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2924     uint32_t BitWidth = getTypeSizeInBits(M->getType());
2925     for (unsigned i = 1, e = M->getNumOperands();
2926          SumOpRes != BitWidth && i != e; ++i)
2927       SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2928                           BitWidth);
2929     return SumOpRes;
2930   }
2931 
2932   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2933     // The result is the min of all the operands' results.
2934     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2935     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2936       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2937     return MinOpRes;
2938   }
2939 
2940   if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2941     // The result is the min of all the operands' results.
2942     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2943     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2944       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2945     return MinOpRes;
2946   }
2947 
2948   if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2949     // The result is the min of all the operands' results.
2950     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2951     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2952       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2953     return MinOpRes;
2954   }
2955 
2956   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2957     // For a SCEVUnknown, ask ValueTracking.
2958     unsigned BitWidth = getTypeSizeInBits(U->getType());
2959     APInt Mask = APInt::getAllOnesValue(BitWidth);
2960     APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2961     ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2962     return Zeros.countTrailingOnes();
2963   }
2964 
2965   // SCEVUDivExpr
2966   return 0;
2967 }
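
// Worked example: for S = {4,+,8}<L> the addrec case above returns
// min(tz(4), tz(8)) = min(2, 3) = 2, so every value S takes is divisible by
// 4. For a product such as (4 * %x) the mul case adds the operands' counts,
// giving 2 + GetMinTrailingZeros(%x), capped at the bit width.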
2968 
2969 /// getUnsignedRange - Determine the unsigned range for a particular SCEV.
2970 ///
2971 ConstantRange
2972 ScalarEvolution::getUnsignedRange(const SCEV *S) {
2973   // See if we've computed this range already.
2974   DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
2975   if (I != UnsignedRanges.end())
2976     return I->second;
2977 
2978   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2979     return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));
2980 
2981   unsigned BitWidth = getTypeSizeInBits(S->getType());
2982   ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
2983 
2984   // If the value has known zeros, the maximum unsigned value will have those
2985   // known zeros as well.
2986   uint32_t TZ = GetMinTrailingZeros(S);
2987   if (TZ != 0)
2988     ConservativeResult =
2989       ConstantRange(APInt::getMinValue(BitWidth),
2990                     APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
2991 
2992   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2993     ConstantRange X = getUnsignedRange(Add->getOperand(0));
2994     for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2995       X = X.add(getUnsignedRange(Add->getOperand(i)));
2996     return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
2997   }
2998 
2999   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3000     ConstantRange X = getUnsignedRange(Mul->getOperand(0));
3001     for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3002       X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
3003     return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
3004   }
3005 
3006   if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3007     ConstantRange X = getUnsignedRange(SMax->getOperand(0));
3008     for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3009       X = X.smax(getUnsignedRange(SMax->getOperand(i)));
3010     return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
3011   }
3012 
3013   if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3014     ConstantRange X = getUnsignedRange(UMax->getOperand(0));
3015     for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3016       X = X.umax(getUnsignedRange(UMax->getOperand(i)));
3017     return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
3018   }
3019 
3020   if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3021     ConstantRange X = getUnsignedRange(UDiv->getLHS());
3022     ConstantRange Y = getUnsignedRange(UDiv->getRHS());
3023     return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
3024   }
3025 
3026   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3027     ConstantRange X = getUnsignedRange(ZExt->getOperand());
3028     return setUnsignedRange(ZExt,
3029       ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
3030   }
3031 
3032   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3033     ConstantRange X = getUnsignedRange(SExt->getOperand());
3034     return setUnsignedRange(SExt,
3035       ConservativeResult.intersectWith(X.signExtend(BitWidth)));
3036   }
3037 
3038   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3039     ConstantRange X = getUnsignedRange(Trunc->getOperand());
3040     return setUnsignedRange(Trunc,
3041       ConservativeResult.intersectWith(X.truncate(BitWidth)));
3042   }
3043 
3044   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3045     // If there's no unsigned wrap, the value will never be less than its
3046     // initial value.
3047     if (AddRec->hasNoUnsignedWrap())
3048       if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
3049         if (!C->getValue()->isZero())
3050           ConservativeResult =
3051             ConservativeResult.intersectWith(
3052               ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
3053 
3054     // TODO: non-affine addrec
3055     if (AddRec->isAffine()) {
3056       const Type *Ty = AddRec->getType();
3057       const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3058       if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3059           getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3060         MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3061 
3062         const SCEV *Start = AddRec->getStart();
3063         const SCEV *Step = AddRec->getStepRecurrence(*this);
3064 
3065         ConstantRange StartRange = getUnsignedRange(Start);
3066         ConstantRange StepRange = getSignedRange(Step);
3067         ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3068         ConstantRange EndRange =
3069           StartRange.add(MaxBECountRange.multiply(StepRange));
3070 
3071         // Check for overflow. This must be done with ConstantRange arithmetic
3072         // because we could be called from within the ScalarEvolution overflow
3073         // checking code.
3074         ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
3075         ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3076         ConstantRange ExtMaxBECountRange =
3077           MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3078         ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
3079         if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3080             ExtEndRange)
3081           return setUnsignedRange(AddRec, ConservativeResult);
3082 
3083         APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
3084                                    EndRange.getUnsignedMin());
3085         APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
3086                                    EndRange.getUnsignedMax());
3087         if (Min.isMinValue() && Max.isMaxValue())
3088           return setUnsignedRange(AddRec, ConservativeResult);
3089         return setUnsignedRange(AddRec,
3090           ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
3091       }
3092     }
3093 
3094     return setUnsignedRange(AddRec, ConservativeResult);
3095   }
3096 
3097   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3098     // For a SCEVUnknown, ask ValueTracking.
3099     APInt Mask = APInt::getAllOnesValue(BitWidth);
3100     APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
3101     ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
3102     if (Ones == ~Zeros + 1)
3103       return setUnsignedRange(U, ConservativeResult);
3104     return setUnsignedRange(U,
3105       ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
3106   }
3107 
3108   return setUnsignedRange(S, ConservativeResult);
3109 }
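
// Worked example for the affine addrec case above: for {0,+,1}<L> over i32
// with a known maximum backedge-taken count of 9, StartRange = [0,1),
// StepRange = [1,2) and MaxBECountRange = [9,10), so EndRange = [9,10).
// The widened ranges confirm that no overflow occurred, and the resulting
// unsigned range is [umin(0,9), umax(0,9)+1) = [0,10).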
3110 
3111 /// getSignedRange - Determine the signed range for a particular SCEV.
3112 ///
3113 ConstantRange
3114 ScalarEvolution::getSignedRange(const SCEV *S) {
3115   DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
3116   if (I != SignedRanges.end())
3117     return I->second;
3118 
3119   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3120     return setSignedRange(C, ConstantRange(C->getValue()->getValue()));
3121 
3122   unsigned BitWidth = getTypeSizeInBits(S->getType());
3123   ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3124 
3125   // If the value has known zeros, the maximum signed value will have those
3126   // known zeros as well.
3127   uint32_t TZ = GetMinTrailingZeros(S);
3128   if (TZ != 0)
3129     ConservativeResult =
3130       ConstantRange(APInt::getSignedMinValue(BitWidth),
3131                     APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
3132 
3133   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3134     ConstantRange X = getSignedRange(Add->getOperand(0));
3135     for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3136       X = X.add(getSignedRange(Add->getOperand(i)));
3137     return setSignedRange(Add, ConservativeResult.intersectWith(X));
3138   }
3139 
3140   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3141     ConstantRange X = getSignedRange(Mul->getOperand(0));
3142     for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3143       X = X.multiply(getSignedRange(Mul->getOperand(i)));
3144     return setSignedRange(Mul, ConservativeResult.intersectWith(X));
3145   }
3146 
3147   if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3148     ConstantRange X = getSignedRange(SMax->getOperand(0));
3149     for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3150       X = X.smax(getSignedRange(SMax->getOperand(i)));
3151     return setSignedRange(SMax, ConservativeResult.intersectWith(X));
3152   }
3153 
3154   if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3155     ConstantRange X = getSignedRange(UMax->getOperand(0));
3156     for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3157       X = X.umax(getSignedRange(UMax->getOperand(i)));
3158     return setSignedRange(UMax, ConservativeResult.intersectWith(X));
3159   }
3160 
3161   if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3162     ConstantRange X = getSignedRange(UDiv->getLHS());
3163     ConstantRange Y = getSignedRange(UDiv->getRHS());
3164     return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
3165   }
3166 
3167   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3168     ConstantRange X = getSignedRange(ZExt->getOperand());
3169     return setSignedRange(ZExt,
3170       ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
3171   }
3172 
3173   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3174     ConstantRange X = getSignedRange(SExt->getOperand());
3175     return setSignedRange(SExt,
3176       ConservativeResult.intersectWith(X.signExtend(BitWidth)));
3177   }
3178 
3179   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3180     ConstantRange X = getSignedRange(Trunc->getOperand());
3181     return setSignedRange(Trunc,
3182       ConservativeResult.intersectWith(X.truncate(BitWidth)));
3183   }
3184 
3185   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3186     // If there's no signed wrap, and all the operands have the same sign or
3187     // zero, the value won't ever change sign.
3188     if (AddRec->hasNoSignedWrap()) {
3189       bool AllNonNeg = true;
3190       bool AllNonPos = true;
3191       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3192         if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
3193         if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
3194       }
3195       if (AllNonNeg)
3196         ConservativeResult = ConservativeResult.intersectWith(
3197           ConstantRange(APInt(BitWidth, 0),
3198                         APInt::getSignedMinValue(BitWidth)));
3199       else if (AllNonPos)
3200         ConservativeResult = ConservativeResult.intersectWith(
3201           ConstantRange(APInt::getSignedMinValue(BitWidth),
3202                         APInt(BitWidth, 1)));
3203     }
3204 
3205     // TODO: non-affine addrec
3206     if (AddRec->isAffine()) {
3207       const Type *Ty = AddRec->getType();
3208       const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3209       if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3210           getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3211         MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3212 
3213         const SCEV *Start = AddRec->getStart();
3214         const SCEV *Step = AddRec->getStepRecurrence(*this);
3215 
3216         ConstantRange StartRange = getSignedRange(Start);
3217         ConstantRange StepRange = getSignedRange(Step);
3218         ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3219         ConstantRange EndRange =
3220           StartRange.add(MaxBECountRange.multiply(StepRange));
3221 
3222         // Check for overflow. This must be done with ConstantRange arithmetic
3223         // because we could be called from within the ScalarEvolution overflow
3224         // checking code.
3225         ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
3226         ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3227         ConstantRange ExtMaxBECountRange =
3228           MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3229         ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
3230         if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3231             ExtEndRange)
3232           return setSignedRange(AddRec, ConservativeResult);
3233 
3234         APInt Min = APIntOps::smin(StartRange.getSignedMin(),
3235                                    EndRange.getSignedMin());
3236         APInt Max = APIntOps::smax(StartRange.getSignedMax(),
3237                                    EndRange.getSignedMax());
3238         if (Min.isMinSignedValue() && Max.isMaxSignedValue())
3239           return setSignedRange(AddRec, ConservativeResult);
3240         return setSignedRange(AddRec,
3241           ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
3242       }
3243     }
3244 
3245     return setSignedRange(AddRec, ConservativeResult);
3246   }
3247 
3248   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3249     // For a SCEVUnknown, ask ValueTracking.
3250     if (!U->getValue()->getType()->isIntegerTy() && !TD)
3251       return setSignedRange(U, ConservativeResult);
3252     unsigned NS = ComputeNumSignBits(U->getValue(), TD);
3253     if (NS == 1)
3254       return setSignedRange(U, ConservativeResult);
3255     return setSignedRange(U, ConservativeResult.intersectWith(
3256       ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
3257                     APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
3258   }
3259 
3260   return setSignedRange(S, ConservativeResult);
3261 }
3262 
3263 /// createSCEV - We know that there is no SCEV for the specified value.
3264 /// Analyze the expression.
3265 ///
3266 const SCEV *ScalarEvolution::createSCEV(Value *V) {
3267   if (!isSCEVable(V->getType()))
3268     return getUnknown(V);
3269 
3270   unsigned Opcode = Instruction::UserOp1;
3271   if (Instruction *I = dyn_cast<Instruction>(V)) {
3272     Opcode = I->getOpcode();
3273 
3274     // Don't attempt to analyze instructions in blocks that aren't
3275     // reachable. Such instructions don't matter, and they aren't required
3276     // to obey basic rules for definitions dominating uses which this
3277     // analysis depends on.
3278     if (!DT->isReachableFromEntry(I->getParent()))
3279       return getUnknown(V);
3280   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
3281     Opcode = CE->getOpcode();
3282   else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3283     return getConstant(CI);
3284   else if (isa<ConstantPointerNull>(V))
3285     return getConstant(V->getType(), 0);
3286   else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3287     return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
3288   else
3289     return getUnknown(V);
3290 
3291   Operator *U = cast<Operator>(V);
3292   switch (Opcode) {
3293   case Instruction::Add: {
3294     // The simple thing to do would be to just call getSCEV on both operands
3295     // and call getAddExpr with the result. However if we're looking at a
3296     // bunch of things all added together, this can be quite inefficient,
3297     // because it leads to N-1 getAddExpr calls for N ultimate operands.
3298     // Instead, gather up all the operands and make a single getAddExpr call.
3299     // LLVM IR canonical form means we need only traverse the left operands.
3300     SmallVector<const SCEV *, 4> AddOps;
3301     AddOps.push_back(getSCEV(U->getOperand(1)));
3302     for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
3303       unsigned Opcode = Op->getValueID() - Value::InstructionVal;
3304       if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
3305         break;
3306       U = cast<Operator>(Op);
3307       const SCEV *Op1 = getSCEV(U->getOperand(1));
3308       if (Opcode == Instruction::Sub)
3309         AddOps.push_back(getNegativeSCEV(Op1));
3310       else
3311         AddOps.push_back(Op1);
3312     }
3313     AddOps.push_back(getSCEV(U->getOperand(0)));
3314     return getAddExpr(AddOps);
3315   }
3316   case Instruction::Mul: {
3317     // See the Add code above.
3318     SmallVector<const SCEV *, 4> MulOps;
3319     MulOps.push_back(getSCEV(U->getOperand(1)));
3320     for (Value *Op = U->getOperand(0);
3321          Op->getValueID() == Instruction::Mul + Value::InstructionVal;
3322          Op = U->getOperand(0)) {
3323       U = cast<Operator>(Op);
3324       MulOps.push_back(getSCEV(U->getOperand(1)));
3325     }
3326     MulOps.push_back(getSCEV(U->getOperand(0)));
3327     return getMulExpr(MulOps);
3328   }
3329   case Instruction::UDiv:
3330     return getUDivExpr(getSCEV(U->getOperand(0)),
3331                        getSCEV(U->getOperand(1)));
3332   case Instruction::Sub:
3333     return getMinusSCEV(getSCEV(U->getOperand(0)),
3334                         getSCEV(U->getOperand(1)));
3335   case Instruction::And:
3336     // For an expression like x&255 that merely masks off the high bits,
3337     // use zext(trunc(x)) as the SCEV expression.
3338     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3339       if (CI->isNullValue())
3340         return getSCEV(U->getOperand(1));
3341       if (CI->isAllOnesValue())
3342         return getSCEV(U->getOperand(0));
3343       const APInt &A = CI->getValue();
3344 
3345       // Instcombine's ShrinkDemandedConstant may strip bits out of
3346       // constants, obscuring what would otherwise be a low-bits mask.
3347       // Use ComputeMaskedBits to recover the bits ShrinkDemandedConstant
3348       // knew about, and use them to reconstruct a low-bits mask value.
3349       unsigned LZ = A.countLeadingZeros();
3350       unsigned BitWidth = A.getBitWidth();
3351       APInt AllOnes = APInt::getAllOnesValue(BitWidth);
3352       APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3353       ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
3354 
3355       APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
3356 
3357       if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
3358         return
3359           getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
3360                                 IntegerType::get(getContext(), BitWidth - LZ)),
3361                             U->getType());
3362     }
3363     break;
3364 
3365   case Instruction::Or:
3366     // If the RHS of the Or is a constant, we may have something like:
3367     // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
3368     // optimizations will transparently handle this case.
3369     //
3370     // In order for this transformation to be safe, the LHS must be of the
3371     // form X*(2^n) and the Or constant must be less than 2^n.
3372     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3373       const SCEV *LHS = getSCEV(U->getOperand(0));
3374       const APInt &CIVal = CI->getValue();
3375       if (GetMinTrailingZeros(LHS) >=
3376           (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
3377         // Build a plain add SCEV.
3378         const SCEV *S = getAddExpr(LHS, getSCEV(CI));
3379         // If the LHS of the add was an addrec and it has no-wrap flags,
3380         // transfer the no-wrap flags, since an or won't introduce a wrap.
3381         if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
3382           const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
3383           if (OldAR->hasNoUnsignedWrap())
3384             const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true);
3385           if (OldAR->hasNoSignedWrap())
3386             const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true);
3387         }
3388         return S;
3389       }
3390     }
3391     break;
3392   case Instruction::Xor:
3393     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3394       // If the RHS of the xor is a signbit, then this is just an add.
3395       // Instcombine turns add of signbit into xor as a strength reduction step.
3396       if (CI->getValue().isSignBit())
3397         return getAddExpr(getSCEV(U->getOperand(0)),
3398                           getSCEV(U->getOperand(1)));
3399 
3400       // If the RHS of xor is -1, then this is a not operation.
3401       if (CI->isAllOnesValue())
3402         return getNotSCEV(getSCEV(U->getOperand(0)));
3403 
3404       // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
3405       // This is a variant of the check for xor with -1, and it handles
3406       // the case where instcombine has trimmed non-demanded bits out
3407       // of an xor with -1.
3408       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3409         if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3410           if (BO->getOpcode() == Instruction::And &&
3411               LCI->getValue() == CI->getValue())
3412             if (const SCEVZeroExtendExpr *Z =
3413                   dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
3414               const Type *UTy = U->getType();
3415               const SCEV *Z0 = Z->getOperand();
3416               const Type *Z0Ty = Z0->getType();
3417               unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3418 
3419               // If C is a low-bits mask, the zero extend is serving to
3420               // mask off the high bits. Complement the operand and
3421               // re-apply the zext.
3422               if (APIntOps::isMask(Z0TySize, CI->getValue()))
3423                 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3424 
3425               // If C is a single bit, it may be in the sign-bit position
3426               // before the zero-extend. In this case, represent the xor
3427               // using an add, which is equivalent, and re-apply the zext.
3428               APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
3429               if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3430                   Trunc.isSignBit())
3431                 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3432                                          UTy);
3433             }
3434     }
3435     break;
3436 
3437   case Instruction::Shl:
3438     // Turn shift left by a constant amount into a multiply.
3439     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3440       uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3441 
3442       // If the shift count is not less than the bitwidth, the result of
3443       // the shift is undefined. Don't try to analyze it, because the
3444       // resolution chosen here may differ from the resolution chosen in
3445       // other parts of the compiler.
3446       if (SA->getValue().uge(BitWidth))
3447         break;
3448 
3449       Constant *X = ConstantInt::get(getContext(),
3450         APInt(BitWidth, 1).shl(SA->getZExtValue()));
3451       return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3452     }
3453     break;
3454 
3455   case Instruction::LShr:
3456     // Turn logical shift right by a constant amount into an unsigned divide.
3457     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3458       uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3459 
3460       // If the shift count is not less than the bitwidth, the result of
3461       // the shift is undefined. Don't try to analyze it, because the
3462       // resolution chosen here may differ from the resolution chosen in
3463       // other parts of the compiler.
3464       if (SA->getValue().uge(BitWidth))
3465         break;
3466 
3467       Constant *X = ConstantInt::get(getContext(),
3468         APInt(BitWidth, 1).shl(SA->getZExtValue()));
3469       return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3470     }
3471     break;
3472 
3473   case Instruction::AShr:
3474     // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
3475     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3476       if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
3477         if (L->getOpcode() == Instruction::Shl &&
3478             L->getOperand(1) == U->getOperand(1)) {
3479           uint64_t BitWidth = getTypeSizeInBits(U->getType());
3480 
3481           // If the shift count is not less than the bitwidth, the result of
3482           // the shift is undefined. Don't try to analyze it, because the
3483           // resolution chosen here may differ from the resolution chosen in
3484           // other parts of the compiler.
3485           if (CI->getValue().uge(BitWidth))
3486             break;
3487 
3488           uint64_t Amt = BitWidth - CI->getZExtValue();
3489           if (Amt == BitWidth)
3490             return getSCEV(L->getOperand(0));       // shift by zero --> noop
3491           return
3492             getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3493                                               IntegerType::get(getContext(),
3494                                                                Amt)),
3495                               U->getType());
3496         }
3497     break;
3498 
3499   case Instruction::Trunc:
3500     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3501 
3502   case Instruction::ZExt:
3503     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3504 
3505   case Instruction::SExt:
3506     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3507 
3508   case Instruction::BitCast:
3509     // BitCasts are no-op casts so we just eliminate the cast.
3510     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
3511       return getSCEV(U->getOperand(0));
3512     break;
3513 
3514   // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
3515   // lead to pointer expressions which cannot safely be expanded to GEPs,
3516   // because ScalarEvolution doesn't respect the GEP aliasing rules when
3517   // simplifying integer expressions.
3518 
3519   case Instruction::GetElementPtr:
3520     return createNodeForGEP(cast<GEPOperator>(U));
3521 
3522   case Instruction::PHI:
3523     return createNodeForPHI(cast<PHINode>(U));
3524 
3525   case Instruction::Select:
3526     // This could be a smax or umax that was lowered earlier.
3527     // Try to recover it.
3528     if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
3529       Value *LHS = ICI->getOperand(0);
3530       Value *RHS = ICI->getOperand(1);
3531       switch (ICI->getPredicate()) {
3532       case ICmpInst::ICMP_SLT:
3533       case ICmpInst::ICMP_SLE:
3534         std::swap(LHS, RHS);
3535         // fall through
3536       case ICmpInst::ICMP_SGT:
3537       case ICmpInst::ICMP_SGE:
3538         // a >s b ? a+x : b+x  ->  smax(a, b)+x
3539         // a >s b ? b+x : a+x  ->  smin(a, b)+x
3540         if (LHS->getType() == U->getType()) {
3541           const SCEV *LS = getSCEV(LHS);
3542           const SCEV *RS = getSCEV(RHS);
3543           const SCEV *LA = getSCEV(U->getOperand(1));
3544           const SCEV *RA = getSCEV(U->getOperand(2));
3545           const SCEV *LDiff = getMinusSCEV(LA, LS);
3546           const SCEV *RDiff = getMinusSCEV(RA, RS);
3547           if (LDiff == RDiff)
3548             return getAddExpr(getSMaxExpr(LS, RS), LDiff);
3549           LDiff = getMinusSCEV(LA, RS);
3550           RDiff = getMinusSCEV(RA, LS);
3551           if (LDiff == RDiff)
3552             return getAddExpr(getSMinExpr(LS, RS), LDiff);
3553         }
3554         break;
3555       case ICmpInst::ICMP_ULT:
3556       case ICmpInst::ICMP_ULE:
3557         std::swap(LHS, RHS);
3558         // fall through
3559       case ICmpInst::ICMP_UGT:
3560       case ICmpInst::ICMP_UGE:
3561         // a >u b ? a+x : b+x  ->  umax(a, b)+x
3562         // a >u b ? b+x : a+x  ->  umin(a, b)+x
3563         if (LHS->getType() == U->getType()) {
3564           const SCEV *LS = getSCEV(LHS);
3565           const SCEV *RS = getSCEV(RHS);
3566           const SCEV *LA = getSCEV(U->getOperand(1));
3567           const SCEV *RA = getSCEV(U->getOperand(2));
3568           const SCEV *LDiff = getMinusSCEV(LA, LS);
3569           const SCEV *RDiff = getMinusSCEV(RA, RS);
3570           if (LDiff == RDiff)
3571             return getAddExpr(getUMaxExpr(LS, RS), LDiff);
3572           LDiff = getMinusSCEV(LA, RS);
3573           RDiff = getMinusSCEV(RA, LS);
3574           if (LDiff == RDiff)
3575             return getAddExpr(getUMinExpr(LS, RS), LDiff);
3576         }
3577         break;
3578       case ICmpInst::ICMP_NE:
3579         // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
3580         if (LHS->getType() == U->getType() &&
3581             isa<ConstantInt>(RHS) &&
3582             cast<ConstantInt>(RHS)->isZero()) {
3583           const SCEV *One = getConstant(LHS->getType(), 1);
3584           const SCEV *LS = getSCEV(LHS);
3585           const SCEV *LA = getSCEV(U->getOperand(1));
3586           const SCEV *RA = getSCEV(U->getOperand(2));
3587           const SCEV *LDiff = getMinusSCEV(LA, LS);
3588           const SCEV *RDiff = getMinusSCEV(RA, One);
3589           if (LDiff == RDiff)
3590             return getAddExpr(getUMaxExpr(One, LS), LDiff);
3591         }
3592         break;
3593       case ICmpInst::ICMP_EQ:
3594         // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
3595         if (LHS->getType() == U->getType() &&
3596             isa<ConstantInt>(RHS) &&
3597             cast<ConstantInt>(RHS)->isZero()) {
3598           const SCEV *One = getConstant(LHS->getType(), 1);
3599           const SCEV *LS = getSCEV(LHS);
3600           const SCEV *LA = getSCEV(U->getOperand(1));
3601           const SCEV *RA = getSCEV(U->getOperand(2));
3602           const SCEV *LDiff = getMinusSCEV(LA, One);
3603           const SCEV *RDiff = getMinusSCEV(RA, LS);
3604           if (LDiff == RDiff)
3605             return getAddExpr(getUMaxExpr(One, LS), LDiff);
3606         }
3607         break;
3608       default:
3609         break;
3610       }
3611     }
3612 
3613   default: // We cannot analyze this expression.
3614     break;
3615   }
3616 
3617   return getUnknown(V);
3618 }
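
// Illustrative examples (assumed IR) of the foldings handled above:
//
//   %m = and  i32 %x, 255   --> (zext i8 (trunc i32 %x to i8) to i32)
//   %o = or   i32 %y, 1     --> (1 + %y)     when %y is a multiple of 4
//   %s = shl  i32 %z, 3     --> (8 * %z)
//   %d = lshr i32 %z, 3     --> (%z /u 8)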
3619 
3620 
3621 
3622 //===----------------------------------------------------------------------===//
3623 //                   Iteration Count Computation Code
3624 //
3625 
3626 /// getBackedgeTakenCount - If the specified loop has a predictable
3627 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3628 /// object. The backedge-taken count is the number of times the loop header
3629 /// will be branched to from within the loop. This is one less than the
3630 /// trip count of the loop, since it doesn't count the first iteration,
3631 /// when the header is branched to from outside the loop.
3632 ///
3633 /// Note that it is not valid to call this method on a loop without a
3634 /// loop-invariant backedge-taken count (see
3635 /// hasLoopInvariantBackedgeTakenCount).
3636 ///
3637 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3638   return getBackedgeTakenInfo(L).Exact;
3639 }
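
// For example, a loop whose body executes exactly %n times (with %n >= 1)
// has a trip count of %n but a backedge-taken count of %n - 1, because the
// first arrival at the header comes from outside the loop.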
3640 
3641 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3642 /// return the least SCEV value that is known never to be less than the
3643 /// actual backedge taken count.
3644 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3645   return getBackedgeTakenInfo(L).Max;
3646 }
3647 
3648 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
3649 /// onto the given Worklist.
3650 static void
3651 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3652   BasicBlock *Header = L->getHeader();
3653 
3654   // Push all Loop-header PHIs onto the Worklist stack.
3655   for (BasicBlock::iterator I = Header->begin();
3656        PHINode *PN = dyn_cast<PHINode>(I); ++I)
3657     Worklist.push_back(PN);
3658 }
3659 
3660 const ScalarEvolution::BackedgeTakenInfo &
3661 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3662   // Initially insert a CouldNotCompute for this loop. If the insertion
3663   // succeeds, proceed to actually compute a backedge-taken count and
3664   // update the value. The temporary CouldNotCompute value tells SCEV
3665   // code elsewhere that it shouldn't attempt to request a new
3666   // backedge-taken count, which could result in infinite recursion.
3667   std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
3668     BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3669   if (Pair.second) {
3670     BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
3671     if (BECount.Exact != getCouldNotCompute()) {
3672       assert(isLoopInvariant(BECount.Exact, L) &&
3673              isLoopInvariant(BECount.Max, L) &&
3674              "Computed backedge-taken count isn't loop invariant for loop!");
3675       ++NumTripCountsComputed;
3676 
3677       // Update the value in the map.
3678       Pair.first->second = BECount;
3679     } else {
3680       if (BECount.Max != getCouldNotCompute())
3681         // Update the value in the map.
3682         Pair.first->second = BECount;
3683       if (isa<PHINode>(L->getHeader()->begin()))
3684         // Only count loops that have phi nodes as not being computable.
3685         ++NumTripCountsNotComputed;
3686     }
3687 
3688     // Now that we know more about the trip count for this loop, forget any
3689     // existing SCEV values for PHI nodes in this loop since they are only
3690     // conservative estimates made without the benefit of trip count
3691     // information. This is similar to the code in forgetLoop, except that
3692     // it handles SCEVUnknown PHI nodes specially.
3693     if (BECount.hasAnyInfo()) {
3694       SmallVector<Instruction *, 16> Worklist;
3695       PushLoopPHIs(L, Worklist);
3696 
3697       SmallPtrSet<Instruction *, 8> Visited;
3698       while (!Worklist.empty()) {
3699         Instruction *I = Worklist.pop_back_val();
3700         if (!Visited.insert(I)) continue;
3701 
3702         ValueExprMapType::iterator It =
3703           ValueExprMap.find(static_cast<Value *>(I));
3704         if (It != ValueExprMap.end()) {
3705           const SCEV *Old = It->second;
3706 
3707           // SCEVUnknown for a PHI either means that it has an unrecognized
3708           // structure, or it's a PHI that's in the process of being computed
3709           // by createNodeForPHI.  In the former case, additional loop trip
3710           // count information isn't going to change anything. In the latter
3711           // case, createNodeForPHI will perform the necessary updates on its
3712           // own when it gets to that point.
3713           if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
3714             ValuesAtScopes.erase(Old);
3715             UnsignedRanges.erase(Old);
3716             SignedRanges.erase(Old);
3717             ValueExprMap.erase(It);
3718           }
3719           if (PHINode *PN = dyn_cast<PHINode>(I))
3720             ConstantEvolutionLoopExitValue.erase(PN);
3721         }
3722 
3723         PushDefUseChildren(I, Worklist);
3724       }
3725     }
3726   }
3727   return Pair.first->second;
3728 }
3729 
3730 /// forgetLoop - This method should be called by the client when it has
3731 /// changed a loop in a way that may affect ScalarEvolution's ability to
3732 /// compute a trip count, or if the loop is deleted.
3733 void ScalarEvolution::forgetLoop(const Loop *L) {
3734   // Drop any stored trip count value.
3735   BackedgeTakenCounts.erase(L);
3736 
3737   // Drop information about expressions based on loop-header PHIs.
3738   SmallVector<Instruction *, 16> Worklist;
3739   PushLoopPHIs(L, Worklist);
3740 
3741   SmallPtrSet<Instruction *, 8> Visited;
3742   while (!Worklist.empty()) {
3743     Instruction *I = Worklist.pop_back_val();
3744     if (!Visited.insert(I)) continue;
3745 
3746     ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
3747     if (It != ValueExprMap.end()) {
3748       const SCEV *Old = It->second;
3749       ValuesAtScopes.erase(Old);
3750       UnsignedRanges.erase(Old);
3751       SignedRanges.erase(Old);
3752       ValueExprMap.erase(It);
3753       if (PHINode *PN = dyn_cast<PHINode>(I))
3754         ConstantEvolutionLoopExitValue.erase(PN);
3755     }
3756 
3757     PushDefUseChildren(I, Worklist);
3758   }
3759 
3760   // Forget all contained loops too, to avoid dangling entries in the
3761   // ValuesAtScopes map.
3762   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
3763     forgetLoop(*I);
3764 }
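// Illustrative usage sketch (not part of the original source; the function
// below is hypothetical): a client pass that unrolls a loop, rewrites its
// exit branch, or deletes it entirely is expected to invalidate the cached
// analysis roughly like this before reusing ScalarEvolution.
#if 0
static void rewriteLoopExitAndInvalidate(Loop *L, ScalarEvolution &SE) {
  // Drop the cached backedge-taken count and any SCEVs derived from the
  // loop-header PHIs before (or immediately after) mutating the loop.
  SE.forgetLoop(L);
  // ... perform the actual CFG/instruction rewriting here ...
}
#endif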
3765 
3766 /// forgetValue - This method should be called by the client when it has
3767 /// changed a value in a way that may affect its SCEV, or which may
3768 /// disconnect it from a def-use chain linking it to a loop.
3769 void ScalarEvolution::forgetValue(Value *V) {
3770   Instruction *I = dyn_cast<Instruction>(V);
3771   if (!I) return;
3772 
3773   // Drop information about expressions based on loop-header PHIs.
3774   SmallVector<Instruction *, 16> Worklist;
3775   Worklist.push_back(I);
3776 
3777   SmallPtrSet<Instruction *, 8> Visited;
3778   while (!Worklist.empty()) {
3779     I = Worklist.pop_back_val();
3780     if (!Visited.insert(I)) continue;
3781 
3782     ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
3783     if (It != ValueExprMap.end()) {
3784       const SCEV *Old = It->second;
3785       ValuesAtScopes.erase(Old);
3786       UnsignedRanges.erase(Old);
3787       SignedRanges.erase(Old);
3788       ValueExprMap.erase(It);
3789       if (PHINode *PN = dyn_cast<PHINode>(I))
3790         ConstantEvolutionLoopExitValue.erase(PN);
3791     }
3792 
3793     PushDefUseChildren(I, Worklist);
3794   }
3795 }
3796 
3797 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
3798 /// of the specified loop will execute.
3799 ScalarEvolution::BackedgeTakenInfo
3800 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3801   SmallVector<BasicBlock *, 8> ExitingBlocks;
3802   L->getExitingBlocks(ExitingBlocks);
3803 
3804   // Examine all exits and pick the most conservative values.
3805   const SCEV *BECount = getCouldNotCompute();
3806   const SCEV *MaxBECount = getCouldNotCompute();
3807   bool CouldNotComputeBECount = false;
3808   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3809     BackedgeTakenInfo NewBTI =
3810       ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3811 
3812     if (NewBTI.Exact == getCouldNotCompute()) {
3813       // We couldn't compute an exact value for this exit, so
3814       // we won't be able to compute an exact value for the loop.
3815       CouldNotComputeBECount = true;
3816       BECount = getCouldNotCompute();
3817     } else if (!CouldNotComputeBECount) {
3818       if (BECount == getCouldNotCompute())
3819         BECount = NewBTI.Exact;
3820       else
3821         BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3822     }
3823     if (MaxBECount == getCouldNotCompute())
3824       MaxBECount = NewBTI.Max;
3825     else if (NewBTI.Max != getCouldNotCompute())
3826       MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3827   }
3828 
3829   return BackedgeTakenInfo(BECount, MaxBECount);
3830 }
3831 
3832 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3833 /// of the specified loop will execute if it exits via the specified block.
3834 ScalarEvolution::BackedgeTakenInfo
3835 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3836                                                    BasicBlock *ExitingBlock) {
3837 
3838   // Okay, we've chosen an exiting block.  See what condition causes us to
3839   // exit at this block.
3840   //
3841   // FIXME: we should be able to handle switch instructions (with a single exit)
3842   BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3843   if (ExitBr == 0) return getCouldNotCompute();
3844   assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3845 
3846   // At this point, we know we have a conditional branch that determines whether
3847   // the loop is exited.  However, we don't know if the branch is executed each
3848   // time through the loop.  If not, then the execution count of the branch will
3849   // not be equal to the trip count of the loop.
3850   //
3851   // Currently we check for this by testing whether the exit branch goes to
3852   // the loop header.  If so, we know it will always execute the same number of
3853   // times as the loop.  We also handle the case where the exit block *is* the
3854   // loop header.  This is common for un-rotated loops.
3855   //
3856   // If both of those tests fail, walk up the unique predecessor chain to the
3857   // header, stopping if there is an edge that doesn't exit the loop. If the
3858   // header is reached, the execution count of the branch will be equal to the
3859   // trip count of the loop.
3860   //
3861   //  More extensive analysis could be done to handle more cases here.
3862   //
3863   if (ExitBr->getSuccessor(0) != L->getHeader() &&
3864       ExitBr->getSuccessor(1) != L->getHeader() &&
3865       ExitBr->getParent() != L->getHeader()) {
3866     // The simple checks failed, try climbing the unique predecessor chain
3867     // up to the header.
3868     bool Ok = false;
3869     for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3870       BasicBlock *Pred = BB->getUniquePredecessor();
3871       if (!Pred)
3872         return getCouldNotCompute();
3873       TerminatorInst *PredTerm = Pred->getTerminator();
3874       for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3875         BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3876         if (PredSucc == BB)
3877           continue;
3878         // If the predecessor has a successor that isn't BB and isn't
3879         // outside the loop, assume the worst.
3880         if (L->contains(PredSucc))
3881           return getCouldNotCompute();
3882       }
3883       if (Pred == L->getHeader()) {
3884         Ok = true;
3885         break;
3886       }
3887       BB = Pred;
3888     }
3889     if (!Ok)
3890       return getCouldNotCompute();
3891   }
3892 
3893   // Proceed to the next level to examine the exit condition expression.
3894   return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3895                                                ExitBr->getSuccessor(0),
3896                                                ExitBr->getSuccessor(1));
3897 }
3898 
3899 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3900 /// backedge of the specified loop will execute if its exit condition
3901 /// were a conditional branch on ExitCond, branching to TBB and FBB.
3902 ScalarEvolution::BackedgeTakenInfo
3903 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3904                                                        Value *ExitCond,
3905                                                        BasicBlock *TBB,
3906                                                        BasicBlock *FBB) {
3907   // Check if the controlling expression for this loop is an And or Or.
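  // For example (illustrative): a loop that keeps iterating while
  // (i < n && i < m) exits as soon as either condition fails, so its exact
  // backedge-taken count is the unsigned minimum of the counts computed for
  // the two conditions separately.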
3908   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3909     if (BO->getOpcode() == Instruction::And) {
3910       // Recurse on the operands of the and.
3911       BackedgeTakenInfo BTI0 =
3912         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3913       BackedgeTakenInfo BTI1 =
3914         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3915       const SCEV *BECount = getCouldNotCompute();
3916       const SCEV *MaxBECount = getCouldNotCompute();
3917       if (L->contains(TBB)) {
3918         // Both conditions must be true for the loop to continue executing.
3919         // Choose the less conservative count.
3920         if (BTI0.Exact == getCouldNotCompute() ||
3921             BTI1.Exact == getCouldNotCompute())
3922           BECount = getCouldNotCompute();
3923         else
3924           BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3925         if (BTI0.Max == getCouldNotCompute())
3926           MaxBECount = BTI1.Max;
3927         else if (BTI1.Max == getCouldNotCompute())
3928           MaxBECount = BTI0.Max;
3929         else
3930           MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3931       } else {
3932         // Both conditions must be true at the same time for the loop to exit.
3933         // For now, be conservative.
3934         assert(L->contains(FBB) && "Loop block has no successor in loop!");
3935         if (BTI0.Max == BTI1.Max)
3936           MaxBECount = BTI0.Max;
3937         if (BTI0.Exact == BTI1.Exact)
3938           BECount = BTI0.Exact;
3939       }
3940 
3941       return BackedgeTakenInfo(BECount, MaxBECount);
3942     }
3943     if (BO->getOpcode() == Instruction::Or) {
3944       // Recurse on the operands of the or.
3945       BackedgeTakenInfo BTI0 =
3946         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3947       BackedgeTakenInfo BTI1 =
3948         ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3949       const SCEV *BECount = getCouldNotCompute();
3950       const SCEV *MaxBECount = getCouldNotCompute();
3951       if (L->contains(FBB)) {
3952         // Both conditions must be false for the loop to continue executing.
3953         // Choose the less conservative count.
3954         if (BTI0.Exact == getCouldNotCompute() ||
3955             BTI1.Exact == getCouldNotCompute())
3956           BECount = getCouldNotCompute();
3957         else
3958           BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3959         if (BTI0.Max == getCouldNotCompute())
3960           MaxBECount = BTI1.Max;
3961         else if (BTI1.Max == getCouldNotCompute())
3962           MaxBECount = BTI0.Max;
3963         else
3964           MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3965       } else {
3966         // Both conditions must be false at the same time for the loop to exit.
3967         // For now, be conservative.
3968         assert(L->contains(TBB) && "Loop block has no successor in loop!");
3969         if (BTI0.Max == BTI1.Max)
3970           MaxBECount = BTI0.Max;
3971         if (BTI0.Exact == BTI1.Exact)
3972           BECount = BTI0.Exact;
3973       }
3974 
3975       return BackedgeTakenInfo(BECount, MaxBECount);
3976     }
3977   }
3978 
3979   // With an icmp, it may be feasible to compute an exact backedge-taken count.
3980   // Proceed to the next level to examine the icmp.
3981   if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3982     return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3983 
3984   // Check for a constant condition. These are normally stripped out by
3985   // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
3986   // preserve the CFG and is temporarily leaving constant conditions
3987   // in place.
3988   if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
3989     if (L->contains(FBB) == !CI->getZExtValue())
3990       // The backedge is always taken.
3991       return getCouldNotCompute();
3992     else
3993       // The backedge is never taken.
3994       return getConstant(CI->getType(), 0);
3995   }
3996 
3997   // If it's not an integer or pointer comparison then compute it the hard way.
3998   return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3999 }
4000 
4001 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
4002 /// backedge of the specified loop will execute if its exit condition
4003 /// were a conditional branch on the ICmpInst ExitCond, branching to TBB and FBB.
4004 ScalarEvolution::BackedgeTakenInfo
4005 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
4006                                                            ICmpInst *ExitCond,
4007                                                            BasicBlock *TBB,
4008                                                            BasicBlock *FBB) {
4009 
4010   // If the condition was exit on true, convert the condition to exit on false
4011   ICmpInst::Predicate Cond;
4012   if (!L->contains(FBB))
4013     Cond = ExitCond->getPredicate();
4014   else
4015     Cond = ExitCond->getInversePredicate();
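  // Either way, Cond now holds exactly as long as the loop keeps iterating;
  // the loop exits the first time Cond evaluates to false.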
4016 
4017   // Handle common loops like: for (X = "string"; *X; ++X)
4018   if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
4019     if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
4020       BackedgeTakenInfo ItCnt =
4021         ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
4022       if (ItCnt.hasAnyInfo())
4023         return ItCnt;
4024     }
4025 
4026   const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
4027   const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
4028 
4029   // Try to evaluate any dependencies out of the loop.
4030   LHS = getSCEVAtScope(LHS, L);
4031   RHS = getSCEVAtScope(RHS, L);
4032 
4033   // At this point, we would like to compute for how many iterations of the
4034   // loop the predicate will return true with these inputs.
4035   if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
4036     // If there is a loop-invariant, force it into the RHS.
4037     std::swap(LHS, RHS);
4038     Cond = ICmpInst::getSwappedPredicate(Cond);
4039   }
4040 
4041   // Simplify the operands before analyzing them.
4042   (void)SimplifyICmpOperands(Cond, LHS, RHS);
4043 
4044   // If we have a comparison of a chrec against a constant, try to use value
4045   // ranges to answer this query.
4046   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
4047     if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
4048       if (AddRec->getLoop() == L) {
4049         // Form the constant range.
4050         ConstantRange CompRange(
4051             ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
4052 
4053         const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
4054         if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
4055       }
4056 
4057   switch (Cond) {
4058   case ICmpInst::ICMP_NE: {                     // while (X != Y)
4059     // Convert to: while (X-Y != 0)
4060     BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
4061     if (BTI.hasAnyInfo()) return BTI;
4062     break;
4063   }
4064   case ICmpInst::ICMP_EQ: {                     // while (X == Y)
4065     // Convert to: while (X-Y == 0)
4066     BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
4067     if (BTI.hasAnyInfo()) return BTI;
4068     break;
4069   }
4070   case ICmpInst::ICMP_SLT: {
4071     BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
4072     if (BTI.hasAnyInfo()) return BTI;
4073     break;
4074   }
4075   case ICmpInst::ICMP_SGT: {
4076     BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
4077                                              getNotSCEV(RHS), L, true);
4078     if (BTI.hasAnyInfo()) return BTI;
4079     break;
4080   }
4081   case ICmpInst::ICMP_ULT: {
4082     BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
4083     if (BTI.hasAnyInfo()) return BTI;
4084     break;
4085   }
4086   case ICmpInst::ICMP_UGT: {
4087     BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
4088                                              getNotSCEV(RHS), L, false);
4089     if (BTI.hasAnyInfo()) return BTI;
4090     break;
4091   }
4092   default:
4093 #if 0
4094     dbgs() << "ComputeBackedgeTakenCount ";
4095     if (ExitCond->getOperand(0)->getType()->isUnsigned())
4096       dbgs() << "[unsigned] ";
4097     dbgs() << *LHS << "   "
4098          << Instruction::getOpcodeName(Instruction::ICmp)
4099          << "   " << *RHS << "\n";
4100 #endif
4101     break;
4102   }
4103   return
4104     ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
4105 }
4106 
4107 static ConstantInt *
4108 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
4109                                 ScalarEvolution &SE) {
4110   const SCEV *InVal = SE.getConstant(C);
4111   const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
4112   assert(isa<SCEVConstant>(Val) &&
4113          "Evaluation of SCEV at constant didn't fold correctly?");
4114   return cast<SCEVConstant>(Val)->getValue();
4115 }
4116 
4117 /// GetAddressedElementFromGlobal - Given a global variable with an initializer
4118 /// and a GEP expression (missing the pointer index) indexing into it, return
4119 /// the addressed element of the initializer or null if the index expression is
4120 /// invalid.
4121 static Constant *
4122 GetAddressedElementFromGlobal(GlobalVariable *GV,
4123                               const std::vector<ConstantInt*> &Indices) {
4124   Constant *Init = GV->getInitializer();
4125   for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
4126     uint64_t Idx = Indices[i]->getZExtValue();
4127     if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
4128       assert(Idx < CS->getNumOperands() && "Bad struct index!");
4129       Init = cast<Constant>(CS->getOperand(Idx));
4130     } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
4131       if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
4132       Init = cast<Constant>(CA->getOperand(Idx));
4133     } else if (isa<ConstantAggregateZero>(Init)) {
4134       if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
4135         assert(Idx < STy->getNumElements() && "Bad struct index!");
4136         Init = Constant::getNullValue(STy->getElementType(Idx));
4137       } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
4138         if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
4139         Init = Constant::getNullValue(ATy->getElementType());
4140       } else {
4141         llvm_unreachable("Unknown constant aggregate type!");
4142       }
4143       return 0;
4144     } else {
4145       return 0; // Unknown initializer type
4146     }
4147   }
4148   return Init;
4149 }
4150 
4151 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
4152 /// 'icmp op load X, cst', try to see if we can compute the backedge
4153 /// execution count.
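///
/// Illustrative example (not taken from the original comments): for a
/// constant global such as
///   static const int Table[] = {1, 2, 3, 0};
/// and a loop of the form "for (i = 0; Table[i] != 0; ++i)", the addressed
/// elements are evaluated for successive values of the affine index, and the
/// compare first fails at index 3, giving a backedge-taken count of 3.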
4154 ScalarEvolution::BackedgeTakenInfo
4155 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
4156                                                 LoadInst *LI,
4157                                                 Constant *RHS,
4158                                                 const Loop *L,
4159                                                 ICmpInst::Predicate predicate) {
4160   if (LI->isVolatile()) return getCouldNotCompute();
4161 
4162   // Check to see if the loaded pointer is a getelementptr of a global.
4163   // TODO: Use SCEV instead of manually grubbing with GEPs.
4164   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
4165   if (!GEP) return getCouldNotCompute();
4166 
4167   // Make sure that it is really a constant global we are gepping, with an
4168   // initializer, and make sure the first IDX is really 0.
4169   GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
4170   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
4171       GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
4172       !cast<Constant>(GEP->getOperand(1))->isNullValue())
4173     return getCouldNotCompute();
4174 
4175   // Okay, we allow one non-constant index into the GEP instruction.
4176   Value *VarIdx = 0;
4177   std::vector<ConstantInt*> Indexes;
4178   unsigned VarIdxNum = 0;
4179   for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
4180     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
4181       Indexes.push_back(CI);
4182     } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
4183       if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
4184       VarIdx = GEP->getOperand(i);
4185       VarIdxNum = i-2;
4186       Indexes.push_back(0);
4187     }
4188 
4189   // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
4190   // Check to see if X is a loop-variant value now.
4191   const SCEV *Idx = getSCEV(VarIdx);
4192   Idx = getSCEVAtScope(Idx, L);
4193 
4194   // We can only recognize very limited forms of loop index expressions, in
4195   // particular, only affine AddRec's like {C1,+,C2}.
4196   const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
4197   if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
4198       !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
4199       !isa<SCEVConstant>(IdxExpr->getOperand(1)))
4200     return getCouldNotCompute();
4201 
4202   unsigned MaxSteps = MaxBruteForceIterations;
4203   for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
4204     ConstantInt *ItCst = ConstantInt::get(
4205                            cast<IntegerType>(IdxExpr->getType()), IterationNum);
4206     ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
4207 
4208     // Form the GEP offset.
4209     Indexes[VarIdxNum] = Val;
4210 
4211     Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
4212     if (Result == 0) break;  // Cannot compute!
4213 
4214     // Evaluate the condition for this iteration.
4215     Result = ConstantExpr::getICmp(predicate, Result, RHS);
4216     if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
4217     if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
4218 #if 0
4219       dbgs() << "\n***\n*** Computed loop count " << *ItCst
4220              << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
4221              << "***\n";
4222 #endif
4223       ++NumArrayLenItCounts;
4224       return getConstant(ItCst);   // Found terminating iteration!
4225     }
4226   }
4227   return getCouldNotCompute();
4228 }
4229 
4230 
4231 /// CanConstantFold - Return true if we can constant fold an instruction of the
4232 /// specified type, assuming that all operands were constants.
4233 static bool CanConstantFold(const Instruction *I) {
4234   if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
4235       isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
4236     return true;
4237 
4238   if (const CallInst *CI = dyn_cast<CallInst>(I))
4239     if (const Function *F = CI->getCalledFunction())
4240       return canConstantFoldCallTo(F);
4241   return false;
4242 }
4243 
4244 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
4245 /// in the loop that V is derived from.  We allow arbitrary operations along the
4246 /// way, but the operands of an operation must either be constants or a value
4247 /// way, but each operand of an operation must either be a constant or a value
4248 /// constraints, return null.
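/// For example (illustrative), for "Y = (X * X) + 3" where X is a PHI in the
/// loop header, the PHI returned is X; if the expression also used a second,
/// different header PHI, null would be returned instead.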
4249 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
4250   // If this is not an instruction, or if this is an instruction outside of the
4251   // loop, it can't be derived from a loop PHI.
4252   Instruction *I = dyn_cast<Instruction>(V);
4253   if (I == 0 || !L->contains(I)) return 0;
4254 
4255   if (PHINode *PN = dyn_cast<PHINode>(I)) {
4256     if (L->getHeader() == I->getParent())
4257       return PN;
4258     else
4259       // We don't currently keep track of the control flow needed to evaluate
4260       // PHIs, so we cannot handle PHIs inside of loops.
4261       return 0;
4262   }
4263 
4264   // If we won't be able to constant fold this expression even if the operands
4265   // are constants, return early.
4266   if (!CanConstantFold(I)) return 0;
4267 
4268   // Otherwise, we can evaluate this instruction if all of its operands are
4269   // constant or derived from a PHI node themselves.
4270   PHINode *PHI = 0;
4271   for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
4272     if (!isa<Constant>(I->getOperand(Op))) {
4273       PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
4274       if (P == 0) return 0;  // Not evolving from PHI
4275       if (PHI == 0)
4276         PHI = P;
4277       else if (PHI != P)
4278         return 0;  // Evolving from multiple different PHIs.
4279     }
4280 
4281   // This is an expression evolving from a constant PHI!
4282   return PHI;
4283 }
4284 
4285 /// EvaluateExpression - Given an expression that passes the
4286 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
4287 /// in the loop has the value PHIVal.  If we can't fold this expression for some
4288 /// reason, return null.
4289 static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
4290                                     const TargetData *TD) {
4291   if (isa<PHINode>(V)) return PHIVal;
4292   if (Constant *C = dyn_cast<Constant>(V)) return C;
4293   Instruction *I = cast<Instruction>(V);
4294 
4295   std::vector<Constant*> Operands(I->getNumOperands());
4296 
4297   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4298     Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
4299     if (Operands[i] == 0) return 0;
4300   }
4301 
4302   if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4303     return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
4304                                            Operands[1], TD);
4305   return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4306                                   &Operands[0], Operands.size(), TD);
4307 }
4308 
4309 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
4310 /// in the header of its containing loop, we know the loop executes a
4311 /// in the header of its containing loop, that the loop executes a
4312 /// constant number of times, and that the PHI node is just a recurrence
4313 Constant *
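///
/// For example (illustrative), for a header PHI "i = phi [0, preheader],
/// [i + 2, latch]" and a backedge-taken count of 5, iterating the recurrence
/// symbolically yields the constant exit value 10.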
4314 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
4315                                                    const APInt &BEs,
4316                                                    const Loop *L) {
4317   std::map<PHINode*, Constant*>::const_iterator I =
4318     ConstantEvolutionLoopExitValue.find(PN);
4319   if (I != ConstantEvolutionLoopExitValue.end())
4320     return I->second;
4321 
4322   if (BEs.ugt(MaxBruteForceIterations))
4323     return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
4324 
4325   Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
4326 
4327   // Since the loop is canonicalized, the PHI node must have two entries.  One
4328   // entry must be a constant (coming in from outside of the loop), and the
4329   // second must be derived from the same PHI.
4330   bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4331   Constant *StartCST =
4332     dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4333   if (StartCST == 0)
4334     return RetVal = 0;  // Must be a constant.
4335 
4336   Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4337   if (getConstantEvolvingPHI(BEValue, L) != PN &&
4338       !isa<Constant>(BEValue))
4339     return RetVal = 0;  // Not derived from same PHI.
4340 
4341   // Execute the loop symbolically to determine the exit value.
4342   if (BEs.getActiveBits() >= 32)
4343     return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
4344 
4345   unsigned NumIterations = BEs.getZExtValue(); // must be in range
4346   unsigned IterationNum = 0;
4347   for (Constant *PHIVal = StartCST; ; ++IterationNum) {
4348     if (IterationNum == NumIterations)
4349       return RetVal = PHIVal;  // Got exit value!
4350 
4351     // Compute the value of the PHI node for the next iteration.
4352     Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4353     if (NextPHI == PHIVal)
4354       return RetVal = NextPHI;  // Stopped evolving!
4355     if (NextPHI == 0)
4356       return 0;        // Couldn't evaluate!
4357     PHIVal = NextPHI;
4358   }
4359 }
4360 
4361 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
4362 /// constant number of times (the condition evolves only from constants),
4363 /// try to evaluate a few iterations of the loop until the exit
4364 /// condition gets a value of ExitWhen (true or false).  If we cannot
4365 /// evaluate the trip count of the loop, return getCouldNotCompute().
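///
/// For example (illustrative), a loop like "for (x = 2; x != 256; x *= 2)"
/// has no affine recurrence for x, but evaluating the exit test one iteration
/// at a time shows the loop exits after 7 backedges, so the constant 7 is
/// returned.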
4366 const SCEV *
4367 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
4368                                                        Value *Cond,
4369                                                        bool ExitWhen) {
4370   PHINode *PN = getConstantEvolvingPHI(Cond, L);
4371   if (PN == 0) return getCouldNotCompute();
4372 
4373   // If the loop is canonicalized, the PHI will have exactly two entries.
4374   // That's the only form we support here.
4375   if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
4376 
4377   // One entry must be a constant (coming in from outside of the loop), and the
4378   // second must be derived from the same PHI.
4379   bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4380   Constant *StartCST =
4381     dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4382   if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
4383 
4384   Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4385   if (getConstantEvolvingPHI(BEValue, L) != PN &&
4386       !isa<Constant>(BEValue))
4387     return getCouldNotCompute();  // Not derived from same PHI.
4388 
4389   // Okay, we found a PHI node that controls the trip count of this loop.  Execute
4390   // the loop symbolically to determine when the condition gets a value of
4391   // "ExitWhen".
4392   unsigned IterationNum = 0;
4393   unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
4394   for (Constant *PHIVal = StartCST;
4395        IterationNum != MaxIterations; ++IterationNum) {
4396     ConstantInt *CondVal =
4397       dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD));
4398 
4399     // Couldn't symbolically evaluate.
4400     if (!CondVal) return getCouldNotCompute();
4401 
4402     if (CondVal->getValue() == uint64_t(ExitWhen)) {
4403       ++NumBruteForceTripCountsComputed;
4404       return getConstant(Type::getInt32Ty(getContext()), IterationNum);
4405     }
4406 
4407     // Compute the value of the PHI node for the next iteration.
4408     Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4409     if (NextPHI == 0 || NextPHI == PHIVal)
4410       return getCouldNotCompute();// Couldn't evaluate or not making progress...
4411     PHIVal = NextPHI;
4412   }
4413 
4414   // Too many iterations were needed to evaluate.
4415   return getCouldNotCompute();
4416 }
4417 
4418 /// getSCEVAtScope - Return a SCEV expression for the specified value
4419 /// at the specified scope in the program.  The L value specifies a loop
4420 /// at the specified scope in the program.  The L value specifies the loop
4421 /// nest in which to evaluate the expression: null means the top-level scope,
4422 /// and a value defined in a loop immediately inside L gets its loop-exit value.
4423 /// This method can be used to compute the exit value for a variable defined
4424 /// in a loop by querying what the value will hold in the parent loop.
4425 ///
4426 /// In the case that a relevant loop exit value cannot be computed, the
4427 /// original value V is returned.
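///
/// For example (illustrative), given an inner loop whose induction variable
/// is the addrec {0,+,1} and whose backedge-taken count is n, asking for
/// that addrec's value in the parent loop yields n, the value the variable
/// holds when the inner loop exits.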
4428 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
4429   // Check to see if we've folded this expression at this loop before.
4430   std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
4431   std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
4432     Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
4433   if (!Pair.second)
4434     return Pair.first->second ? Pair.first->second : V;
4435 
4436   // Otherwise compute it.
4437   const SCEV *C = computeSCEVAtScope(V, L);
4438   ValuesAtScopes[V][L] = C;
4439   return C;
4440 }
4441 
4442 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
4443   if (isa<SCEVConstant>(V)) return V;
4444 
4445   // If this instruction is evolved from a constant-evolving PHI, compute the
4446   // exit value from the loop without using SCEVs.
4447   if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
4448     if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
4449       const Loop *LI = (*this->LI)[I->getParent()];
4450       if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
4451         if (PHINode *PN = dyn_cast<PHINode>(I))
4452           if (PN->getParent() == LI->getHeader()) {
4453             // Okay, there is no closed form solution for the PHI node.  Check
4454             // to see if the loop that contains it has a known backedge-taken
4455             // count.  If so, we may be able to force computation of the exit
4456             // value.
4457             const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
4458             if (const SCEVConstant *BTCC =
4459                   dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
4460               // Okay, we know how many times the containing loop executes.  If
4461               // this is a constant evolving PHI node, get the final value at
4462               // the specified iteration number.
4463               Constant *RV = getConstantEvolutionLoopExitValue(PN,
4464                                                    BTCC->getValue()->getValue(),
4465                                                                LI);
4466               if (RV) return getSCEV(RV);
4467             }
4468           }
4469 
4470       // Okay, this is an expression that we cannot symbolically evaluate
4471       // into a SCEV.  Check to see if it's possible to symbolically evaluate
4472       // the arguments into constants, and if so, try to constant propagate the
4473       // result.  This is particularly useful for computing loop exit values.
4474       if (CanConstantFold(I)) {
4475         SmallVector<Constant *, 4> Operands;
4476         bool MadeImprovement = false;
4477         for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4478           Value *Op = I->getOperand(i);
4479           if (Constant *C = dyn_cast<Constant>(Op)) {
4480             Operands.push_back(C);
4481             continue;
4482           }
4483 
4484           // If an operand is non-constant and has a type that isn't
4485           // SCEVable (neither integer nor pointer), don't even try to
4486           // analyze it with SCEV techniques.
4487           if (!isSCEVable(Op->getType()))
4488             return V;
4489 
4490           const SCEV *OrigV = getSCEV(Op);
4491           const SCEV *OpV = getSCEVAtScope(OrigV, L);
4492           MadeImprovement |= OrigV != OpV;
4493 
4494           Constant *C = 0;
4495           if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
4496             C = SC->getValue();
4497           if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
4498             C = dyn_cast<Constant>(SU->getValue());
4499           if (!C) return V;
4500           if (C->getType() != Op->getType())
4501             C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4502                                                               Op->getType(),
4503                                                               false),
4504                                       C, Op->getType());
4505           Operands.push_back(C);
4506         }
4507 
4508         // Check to see if getSCEVAtScope actually made an improvement.
4509         if (MadeImprovement) {
4510           Constant *C = 0;
4511           if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4512             C = ConstantFoldCompareInstOperands(CI->getPredicate(),
4513                                                 Operands[0], Operands[1], TD);
4514           else
4515             C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4516                                          &Operands[0], Operands.size(), TD);
4517           if (!C) return V;
4518           return getSCEV(C);
4519         }
4520       }
4521     }
4522 
4523     // This is some other type of SCEVUnknown, just return it.
4524     return V;
4525   }
4526 
4527   if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
4528     // Avoid performing the look-up in the common case where the specified
4529     // expression has no loop-variant portions.
4530     for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
4531       const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4532       if (OpAtScope != Comm->getOperand(i)) {
4533         // Okay, at least one of these operands is loop variant but might be
4534         // foldable.  Build a new instance of the folded commutative expression.
4535         SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
4536                                             Comm->op_begin()+i);
4537         NewOps.push_back(OpAtScope);
4538 
4539         for (++i; i != e; ++i) {
4540           OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4541           NewOps.push_back(OpAtScope);
4542         }
4543         if (isa<SCEVAddExpr>(Comm))
4544           return getAddExpr(NewOps);
4545         if (isa<SCEVMulExpr>(Comm))
4546           return getMulExpr(NewOps);
4547         if (isa<SCEVSMaxExpr>(Comm))
4548           return getSMaxExpr(NewOps);
4549         if (isa<SCEVUMaxExpr>(Comm))
4550           return getUMaxExpr(NewOps);
4551         llvm_unreachable("Unknown commutative SCEV type!");
4552       }
4553     }
4554     // If we got here, all operands are loop invariant.
4555     return Comm;
4556   }
4557 
4558   if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
4559     const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
4560     const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
4561     if (LHS == Div->getLHS() && RHS == Div->getRHS())
4562       return Div;   // must be loop invariant
4563     return getUDivExpr(LHS, RHS);
4564   }
4565 
4566   // If this is a loop recurrence for a loop that does not contain L, then we
4567   // are dealing with the final value computed by the loop.
4568   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4569     // First, attempt to evaluate each operand.
4570     // Avoid performing the look-up in the common case where the specified
4571     // expression has no loop-variant portions.
4572     for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
4573       const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
4574       if (OpAtScope == AddRec->getOperand(i))
4575         continue;
4576 
4577       // Okay, at least one of these operands is loop variant but might be
4578       // foldable.  Build a new instance of the folded AddRec expression.
4579       SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
4580                                           AddRec->op_begin()+i);
4581       NewOps.push_back(OpAtScope);
4582       for (++i; i != e; ++i)
4583         NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
4584 
4585       AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop()));
4586       break;
4587     }
4588 
4589     // If the scope is outside the addrec's loop, evaluate it by using the
4590     // loop exit value of the addrec.
4591     if (!AddRec->getLoop()->contains(L)) {
4592       // To evaluate this recurrence, we need to know how many times the AddRec
4593       // loop iterates.  Compute this now.
4594       const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
4595       if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
4596 
4597       // Then, evaluate the AddRec.
4598       return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
4599     }
4600 
4601     return AddRec;
4602   }
4603 
4604   if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
4605     const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4606     if (Op == Cast->getOperand())
4607       return Cast;  // must be loop invariant
4608     return getZeroExtendExpr(Op, Cast->getType());
4609   }
4610 
4611   if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
4612     const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4613     if (Op == Cast->getOperand())
4614       return Cast;  // must be loop invariant
4615     return getSignExtendExpr(Op, Cast->getType());
4616   }
4617 
4618   if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
4619     const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4620     if (Op == Cast->getOperand())
4621       return Cast;  // must be loop invariant
4622     return getTruncateExpr(Op, Cast->getType());
4623   }
4624 
4625   llvm_unreachable("Unknown SCEV type!");
4626   return 0;
4627 }
4628 
4629 /// getSCEVAtScope - This is a convenience function which does
4630 /// getSCEVAtScope(getSCEV(V), L).
4631 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
4632   return getSCEVAtScope(getSCEV(V), L);
4633 }
4634 
4635 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
4636 /// following equation:
4637 ///
4638 ///     A * X = B (mod N)
4639 ///
4640 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
4641 /// A and B isn't important.
4642 ///
4643 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
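///
/// Worked example (illustrative): with BW = 8, A = 6 and B = 10, we have
/// D = gcd(6, 256) = 2.  B is divisible by D, A/D = 3, N/D = 128, and the
/// multiplicative inverse of 3 modulo 128 is 43, so the minimum root is
/// (43 * 5) mod 128 = 87; indeed 6 * 87 = 522 = 10 (mod 256).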
4644 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
4645                                                ScalarEvolution &SE) {
4646   uint32_t BW = A.getBitWidth();
4647   assert(BW == B.getBitWidth() && "Bit widths must be the same.");
4648   assert(A != 0 && "A must be non-zero.");
4649 
4650   // 1. D = gcd(A, N)
4651   //
4652   // The gcd of A and N may have only one prime factor: 2. The number of
4653   // trailing zeros in A is its multiplicity
4654   uint32_t Mult2 = A.countTrailingZeros();
4655   // D = 2^Mult2
4656 
4657   // 2. Check if B is divisible by D.
4658   //
4659   // B is divisible by D if and only if the multiplicity of prime factor 2 for B
4660   // is not less than the multiplicity of this prime factor for D.
4661   if (B.countTrailingZeros() < Mult2)
4662     return SE.getCouldNotCompute();
4663 
4664   // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
4665   // modulo (N / D).
4666   //
4667   // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
4668   // bit width during computations.
4669   APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
4670   APInt Mod(BW + 1, 0);
4671   Mod.set(BW - Mult2);  // Mod = N / D
4672   APInt I = AD.multiplicativeInverse(Mod);
4673 
4674   // 4. Compute the minimum unsigned root of the equation:
4675   // I * (B / D) mod (N / D)
4676   APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
4677 
4678   // The result is guaranteed to be less than 2^BW so we may truncate it to BW
4679   // bits.
4680   return SE.getConstant(Result.trunc(BW));
4681 }
4682 
4683 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
4684 /// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
4685 /// might be the same) or two SCEVCouldNotCompute objects.
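///
/// The chrec {L,+,M,+,N} evaluated at iteration X equals
/// L + M*X + N*X*(X-1)/2, i.e. the polynomial A*X^2 + B*X + C with
/// A = N/2, B = M - N/2 and C = L, which is what is solved below.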
4686 ///
4687 static std::pair<const SCEV *,const SCEV *>
4688 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
4689   assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4690   const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4691   const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4692   const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4693 
4694   // We currently can only solve this if the coefficients are constants.
4695   if (!LC || !MC || !NC) {
4696     const SCEV *CNC = SE.getCouldNotCompute();
4697     return std::make_pair(CNC, CNC);
4698   }
4699 
4700   uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4701   const APInt &L = LC->getValue()->getValue();
4702   const APInt &M = MC->getValue()->getValue();
4703   const APInt &N = NC->getValue()->getValue();
4704   APInt Two(BitWidth, 2);
4705   APInt Four(BitWidth, 4);
4706 
4707   {
4708     using namespace APIntOps;
4709     const APInt& C = L;
4710     // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
4711     // The B coefficient is M-N/2
4712     APInt B(M);
4713     B -= sdiv(N,Two);
4714 
4715     // The A coefficient is N/2
4716     APInt A(N.sdiv(Two));
4717 
4718     // Compute the B^2-4ac term.
4719     APInt SqrtTerm(B);
4720     SqrtTerm *= B;
4721     SqrtTerm -= Four * (A * C);
4722 
4723     // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4724     // integer value or else APInt::sqrt() will assert.
4725     APInt SqrtVal(SqrtTerm.sqrt());
4726 
4727     // Compute the two solutions for the quadratic formula.
4728     // The divisions must be performed as signed divisions.
4729     APInt NegB(-B);
4730     APInt TwoA( A << 1 );
4731     if (TwoA.isMinValue()) {
4732       const SCEV *CNC = SE.getCouldNotCompute();
4733       return std::make_pair(CNC, CNC);
4734     }
4735 
4736     LLVMContext &Context = SE.getContext();
4737 
4738     ConstantInt *Solution1 =
4739       ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
4740     ConstantInt *Solution2 =
4741       ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
4742 
4743     return std::make_pair(SE.getConstant(Solution1),
4744                           SE.getConstant(Solution2));
4745   } // end APIntOps scope
4746 }
4747 
4748 /// HowFarToZero - Return the number of times a backedge comparing the specified
4749 /// value to zero will execute.  If not computable, return CouldNotCompute.
4750 ScalarEvolution::BackedgeTakenInfo
4751 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4752   // If the value is a constant
4753   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4754     // If the value is already zero, the branch will execute zero times.
4755     if (C->getValue()->isZero()) return C;
4756     return getCouldNotCompute();  // Otherwise it will loop infinitely.
4757   }
4758 
4759   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4760   if (!AddRec || AddRec->getLoop() != L)
4761     return getCouldNotCompute();
4762 
4763   if (AddRec->isAffine()) {
4764     // If this is an affine expression, the execution count of this branch is
4765     // the minimum unsigned root of the following equation:
4766     //
4767     //     Start + Step*N = 0 (mod 2^BW)
4768     //
4769     // equivalent to:
4770     //
4771     //             Step*N = -Start (mod 2^BW)
4772     //
4773     // where BW is the common bit width of Start and Step.
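    //
    // For example (illustrative), the affine chrec {10,+,-1} reaches zero
    // after N = 10 steps (Step = -1, so N = Start), and {-5,+,1} after
    // N = 5 steps (Step = 1, so N = -Start), matching the special cases
    // handled below.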
4774 
4775     // Get the initial value for the loop.
4776     const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4777                                        L->getParentLoop());
4778     const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4779                                       L->getParentLoop());
4780 
4781     if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4782       // For now we handle only constant steps.
4783 
4784       // First, handle unitary steps.
4785       if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4786         return getNegativeSCEV(Start);          //   N = -Start (as unsigned)
4787       if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4788         return Start;                           //    N = Start (as unsigned)
4789 
4790       // Then, try to solve the above equation provided that Start is constant.
4791       if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4792         return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4793                                             -StartC->getValue()->getValue(),
4794                                             *this);
4795     }
4796   } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
4797     // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4798     // the quadratic equation to solve it.
4799     std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4800                                                                     *this);
4801     const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4802     const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4803     if (R1) {
4804 #if 0
4805       dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
4806              << "  sol#2: " << *R2 << "\n";
4807 #endif
4808       // Pick the smallest positive root value.
4809       if (ConstantInt *CB =
4810           dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
4811                                    R1->getValue(), R2->getValue()))) {
4812         if (CB->getZExtValue() == false)
4813           std::swap(R1, R2);   // R1 is the minimum root now.
4814 
4815         // We can only use this value if the chrec ends up with an exact zero
4816         // value at this index.  When solving for "X*X != 5", for example, we
4817         // should not accept a root of 2.
4818         const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4819         if (Val->isZero())
4820           return R1;  // We found a quadratic root!
4821       }
4822     }
4823   }
4824 
4825   return getCouldNotCompute();
4826 }
4827 
4828 /// HowFarToNonZero - Return the number of times a backedge checking the
4829 /// specified value for nonzero will execute.  If not computable, return
4830 /// CouldNotCompute.
4831 ScalarEvolution::BackedgeTakenInfo
4832 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4833   // Loops that look like: while (X == 0) are very strange indeed.  We don't
4834   // handle them yet except for the trivial case.  This could be expanded in the
4835   // future as needed.
4836 
4837   // If the value is a constant, check to see if it is known to be non-zero
4838   // already.  If so, the backedge will execute zero times.
4839   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4840     if (!C->getValue()->isNullValue())
4841       return getConstant(C->getType(), 0);
4842     return getCouldNotCompute();  // Otherwise it will loop infinitely.
4843   }
4844 
4845   // We could implement others, but I really doubt anyone writes loops like
4846   // this, and if they did, they would already be constant folded.
4847   return getCouldNotCompute();
4848 }
4849 
4850 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4851 /// (which may not be an immediate predecessor) which has exactly one
4852 /// successor from which BB is reachable, or null if no such block is
4853 /// successor from which BB is reachable, paired with that successor; a pair
4854 /// of null pointers is returned if no such block is found.
4855 std::pair<BasicBlock *, BasicBlock *>
4856 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4857   // If the block has a unique predecessor, then there is no path from the
4858   // predecessor to the block that does not go through the direct edge
4859   // from the predecessor to the block.
4860   if (BasicBlock *Pred = BB->getSinglePredecessor())
4861     return std::make_pair(Pred, BB);
4862 
4863   // A loop's header is defined to be a block that dominates the loop.
4864   // If the header has a unique predecessor outside the loop, it must be
4865   // a block that has exactly one successor that can reach the loop.
4866   if (Loop *L = LI->getLoopFor(BB))
4867     return std::make_pair(L->getLoopPredecessor(), L->getHeader());
4868 
4869   return std::pair<BasicBlock *, BasicBlock *>();
4870 }
4871 
4872 /// HasSameValue - SCEV structural equivalence is usually sufficient for
4873 /// testing whether two expressions are equal, however for the purposes of
4874 /// looking for a condition guarding a loop, it can be useful to be a little
4875 /// more general, since a front-end may have replicated the controlling
4876 /// expression.
4877 ///
4878 static bool HasSameValue(const SCEV *A, const SCEV *B) {
4879   // Quick check to see if they are the same SCEV.
4880   if (A == B) return true;
4881 
4882   // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4883   // two different instructions with the same value. Check for this case.
4884   if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4885     if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4886       if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4887         if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4888           if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
4889             return true;
4890 
4891   // Otherwise assume they may have a different value.
4892   return false;
4893 }
4894 
4895 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
4896 /// predicate Pred. Return true iff any changes were made.
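/// For example (illustrative), a constant comparison such as (X u>= 1) is
/// canonicalized to (X != 0), and (X u<= 7) to (X u< 8).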
4897 ///
4898 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
4899                                            const SCEV *&LHS, const SCEV *&RHS) {
4900   bool Changed = false;
4901 
4902   // Canonicalize a constant to the right side.
4903   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
4904     // Check for both operands constant.
4905     if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
4906       if (ConstantExpr::getICmp(Pred,
4907                                 LHSC->getValue(),
4908                                 RHSC->getValue())->isNullValue())
4909         goto trivially_false;
4910       else
4911         goto trivially_true;
4912     }
4913     // Otherwise swap the operands to put the constant on the right.
4914     std::swap(LHS, RHS);
4915     Pred = ICmpInst::getSwappedPredicate(Pred);
4916     Changed = true;
4917   }
4918 
4919   // If we're comparing an addrec with a value which is loop-invariant in the
4920   // addrec's loop, put the addrec on the left. Also make a dominance check,
4921   // as both operands could be addrecs loop-invariant in each other's loop.
4922   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
4923     const Loop *L = AR->getLoop();
4924     if (isLoopInvariant(LHS, L) && LHS->properlyDominates(L->getHeader(), DT)) {
4925       std::swap(LHS, RHS);
4926       Pred = ICmpInst::getSwappedPredicate(Pred);
4927       Changed = true;
4928     }
4929   }
4930 
4931   // If there's a constant operand, canonicalize comparisons with boundary
4932   // cases, and canonicalize *-or-equal comparisons to regular comparisons.
4933   if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
4934     const APInt &RA = RC->getValue()->getValue();
4935     switch (Pred) {
4936     default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4937     case ICmpInst::ICMP_EQ:
4938     case ICmpInst::ICMP_NE:
4939       break;
4940     case ICmpInst::ICMP_UGE:
4941       if ((RA - 1).isMinValue()) {
4942         Pred = ICmpInst::ICMP_NE;
4943         RHS = getConstant(RA - 1);
4944         Changed = true;
4945         break;
4946       }
4947       if (RA.isMaxValue()) {
4948         Pred = ICmpInst::ICMP_EQ;
4949         Changed = true;
4950         break;
4951       }
4952       if (RA.isMinValue()) goto trivially_true;
4953 
4954       Pred = ICmpInst::ICMP_UGT;
4955       RHS = getConstant(RA - 1);
4956       Changed = true;
4957       break;
4958     case ICmpInst::ICMP_ULE:
4959       if ((RA + 1).isMaxValue()) {
4960         Pred = ICmpInst::ICMP_NE;
4961         RHS = getConstant(RA + 1);
4962         Changed = true;
4963         break;
4964       }
4965       if (RA.isMinValue()) {
4966         Pred = ICmpInst::ICMP_EQ;
4967         Changed = true;
4968         break;
4969       }
4970       if (RA.isMaxValue()) goto trivially_true;
4971 
4972       Pred = ICmpInst::ICMP_ULT;
4973       RHS = getConstant(RA + 1);
4974       Changed = true;
4975       break;
4976     case ICmpInst::ICMP_SGE:
4977       if ((RA - 1).isMinSignedValue()) {
4978         Pred = ICmpInst::ICMP_NE;
4979         RHS = getConstant(RA - 1);
4980         Changed = true;
4981         break;
4982       }
4983       if (RA.isMaxSignedValue()) {
4984         Pred = ICmpInst::ICMP_EQ;
4985         Changed = true;
4986         break;
4987       }
4988       if (RA.isMinSignedValue()) goto trivially_true;
4989 
4990       Pred = ICmpInst::ICMP_SGT;
4991       RHS = getConstant(RA - 1);
4992       Changed = true;
4993       break;
4994     case ICmpInst::ICMP_SLE:
4995       if ((RA + 1).isMaxSignedValue()) {
4996         Pred = ICmpInst::ICMP_NE;
4997         RHS = getConstant(RA + 1);
4998         Changed = true;
4999         break;
5000       }
5001       if (RA.isMinSignedValue()) {
5002         Pred = ICmpInst::ICMP_EQ;
5003         Changed = true;
5004         break;
5005       }
5006       if (RA.isMaxSignedValue()) goto trivially_true;
5007 
5008       Pred = ICmpInst::ICMP_SLT;
5009       RHS = getConstant(RA + 1);
5010       Changed = true;
5011       break;
5012     case ICmpInst::ICMP_UGT:
5013       if (RA.isMinValue()) {
5014         Pred = ICmpInst::ICMP_NE;
5015         Changed = true;
5016         break;
5017       }
5018       if ((RA + 1).isMaxValue()) {
5019         Pred = ICmpInst::ICMP_EQ;
5020         RHS = getConstant(RA + 1);
5021         Changed = true;
5022         break;
5023       }
5024       if (RA.isMaxValue()) goto trivially_false;
5025       break;
5026     case ICmpInst::ICMP_ULT:
5027       if (RA.isMaxValue()) {
5028         Pred = ICmpInst::ICMP_NE;
5029         Changed = true;
5030         break;
5031       }
5032       if ((RA - 1).isMinValue()) {
5033         Pred = ICmpInst::ICMP_EQ;
5034         RHS = getConstant(RA - 1);
5035         Changed = true;
5036         break;
5037       }
5038       if (RA.isMinValue()) goto trivially_false;
5039       break;
5040     case ICmpInst::ICMP_SGT:
5041       if (RA.isMinSignedValue()) {
5042         Pred = ICmpInst::ICMP_NE;
5043         Changed = true;
5044         break;
5045       }
5046       if ((RA + 1).isMaxSignedValue()) {
5047         Pred = ICmpInst::ICMP_EQ;
5048         RHS = getConstant(RA + 1);
5049         Changed = true;
5050         break;
5051       }
5052       if (RA.isMaxSignedValue()) goto trivially_false;
5053       break;
5054     case ICmpInst::ICMP_SLT:
5055       if (RA.isMaxSignedValue()) {
5056         Pred = ICmpInst::ICMP_NE;
5057         Changed = true;
5058         break;
5059       }
5060       if ((RA - 1).isMinSignedValue()) {
5061         Pred = ICmpInst::ICMP_EQ;
5062         RHS = getConstant(RA - 1);
5063         Changed = true;
5064         break;
5065       }
5066       if (RA.isMinSignedValue()) goto trivially_false;
5067       break;
5068     }
5069   }
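
  // Worked examples of the boundary canonicalizations above (i32 values):
  //
  //   x >u 0    -->  x != 0          (UGT against the minimum value)
  //   x <u 1    -->  x == 0          (ULT where RA-1 is the minimum value)
  //   x <=s 7   -->  x <s 8          (SLE with no boundary case hit)
  //   x <=u -1  -->  trivially true  (ULE against the maximum value)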
5070 
5071   // Check for obvious equality.
5072   if (HasSameValue(LHS, RHS)) {
5073     if (ICmpInst::isTrueWhenEqual(Pred))
5074       goto trivially_true;
5075     if (ICmpInst::isFalseWhenEqual(Pred))
5076       goto trivially_false;
5077   }
5078 
5079   // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
5080   // adding or subtracting 1 from one of the operands.
5081   switch (Pred) {
5082   case ICmpInst::ICMP_SLE:
5083     if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
5084       RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5085                        /*HasNUW=*/false, /*HasNSW=*/true);
5086       Pred = ICmpInst::ICMP_SLT;
5087       Changed = true;
5088     } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
5089       LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5090                        /*HasNUW=*/false, /*HasNSW=*/true);
5091       Pred = ICmpInst::ICMP_SLT;
5092       Changed = true;
5093     }
5094     break;
5095   case ICmpInst::ICMP_SGE:
5096     if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
5097       RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5098                        /*HasNUW=*/false, /*HasNSW=*/true);
5099       Pred = ICmpInst::ICMP_SGT;
5100       Changed = true;
5101     } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
5102       LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5103                        /*HasNUW=*/false, /*HasNSW=*/true);
5104       Pred = ICmpInst::ICMP_SGT;
5105       Changed = true;
5106     }
5107     break;
5108   case ICmpInst::ICMP_ULE:
5109     if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
5110       RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5111                        /*HasNUW=*/true, /*HasNSW=*/false);
5112       Pred = ICmpInst::ICMP_ULT;
5113       Changed = true;
5114     } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
5115       LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5116                        /*HasNUW=*/true, /*HasNSW=*/false);
5117       Pred = ICmpInst::ICMP_ULT;
5118       Changed = true;
5119     }
5120     break;
5121   case ICmpInst::ICMP_UGE:
5122     if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
5123       RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5124                        /*HasNUW=*/true, /*HasNSW=*/false);
5125       Pred = ICmpInst::ICMP_UGT;
5126       Changed = true;
5127     } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
5128       LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5129                        /*HasNUW=*/true, /*HasNSW=*/false);
5130       Pred = ICmpInst::ICMP_UGT;
5131       Changed = true;
5132     }
5133     break;
5134   default:
5135     break;
5136   }
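
  // For example, if RHS is a zext from i8 (value in [0,256)), its signed
  // maximum is 255 rather than INT32_MAX, so "LHS <=s RHS" can be rewritten
  // as "LHS <s RHS+1", with the add marked NSW because signed overflow has
  // just been ruled out; the unsigned cases use the NUW flag the same way.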
5137 
5138   // TODO: More simplifications are possible here.
5139 
5140   return Changed;
5141 
5142 trivially_true:
5143   // Return 0 == 0.
5144   LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
5145   Pred = ICmpInst::ICMP_EQ;
5146   return true;
5147 
5148 trivially_false:
5149   // Return 0 != 0.
5150   LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
5151   Pred = ICmpInst::ICMP_NE;
5152   return true;
5153 }
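
// Usage sketch (illustrative only; SE, X and Y are hypothetical names for a
// ScalarEvolution instance and two same-typed SCEVs, and the caller is
// assumed to have access to this member):
//
//   ICmpInst::Predicate P = ICmpInst::ICMP_SLE;
//   const SCEV *A = X, *B = Y;
//   if (SE.SimplifyICmpOperands(P, A, B)) {
//     // P, A and B now describe an equivalent, canonicalized comparison,
//     // e.g. a constant moved to the right or SLE turned into SLT.
//   }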
5154 
5155 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
5156   return getSignedRange(S).getSignedMax().isNegative();
5157 }
5158 
5159 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
5160   return getSignedRange(S).getSignedMin().isStrictlyPositive();
5161 }
5162 
5163 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
5164   return !getSignedRange(S).getSignedMin().isNegative();
5165 }
5166 
5167 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
5168   return !getSignedRange(S).getSignedMax().isStrictlyPositive();
5169 }
5170 
5171 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
5172   return isKnownNegative(S) || isKnownPositive(S);
5173 }
5174 
5175 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
5176                                        const SCEV *LHS, const SCEV *RHS) {
5177   // Canonicalize the inputs first.
5178   (void)SimplifyICmpOperands(Pred, LHS, RHS);
5179 
5180   // If LHS or RHS is an addrec, check to see if the condition is true in
5181   // every iteration of the loop.
5182   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
5183     if (isLoopEntryGuardedByCond(
5184           AR->getLoop(), Pred, AR->getStart(), RHS) &&
5185         isLoopBackedgeGuardedByCond(
5186           AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
5187       return true;
5188   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
5189     if (isLoopEntryGuardedByCond(
5190           AR->getLoop(), Pred, LHS, AR->getStart()) &&
5191         isLoopBackedgeGuardedByCond(
5192           AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
5193       return true;
5194 
5195   // Otherwise see what can be done with known constant ranges.
5196   return isKnownPredicateWithRanges(Pred, LHS, RHS);
5197 }
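
// For example, the query "{1,+,1}<L> <s N" (N loop-invariant) is known to
// hold if the loop entry is guarded by "1 <s N" (the start value) and the
// backedge is guarded by "{2,+,1}<L> <s N" (the post-increment value), i.e.
// the condition holds on entry and is re-established on every backedge.
// A caller-side sketch (IndVar and Limit are hypothetical Values):
//
//   if (SE.isKnownPredicate(ICmpInst::ICMP_SLT,
//                           SE.getSCEV(IndVar), SE.getSCEV(Limit))) {
//     // The comparison folds to true.
//   }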
5198 
5199 bool
5200 ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
5201                                             const SCEV *LHS, const SCEV *RHS) {
5202   if (HasSameValue(LHS, RHS))
5203     return ICmpInst::isTrueWhenEqual(Pred);
5204 
5205   // This code is split out from isKnownPredicate because it is called from
5206   // within isLoopEntryGuardedByCond.
5207   switch (Pred) {
5208   default:
5209     llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5210     break;
5211   case ICmpInst::ICMP_SGT:
5212     Pred = ICmpInst::ICMP_SLT;
5213     std::swap(LHS, RHS);
5214   case ICmpInst::ICMP_SLT: {
5215     ConstantRange LHSRange = getSignedRange(LHS);
5216     ConstantRange RHSRange = getSignedRange(RHS);
5217     if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
5218       return true;
5219     if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
5220       return false;
5221     break;
5222   }
5223   case ICmpInst::ICMP_SGE:
5224     Pred = ICmpInst::ICMP_SLE;
5225     std::swap(LHS, RHS);
5226   case ICmpInst::ICMP_SLE: {
5227     ConstantRange LHSRange = getSignedRange(LHS);
5228     ConstantRange RHSRange = getSignedRange(RHS);
5229     if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
5230       return true;
5231     if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
5232       return false;
5233     break;
5234   }
5235   case ICmpInst::ICMP_UGT:
5236     Pred = ICmpInst::ICMP_ULT;
5237     std::swap(LHS, RHS);
5238   case ICmpInst::ICMP_ULT: {
5239     ConstantRange LHSRange = getUnsignedRange(LHS);
5240     ConstantRange RHSRange = getUnsignedRange(RHS);
5241     if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
5242       return true;
5243     if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
5244       return false;
5245     break;
5246   }
5247   case ICmpInst::ICMP_UGE:
5248     Pred = ICmpInst::ICMP_ULE;
5249     std::swap(LHS, RHS);
5250   case ICmpInst::ICMP_ULE: {
5251     ConstantRange LHSRange = getUnsignedRange(LHS);
5252     ConstantRange RHSRange = getUnsignedRange(RHS);
5253     if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
5254       return true;
5255     if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
5256       return false;
5257     break;
5258   }
5259   case ICmpInst::ICMP_NE: {
5260     if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
5261       return true;
5262     if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
5263       return true;
5264 
5265     const SCEV *Diff = getMinusSCEV(LHS, RHS);
5266     if (isKnownNonZero(Diff))
5267       return true;
5268     break;
5269   }
5270   case ICmpInst::ICMP_EQ:
5271     // The check at the top of the function catches the case where
5272     // the values are known to be equal.
5273     break;
5274   }
5275   return false;
5276 }
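
// Worked example: if getUnsignedRange(LHS) is [0,8) and getUnsignedRange(RHS)
// is [8,16), then for ICMP_ULT the maximum of LHS (7) is below the minimum of
// RHS (8), so the predicate is known to hold; the same disjoint ranges also
// prove ICMP_NE through the empty-intersection check.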
5277 
5278 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
5279 /// protected by a conditional between LHS and RHS.  This is used to
5280 /// eliminate casts.
5281 bool
5282 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
5283                                              ICmpInst::Predicate Pred,
5284                                              const SCEV *LHS, const SCEV *RHS) {
5285   // Interpret a null as meaning no loop, where there is obviously no guard
5286   // (interprocedural conditions notwithstanding).
5287   if (!L) return true;
5288 
5289   BasicBlock *Latch = L->getLoopLatch();
5290   if (!Latch)
5291     return false;
5292 
5293   BranchInst *LoopContinuePredicate =
5294     dyn_cast<BranchInst>(Latch->getTerminator());
5295   if (!LoopContinuePredicate ||
5296       LoopContinuePredicate->isUnconditional())
5297     return false;
5298 
5299   return isImpliedCond(Pred, LHS, RHS,
5300                        LoopContinuePredicate->getCondition(),
5301                        LoopContinuePredicate->getSuccessor(0) != L->getHeader());
5302 }
5303 
5304 /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
5305 /// by a conditional between LHS and RHS.  This is used to help avoid max
5306 /// expressions in loop trip counts, and to eliminate casts.
5307 bool
5308 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
5309                                           ICmpInst::Predicate Pred,
5310                                           const SCEV *LHS, const SCEV *RHS) {
5311   // Interpret a null as meaning no loop, where there is obviously no guard
5312   // (interprocedural conditions notwithstanding).
5313   if (!L) return false;
5314 
5315   // Starting at the loop predecessor, climb up the predecessor chain as long
5316   // as we can find predecessors that have a unique successor leading to the
5317   // original header.
5318   for (std::pair<BasicBlock *, BasicBlock *>
5319          Pair(L->getLoopPredecessor(), L->getHeader());
5320        Pair.first;
5321        Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
5322 
5323     BranchInst *LoopEntryPredicate =
5324       dyn_cast<BranchInst>(Pair.first->getTerminator());
5325     if (!LoopEntryPredicate ||
5326         LoopEntryPredicate->isUnconditional())
5327       continue;
5328 
5329     if (isImpliedCond(Pred, LHS, RHS,
5330                       LoopEntryPredicate->getCondition(),
5331                       LoopEntryPredicate->getSuccessor(0) != Pair.second))
5332       return true;
5333   }
5334 
5335   return false;
5336 }
5337 
5338 /// isImpliedCond - Test whether the condition described by Pred, LHS,
5339 /// and RHS is true whenever the given Cond value evaluates to true.
5340 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
5341                                     const SCEV *LHS, const SCEV *RHS,
5342                                     Value *FoundCondValue,
5343                                     bool Inverse) {
5344   // Recursively handle And and Or conditions.
5345   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
5346     if (BO->getOpcode() == Instruction::And) {
5347       if (!Inverse)
5348         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
5349                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
5350     } else if (BO->getOpcode() == Instruction::Or) {
5351       if (Inverse)
5352         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
5353                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
5354     }
5355   }
5356 
5357   ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
5358   if (!ICI) return false;
5359 
5360   // Bail if the ICmp's operands' types are wider than the needed type
5361   // before attempting to call getSCEV on them. This avoids infinite
5362   // recursion, since the analysis of widening casts can require loop
5363   // exit condition information for overflow checking, which would
5364   // lead back here.
5365   if (getTypeSizeInBits(LHS->getType()) <
5366       getTypeSizeInBits(ICI->getOperand(0)->getType()))
5367     return false;
5368 
5369   // Now that we found a conditional branch that dominates the loop, check to
5370   // see if it is the comparison we are looking for.
5371   ICmpInst::Predicate FoundPred;
5372   if (Inverse)
5373     FoundPred = ICI->getInversePredicate();
5374   else
5375     FoundPred = ICI->getPredicate();
5376 
5377   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
5378   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
5379 
5380   // Balance the types. The case where FoundLHS' type is wider than
5381   // LHS' type is checked for above.
5382   if (getTypeSizeInBits(LHS->getType()) >
5383       getTypeSizeInBits(FoundLHS->getType())) {
5384     if (CmpInst::isSigned(Pred)) {
5385       FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
5386       FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
5387     } else {
5388       FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
5389       FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
5390     }
5391   }
5392 
5393   // Canonicalize the query to match the way instcombine will have
5394   // canonicalized the comparison.
5395   if (SimplifyICmpOperands(Pred, LHS, RHS))
5396     if (LHS == RHS)
5397       return CmpInst::isTrueWhenEqual(Pred);
5398   if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
5399     if (FoundLHS == FoundRHS)
5400       return CmpInst::isFalseWhenEqual(Pred);
5401 
5402   // Check to see if we can make the LHS or RHS match.
5403   if (LHS == FoundRHS || RHS == FoundLHS) {
5404     if (isa<SCEVConstant>(RHS)) {
5405       std::swap(FoundLHS, FoundRHS);
5406       FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
5407     } else {
5408       std::swap(LHS, RHS);
5409       Pred = ICmpInst::getSwappedPredicate(Pred);
5410     }
5411   }
5412 
5413   // Check whether the found predicate is the same as the desired predicate.
5414   if (FoundPred == Pred)
5415     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
5416 
5417   // Check whether swapping the found predicate makes it the same as the
5418   // desired predicate.
5419   if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
5420     if (isa<SCEVConstant>(RHS))
5421       return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
5422     else
5423       return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
5424                                    RHS, LHS, FoundLHS, FoundRHS);
5425   }
5426 
5427   // Check whether the actual condition is beyond sufficient.
5428   if (FoundPred == ICmpInst::ICMP_EQ)
5429     if (ICmpInst::isTrueWhenEqual(Pred))
5430       if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
5431         return true;
5432   if (Pred == ICmpInst::ICMP_NE)
5433     if (!ICmpInst::isTrueWhenEqual(FoundPred))
5434       if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
5435         return true;
5436 
5437   // Otherwise assume the worst.
5438   return false;
5439 }
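
// Two examples of the "beyond sufficient" checks above: a branch on "x == 5"
// implies the weaker query "x <=s 5", since equality suffices for any
// true-when-equal predicate, and a query "x != y" is implied by any strict
// found condition such as "x <u y".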
5440 
5441 /// isImpliedCondOperands - Test whether the condition described by Pred,
5442 /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
5443 /// and FoundRHS is true.
5444 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
5445                                             const SCEV *LHS, const SCEV *RHS,
5446                                             const SCEV *FoundLHS,
5447                                             const SCEV *FoundRHS) {
5448   return isImpliedCondOperandsHelper(Pred, LHS, RHS,
5449                                      FoundLHS, FoundRHS) ||
5450          // ~x < ~y --> x > y
5451          isImpliedCondOperandsHelper(Pred, LHS, RHS,
5452                                      getNotSCEV(FoundRHS),
5453                                      getNotSCEV(FoundLHS));
5454 }
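
// Worked example of the NOT identity: getNotSCEV(V) is the bitwise complement
// (all-ones minus V), which reverses ordering. For i8 values x = 3 and
// y = 10, "x <u y" holds and so does "~y <u ~x" (245 <u 252), so a condition
// proved for FoundLHS/FoundRHS can be retried on ~FoundRHS/~FoundLHS with the
// same predicate.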
5455 
5456 /// isImpliedCondOperandsHelper - Test whether the condition described by
5457 /// Pred, LHS, and RHS is true whenever the condition described by Pred,
5458 /// FoundLHS, and FoundRHS is true.
5459 bool
5460 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
5461                                              const SCEV *LHS, const SCEV *RHS,
5462                                              const SCEV *FoundLHS,
5463                                              const SCEV *FoundRHS) {
5464   switch (Pred) {
5465   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5466   case ICmpInst::ICMP_EQ:
5467   case ICmpInst::ICMP_NE:
5468     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
5469       return true;
5470     break;
5471   case ICmpInst::ICMP_SLT:
5472   case ICmpInst::ICMP_SLE:
5473     if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
5474         isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
5475       return true;
5476     break;
5477   case ICmpInst::ICMP_SGT:
5478   case ICmpInst::ICMP_SGE:
5479     if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
5480         isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
5481       return true;
5482     break;
5483   case ICmpInst::ICMP_ULT:
5484   case ICmpInst::ICMP_ULE:
5485     if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
5486         isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
5487       return true;
5488     break;
5489   case ICmpInst::ICMP_UGT:
5490   case ICmpInst::ICMP_UGE:
5491     if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
5492         isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
5493       return true;
5494     break;
5495   }
5496 
5497   return false;
5498 }
5499 
5500 /// getBECount - Subtract the end and start values and divide by the step,
5501 /// rounding up, to get the number of times the backedge is executed. Return
5502 /// CouldNotCompute if an intermediate computation overflows.
5503 const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
5504                                         const SCEV *End,
5505                                         const SCEV *Step,
5506                                         bool NoWrap) {
5507   assert(!isKnownNegative(Step) &&
5508          "This code doesn't handle negative strides yet!");
5509 
5510   const Type *Ty = Start->getType();
5511   const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
5512   const SCEV *Diff = getMinusSCEV(End, Start);
5513   const SCEV *RoundUp = getAddExpr(Step, NegOne);
5514 
5515   // Add an adjustment to the difference between End and Start so that
5516   // the division will effectively round up.
5517   const SCEV *Add = getAddExpr(Diff, RoundUp);
5518 
5519   if (!NoWrap) {
5520     // Check Add for unsigned overflow.
5521     // TODO: More sophisticated things could be done here.
5522     const Type *WideTy = IntegerType::get(getContext(),
5523                                           getTypeSizeInBits(Ty) + 1);
5524     const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
5525     const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
5526     const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
5527     if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
5528       return getCouldNotCompute();
5529   }
5530 
5531   return getUDivExpr(Add, Step);
5532 }
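
// Worked example (i32): for Start = 0, End = 10 and Step = 3,
//
//   Diff = 10, RoundUp = 2, Add = 12, BECount = 12 /u 3 = 4
//
// i.e. 10/3 rounded up. Without a no-wrap flag, Diff and RoundUp are also
// added in a 33-bit type; if the zero-extension of the narrow sum cannot be
// shown equal to that wider sum, overflow cannot be excluded and
// CouldNotCompute is returned instead.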
5533 
5534 /// HowManyLessThans - Return the number of times a backedge containing the
5535 /// specified less-than comparison will execute.  If not computable, return
5536 /// CouldNotCompute.
5537 ScalarEvolution::BackedgeTakenInfo
5538 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
5539                                   const Loop *L, bool isSigned) {
5540   // Only handle:  "ADDREC < LoopInvariant".
5541   if (!isLoopInvariant(RHS, L)) return getCouldNotCompute();
5542 
5543   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
5544   if (!AddRec || AddRec->getLoop() != L)
5545     return getCouldNotCompute();
5546 
5547   // Check to see if we have a flag which makes analysis easy.
5548   bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
5549                            AddRec->hasNoUnsignedWrap();
5550 
5551   if (AddRec->isAffine()) {
5552     unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
5553     const SCEV *Step = AddRec->getStepRecurrence(*this);
5554 
5555     if (Step->isZero())
5556       return getCouldNotCompute();
5557     if (Step->isOne()) {
5558       // With unit stride, the iteration never steps past the limit value.
5559     } else if (isKnownPositive(Step)) {
5560       // Test whether a positive iteration can step past the limit
5561       // value and past the maximum value for its type in a single step.
5562       // Note that checking NoWrap alone is not sufficient here: even
5563       // though the value after a wrap is undefined, wrapping is not
5564       // undefined behavior, so if a wrap does occur the loop could either
5565       // terminate or run forever. In either case the loop is guaranteed to
5566       // iterate at least until the iteration where the wrapping occurs.
5567       const SCEV *One = getConstant(Step->getType(), 1);
5568       if (isSigned) {
5569         APInt Max = APInt::getSignedMaxValue(BitWidth);
5570         if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
5571               .slt(getSignedRange(RHS).getSignedMax()))
5572           return getCouldNotCompute();
5573       } else {
5574         APInt Max = APInt::getMaxValue(BitWidth);
5575         if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
5576               .ult(getUnsignedRange(RHS).getUnsignedMax()))
5577           return getCouldNotCompute();
5578       }
5579     } else
5580       // TODO: Handle negative strides here and below.
5581       return getCouldNotCompute();
5582 
5583     // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
5584     // m.  So, we count the number of iterations in which {n,+,s} < m is true.
5585     // Note that we cannot simply return max(m-n,0)/s because it's not safe to
5586     // treat m-n as either signed or unsigned due to overflow possibility.
5587 
5588     // First, we get the value of the LHS in the first iteration: n
5589     const SCEV *Start = AddRec->getOperand(0);
5590 
5591     // Determine the minimum constant start value.
5592     const SCEV *MinStart = getConstant(isSigned ?
5593       getSignedRange(Start).getSignedMin() :
5594       getUnsignedRange(Start).getUnsignedMin());
5595 
5596     // If we know that the condition is true in order to enter the loop,
5597     // then we know that it will run exactly (m-n)/s times. Otherwise, we
5598     // only know that it will execute (max(m,n)-n)/s times. In both cases,
5599     // the division must round up.
5600     const SCEV *End = RHS;
5601     if (!isLoopEntryGuardedByCond(L,
5602                                   isSigned ? ICmpInst::ICMP_SLT :
5603                                              ICmpInst::ICMP_ULT,
5604                                   getMinusSCEV(Start, Step), RHS))
5605       End = isSigned ? getSMaxExpr(RHS, Start)
5606                      : getUMaxExpr(RHS, Start);
5607 
5608     // Determine the maximum constant end value.
5609     const SCEV *MaxEnd = getConstant(isSigned ?
5610       getSignedRange(End).getSignedMax() :
5611       getUnsignedRange(End).getUnsignedMax());
5612 
5613     // If MaxEnd is within a step of the maximum integer value in its type,
5614     // adjust it down to the minimum value which would produce the same effect.
5615     // This allows the subsequent ceiling division of (N+(step-1))/step to
5616     // compute the correct value.
5617     const SCEV *StepMinusOne = getMinusSCEV(Step,
5618                                             getConstant(Step->getType(), 1));
5619     MaxEnd = isSigned ?
5620       getSMinExpr(MaxEnd,
5621                   getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
5622                                StepMinusOne)) :
5623       getUMinExpr(MaxEnd,
5624                   getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
5625                                StepMinusOne));
5626 
5627     // Finally, we subtract these two values and divide, rounding up, to get
5628     // the number of times the backedge is executed.
5629     const SCEV *BECount = getBECount(Start, End, Step, NoWrap);
5630 
5631     // The maximum backedge count is similar, except using the minimum start
5632     // value and the maximum end value.
5633     const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap);
5634 
5635     return BackedgeTakenInfo(BECount, MaxBECount);
5636   }
5637 
5638   return getCouldNotCompute();
5639 }
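
// Worked example: for the exit test "{0,+,2}<L> <u 7" with 7 invariant in L,
// Start = 0, Step = 2 and End = umax(7, 0) = 7 (with or without the entry
// guard), so
//
//   BECount = (7 - 0 + (2 - 1)) /u 2 = 4
//
// matching the four iteration values 0, 2, 4 and 6 for which the comparison
// still holds. MaxBECount is formed the same way from the minimum constant
// start value and the step-adjusted maximum constant end value.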
5640 
5641 /// getNumIterationsInRange - Return the number of iterations of this loop that
5642 /// produce values in the specified constant range.  Another way of looking at
5643 /// this is that it returns the first iteration number where the value is not in
5644 /// the range, thus computing the exit count. If the iteration count can't
5645 /// be computed, an instance of SCEVCouldNotCompute is returned.
5646 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
5647                                                     ScalarEvolution &SE) const {
5648   if (Range.isFullSet())  // Infinite loop.
5649     return SE.getCouldNotCompute();
5650 
5651   // If the start is a non-zero constant, shift the range to simplify things.
5652   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
5653     if (!SC->getValue()->isZero()) {
5654       SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
5655       Operands[0] = SE.getConstant(SC->getType(), 0);
5656       const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
5657       if (const SCEVAddRecExpr *ShiftedAddRec =
5658             dyn_cast<SCEVAddRecExpr>(Shifted))
5659         return ShiftedAddRec->getNumIterationsInRange(
5660                            Range.subtract(SC->getValue()->getValue()), SE);
5661       // This is strange and shouldn't happen.
5662       return SE.getCouldNotCompute();
5663     }
5664 
5665   // The only time we can solve this is when we have all constant indices.
5666   // Otherwise, we cannot determine the overflow conditions.
5667   for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
5668     if (!isa<SCEVConstant>(getOperand(i)))
5669       return SE.getCouldNotCompute();
5670 
5671 
5672   // Okay at this point we know that all elements of the chrec are constants and
5673   // that the start element is zero.
5674 
5675   // First check to see if the range contains zero.  If not, the first
5676   // iteration exits.
5677   unsigned BitWidth = SE.getTypeSizeInBits(getType());
5678   if (!Range.contains(APInt(BitWidth, 0)))
5679     return SE.getConstant(getType(), 0);
5680 
5681   if (isAffine()) {
5682     // If this is an affine expression then we have this situation:
5683     //   Solve {0,+,A} in Range  ===  Ax in Range
5684 
5685     // We know that zero is in the range.  If A is positive then we know that
5686     // the upper value of the range must be the first possible exit value.
5687     // If A is negative then the lower of the range is the last possible loop
5688     // value.  Also note that we already checked for a full range.
5689     APInt One(BitWidth,1);
5690     APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
5691     APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
5692 
5693     // The exit value should be (End+A)/A.
5694     APInt ExitVal = (End + A).udiv(A);
5695     ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
5696 
5697     // Evaluate at the exit value.  If we really did fall out of the valid
5698     // range, then we computed our trip count, otherwise wrap around or other
5699     // things must have happened.
5700     ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
5701     if (Range.contains(Val->getValue()))
5702       return SE.getCouldNotCompute();  // Something strange happened
5703 
5704     // Ensure that the previous value is in the range.  This is a sanity check.
5705     assert(Range.contains(
5706            EvaluateConstantChrecAtConstant(this,
5707            ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
5708            "Linear scev computation is off in a bad way!");
5709     return SE.getConstant(ExitValue);
5710   } else if (isQuadratic()) {
5711     // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
5712     // quadratic equation to solve it.  To do this, we must frame our problem in
5713     // terms of figuring out when zero is crossed, instead of when
5714     // Range.getUpper() is crossed.
5715     SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
5716     NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
5717     const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
5718 
5719     // Next, solve the constructed addrec
5720     std::pair<const SCEV *,const SCEV *> Roots =
5721       SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
5722     const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
5723     const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
5724     if (R1) {
5725       // Pick the smallest positive root value.
5726       if (ConstantInt *CB =
5727           dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
5728                          R1->getValue(), R2->getValue()))) {
5729         if (CB->getZExtValue() == false)
5730           std::swap(R1, R2);   // R1 is the minimum root now.
5731 
5732         // Make sure the root is not off by one.  The returned iteration should
5733         // not be in the range, but the previous one should be.  When solving
5734         // for "X*X < 5", for example, we should not return a root of 2.
5735         ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
5736                                                              R1->getValue(),
5737                                                              SE);
5738         if (Range.contains(R1Val->getValue())) {
5739           // The next iteration must be out of the range...
5740           ConstantInt *NextVal =
5741                 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
5742 
5743           R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
5744           if (!Range.contains(R1Val->getValue()))
5745             return SE.getConstant(NextVal);
5746           return SE.getCouldNotCompute();  // Something strange happened
5747         }
5748 
5749         // If R1 was not in the range, then it is a good return value.  Make
5750         // sure that R1-1 WAS in the range though, just in case.
5751         ConstantInt *NextVal =
5752                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
5753         R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
5754         if (Range.contains(R1Val->getValue()))
5755           return R1;
5756         return SE.getCouldNotCompute();  // Something strange happened
5757       }
5758     }
5759   }
5760 
5761   return SE.getCouldNotCompute();
5762 }
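
// Worked example of the affine case: for the chrec {0,+,3} and
// Range = [0,10), A = 3 is positive, so End = 10 - 1 = 9 and
// ExitVal = (9 + 3) /u 3 = 4. Evaluating {0,+,3} at iteration 4 gives 12,
// which is outside the range, while iteration 3 gives 9, which is inside, so
// 4 is returned as the first iteration whose value leaves the range.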
5763 
5764 
5765 
5766 //===----------------------------------------------------------------------===//
5767 //                   SCEVCallbackVH Class Implementation
5768 //===----------------------------------------------------------------------===//
5769 
5770 void ScalarEvolution::SCEVCallbackVH::deleted() {
5771   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
5772   if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
5773     SE->ConstantEvolutionLoopExitValue.erase(PN);
5774   SE->ValueExprMap.erase(getValPtr());
5775   // this now dangles!
5776 }
5777 
5778 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
5779   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
5780 
5781   // Forget all the expressions associated with users of the old value,
5782   // so that future queries will recompute the expressions using the new
5783   // value.
5784   Value *Old = getValPtr();
5785   SmallVector<User *, 16> Worklist;
5786   SmallPtrSet<User *, 8> Visited;
5787   for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
5788        UI != UE; ++UI)
5789     Worklist.push_back(*UI);
5790   while (!Worklist.empty()) {
5791     User *U = Worklist.pop_back_val();
5792     // Deleting the Old value will cause this to dangle. Postpone
5793     // that until everything else is done.
5794     if (U == Old)
5795       continue;
5796     if (!Visited.insert(U))
5797       continue;
5798     if (PHINode *PN = dyn_cast<PHINode>(U))
5799       SE->ConstantEvolutionLoopExitValue.erase(PN);
5800     SE->ValueExprMap.erase(U);
5801     for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
5802          UI != UE; ++UI)
5803       Worklist.push_back(*UI);
5804   }
5805   // Delete the Old value.
5806   if (PHINode *PN = dyn_cast<PHINode>(Old))
5807     SE->ConstantEvolutionLoopExitValue.erase(PN);
5808   SE->ValueExprMap.erase(Old);
5809   // this now dangles!
5810 }
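
// For illustration (hypothetical IR): if %x is replaced and has users such as
//
//   %y = add i32 %x, 1
//   %z = mul i32 %y, %y
//
// the worklist visits %y and %z (each once, thanks to the Visited set),
// erases their cached SCEVs and any constant-evolution entries for PHI users,
// and finally drops the entry for %x itself, so later getSCEV queries
// recompute everything against the replacement value.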
5811 
5812 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
5813   : CallbackVH(V), SE(se) {}
5814 
5815 //===----------------------------------------------------------------------===//
5816 //                   ScalarEvolution Class Implementation
5817 //===----------------------------------------------------------------------===//
5818 
5819 ScalarEvolution::ScalarEvolution()
5820   : FunctionPass(ID), FirstUnknown(0) {
5821   initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
5822 }
5823 
5824 bool ScalarEvolution::runOnFunction(Function &F) {
5825   this->F = &F;
5826   LI = &getAnalysis<LoopInfo>();
5827   TD = getAnalysisIfAvailable<TargetData>();
5828   DT = &getAnalysis<DominatorTree>();
5829   return false;
5830 }
5831 
5832 void ScalarEvolution::releaseMemory() {
5833   // Iterate through all the SCEVUnknown instances and call their
5834   // destructors, so that they release their references to their values.
5835   for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
5836     U->~SCEVUnknown();
5837   FirstUnknown = 0;
5838 
5839   ValueExprMap.clear();
5840   BackedgeTakenCounts.clear();
5841   ConstantEvolutionLoopExitValue.clear();
5842   ValuesAtScopes.clear();
5843   UnsignedRanges.clear();
5844   SignedRanges.clear();
5845   UniqueSCEVs.clear();
5846   SCEVAllocator.Reset();
5847 }
5848 
5849 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
5850   AU.setPreservesAll();
5851   AU.addRequiredTransitive<LoopInfo>();
5852   AU.addRequiredTransitive<DominatorTree>();
5853 }
5854 
5855 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
5856   return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
5857 }
5858 
5859 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
5860                           const Loop *L) {
5861   // Print all inner loops first
5862   for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
5863     PrintLoopInfo(OS, SE, *I);
5864 
5865   OS << "Loop ";
5866   WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
5867   OS << ": ";
5868 
5869   SmallVector<BasicBlock *, 8> ExitBlocks;
5870   L->getExitBlocks(ExitBlocks);
5871   if (ExitBlocks.size() != 1)
5872     OS << "<multiple exits> ";
5873 
5874   if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
5875     OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
5876   } else {
5877     OS << "Unpredictable backedge-taken count. ";
5878   }
5879 
5880   OS << "\n"
5881         "Loop ";
5882   WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
5883   OS << ": ";
5884 
5885   if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
5886     OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
5887   } else {
5888     OS << "Unpredictable max backedge-taken count. ";
5889   }
5890 
5891   OS << "\n";
5892 }
5893 
5894 void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
5895   // ScalarEvolution's implementation of the print method is to print
5896   // out SCEV values of all instructions that are interesting. Doing
5897   // this potentially causes it to create new SCEV objects though,
5898   // which technically conflicts with the const qualifier. This isn't
5899   // observable from outside the class though, so casting away the
5900   // const isn't dangerous.
5901   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
5902 
5903   OS << "Classifying expressions for: ";
5904   WriteAsOperand(OS, F, /*PrintType=*/false);
5905   OS << "\n";
5906   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
5907     if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
5908       OS << *I << '\n';
5909       OS << "  -->  ";
5910       const SCEV *SV = SE.getSCEV(&*I);
5911       SV->print(OS);
5912 
5913       const Loop *L = LI->getLoopFor((*I).getParent());
5914 
5915       const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
5916       if (AtUse != SV) {
5917         OS << "  -->  ";
5918         AtUse->print(OS);
5919       }
5920 
5921       if (L) {
5922         OS << "\t\t" "Exits: ";
5923         const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
5924         if (!SE.isLoopInvariant(ExitValue, L)) {
5925           OS << "<<Unknown>>";
5926         } else {
5927           OS << *ExitValue;
5928         }
5929       }
5930 
5931       OS << "\n";
5932     }
5933 
5934   OS << "Determining loop execution counts for: ";
5935   WriteAsOperand(OS, F, /*PrintType=*/false);
5936   OS << "\n";
5937   for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
5938     PrintLoopInfo(OS, &SE, *I);
5939 }
5940 
5941 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
5942   switch (S->getSCEVType()) {
5943   case scConstant:
5944     return true;
5945   case scTruncate:
5946   case scZeroExtend:
5947   case scSignExtend:
5948     return isLoopInvariant(cast<SCEVCastExpr>(S)->getOperand(), L);
5949   case scAddRecExpr: {
5950     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
5951 
5952     // Add recurrences are never invariant in the function-body (null loop).
5953     if (!L)
5954       return false;
5955 
5956     // This recurrence is variant w.r.t. L if L contains AR's loop.
5957     if (L->contains(AR->getLoop()))
5958       return false;
5959 
5960     // This recurrence is invariant w.r.t. L if AR's loop contains L.
5961     if (AR->getLoop()->contains(L))
5962       return true;
5963 
5964     // This recurrence is variant w.r.t. L if any of its operands
5965     // are variant.
5966     for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
5967          I != E; ++I)
5968       if (!isLoopInvariant(*I, L))
5969         return false;
5970 
5971     // Otherwise it's loop-invariant.
5972     return true;
5973   }
5974   case scAddExpr:
5975   case scMulExpr:
5976   case scUMaxExpr:
5977   case scSMaxExpr: {
5978     const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
5979     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
5980          I != E; ++I)
5981       if (!isLoopInvariant(*I, L))
5982         return false;
5983     return true;
5984   }
5985   case scUDivExpr: {
5986     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
5987     return isLoopInvariant(UDiv->getLHS(), L) &&
5988            isLoopInvariant(UDiv->getRHS(), L);
5989   }
5990   case scUnknown:
5991     // All non-instruction values are loop invariant.  All instructions are loop
5992     // invariant if they are not contained in the specified loop.
5993     // Instructions are never considered invariant in the function body
5994     // (null loop) because they are defined within the "loop".
5995     if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
5996       return L && !L->contains(I);
5997     return true;
5998   case scCouldNotCompute:
5999     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
6000     return false;
6001   default: break;
6002   }
6003   llvm_unreachable("Unknown SCEV kind!");
6004   return false;
6005 }
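
// For example, with an inner loop L2 nested in an outer loop L1, the
// recurrence {0,+,1}<L2> is variant with respect to L1 (L1 contains L2, so
// the value changes across L1's iterations) but invariant with respect to any
// loop nested inside L2, since L2 does not advance while such a loop runs.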
6006 
6007 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
6008   switch (S->getSCEVType()) {
6009   case scConstant:
6010     return false;
6011   case scTruncate:
6012   case scZeroExtend:
6013   case scSignExtend:
6014     return hasComputableLoopEvolution(cast<SCEVCastExpr>(S)->getOperand(), L);
6015   case scAddRecExpr:
6016     return cast<SCEVAddRecExpr>(S)->getLoop() == L;
6017   case scAddExpr:
6018   case scMulExpr:
6019   case scUMaxExpr:
6020   case scSMaxExpr: {
6021     const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
6022     bool HasVarying = false;
6023     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
6024          I != E; ++I) {
6025       const SCEV *Op = *I;
6026       if (!isLoopInvariant(Op, L)) {
6027         if (hasComputableLoopEvolution(Op, L))
6028           HasVarying = true;
6029         else
6030           return false;
6031       }
6032     }
6033     return HasVarying;
6034   }
6035   case scUDivExpr: {
6036     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
6037     bool HasVarying = false;
6038     if (!isLoopInvariant(UDiv->getLHS(), L)) {
6039       if (hasComputableLoopEvolution(UDiv->getLHS(), L))
6040         HasVarying = true;
6041       else
6042         return false;
6043     }
6044     if (!isLoopInvariant(UDiv->getRHS(), L)) {
6045       if (hasComputableLoopEvolution(UDiv->getRHS(), L))
6046         HasVarying = true;
6047       else
6048         return false;
6049     }
6050     return HasVarying;
6051   }
6052   case scUnknown:
6053     return false;
6054   case scCouldNotCompute:
6055     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
6056     return false;
6057   default: break;
6058   }
6059   llvm_unreachable("Unknown SCEV kind!");
6060   return false;
6061 }
6062