//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
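//
// For example, an induction variable that starts at 0 and is incremented by
// 3 on every iteration is represented by the polynomial recurrence {0,+,3},
// which the printer below renders as {0,+,3}<%loop> with the loop's header
// block.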
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::ZeroOrMore,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant-derived "
                                 "loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetic"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
ClassifyExpressions("scalar-evolution-classify-expressions",
    cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));
233 
234 //===----------------------------------------------------------------------===//
235 //                           SCEV class definitions
236 //===----------------------------------------------------------------------===//
237 
238 //===----------------------------------------------------------------------===//
239 // Implementation of the SCEV class.
240 //
241 
242 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
243 LLVM_DUMP_METHOD void SCEV::dump() const {
244   print(dbgs());
245   dbgs() << '\n';
246 }
247 #endif
248 
249 void SCEV::print(raw_ostream &OS) const {
250   switch (getSCEVType()) {
251   case scConstant:
252     cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
253     return;
254   case scPtrToInt: {
255     const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
256     const SCEV *Op = PtrToInt->getOperand();
257     OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
258        << *PtrToInt->getType() << ")";
259     return;
260   }
261   case scTruncate: {
262     const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
263     const SCEV *Op = Trunc->getOperand();
264     OS << "(trunc " << *Op->getType() << " " << *Op << " to "
265        << *Trunc->getType() << ")";
266     return;
267   }
268   case scZeroExtend: {
269     const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
270     const SCEV *Op = ZExt->getOperand();
271     OS << "(zext " << *Op->getType() << " " << *Op << " to "
272        << *ZExt->getType() << ")";
273     return;
274   }
275   case scSignExtend: {
276     const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
277     const SCEV *Op = SExt->getOperand();
278     OS << "(sext " << *Op->getType() << " " << *Op << " to "
279        << *SExt->getType() << ")";
280     return;
281   }
282   case scAddRecExpr: {
283     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
284     OS << "{" << *AR->getOperand(0);
285     for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
286       OS << ",+," << *AR->getOperand(i);
287     OS << "}<";
288     if (AR->hasNoUnsignedWrap())
289       OS << "nuw><";
290     if (AR->hasNoSignedWrap())
291       OS << "nsw><";
292     if (AR->hasNoSelfWrap() &&
293         !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
294       OS << "nw><";
295     AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
296     OS << ">";
297     return;
298   }
299   case scAddExpr:
300   case scMulExpr:
301   case scUMaxExpr:
302   case scSMaxExpr:
303   case scUMinExpr:
304   case scSMinExpr:
305   case scSequentialUMinExpr: {
306     const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
307     const char *OpStr = nullptr;
308     switch (NAry->getSCEVType()) {
309     case scAddExpr: OpStr = " + "; break;
310     case scMulExpr: OpStr = " * "; break;
311     case scUMaxExpr: OpStr = " umax "; break;
312     case scSMaxExpr: OpStr = " smax "; break;
313     case scUMinExpr:
314       OpStr = " umin ";
315       break;
316     case scSMinExpr:
317       OpStr = " smin ";
318       break;
319     case scSequentialUMinExpr:
320       OpStr = " umin_seq ";
321       break;
322     default:
323       llvm_unreachable("There are no other nary expression types.");
324     }
325     OS << "(";
326     ListSeparator LS(OpStr);
327     for (const SCEV *Op : NAry->operands())
328       OS << LS << *Op;
329     OS << ")";
330     switch (NAry->getSCEVType()) {
331     case scAddExpr:
332     case scMulExpr:
333       if (NAry->hasNoUnsignedWrap())
334         OS << "<nuw>";
335       if (NAry->hasNoSignedWrap())
336         OS << "<nsw>";
337       break;
338     default:
339       // Nothing to print for other nary expressions.
340       break;
341     }
342     return;
343   }
344   case scUDivExpr: {
345     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
346     OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
347     return;
348   }
349   case scUnknown: {
350     const SCEVUnknown *U = cast<SCEVUnknown>(this);
351     Type *AllocTy;
352     if (U->isSizeOf(AllocTy)) {
353       OS << "sizeof(" << *AllocTy << ")";
354       return;
355     }
356     if (U->isAlignOf(AllocTy)) {
357       OS << "alignof(" << *AllocTy << ")";
358       return;
359     }
360 
361     Type *CTy;
362     Constant *FieldNo;
363     if (U->isOffsetOf(CTy, FieldNo)) {
364       OS << "offsetof(" << *CTy << ", ";
365       FieldNo->printAsOperand(OS, false);
366       OS << ")";
367       return;
368     }
369 
370     // Otherwise just print it normally.
371     U->getValue()->printAsOperand(OS, false);
372     return;
373   }
374   case scCouldNotCompute:
375     OS << "***COULDNOTCOMPUTE***";
376     return;
377   }
378   llvm_unreachable("Unknown SCEV kind!");
379 }
380 
Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scSequentialUMinExpr:
    return cast<SCEVSequentialMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

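// isSizeOf, isAlignOf and isOffsetOf below recognize the canonical
// null-pointer GEP constant expressions used to model sizeof/alignof/offsetof.
// For instance, sizeof(T) is modeled as the pointer-to-integer cast of a
// one-past-null GEP, i.e. ptrtoint(gep(T, T* null, i32 1)).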
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
// If the max analysis depth was reached, return None, assuming we do not know
// if they are equivalent for sure.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recurrences used
    // by one SCEV, so we can safely sort them by loop header dominance. We
    // require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
  case scSequentialUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
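///
/// For example, the operand list (%x + 2 + %y + %x) might come out as
/// (2 + %x + %x + %y): the constant sorts first, and the duplicate %x
/// operands become adjacent so that the folders can combine them with a
/// single linear scan.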
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
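  //
  // For example, for K = 4 we have K! = 24 = 2^3 * 3, so T = 3 and the odd
  // factor is 3.  BC(It, 4) is then computed by forming the product
  // It*(It-1)*(It-2)*(It-3) at width W+3, dividing by 2^3, truncating back
  // to W bits, and multiplying by the multiplicative inverse of 3 mod 2^W.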

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
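///
/// For example, {0,+,1,+,1} evaluates at iteration number It to
/// 0*BC(It, 0) + 1*BC(It, 1) + 1*BC(It, 2) = It + It*(It - 1)/2, which for
/// It = 0, 1, 2, 3 gives the sequence 0, 1, 3, 6.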
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.
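  // For instance, a pointer-typed (%ptr + 4 * %i) becomes
  // (ptrtoint(%ptr) + 4 * %i): the 4 * %i part is already integer-typed and
  // is left untouched.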

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
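  // For example, truncating i64 (%a + (sext i32 %b to i64)) to i32 gives
  // (trunc i64 %a to i32) + %b: the truncate of the sext merely replaces a
  // cast, so only one new truncate is introduced.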
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that the recursive calls above inserted an entry for ID into
    // the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, Op);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
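// For example, at bit width 8 with a Step whose signed range is [1, 2], the
// limit is -128 - 2 = 126 (computed in wrapping APInt arithmetic) with
// predicate SLT: any recurrence value below 126 can be incremented by Step
// without signed overflow.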
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
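// For example, at bit width 8 with a Step whose unsigned range is [0, 3], the
// limit is 0 - 3 = 253 (computed in wrapping APInt arithmetic) with predicate
// ULT: any recurrence value below 253 can be incremented by Step without
// unsigned overflow.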
1309 static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
1310                                                    ICmpInst::Predicate *Pred,
1311                                                    ScalarEvolution *SE) {
1312   unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1313   *Pred = ICmpInst::ICMP_ULT;
1314 
1315   return SE->getConstant(APInt::getMinValue(BitWidth) -
1316                          SE->getUnsignedRangeMax(Step));
1317 }
1318 
1319 namespace {
1320 
1321 struct ExtendOpTraitsBase {
1322   typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
1323                                                           unsigned);
1324 };
1325 
1326 // Used to make code generic over signed and unsigned overflow.
1327 template <typename ExtendOp> struct ExtendOpTraits {
1328   // Members present:
1329   //
1330   // static const SCEV::NoWrapFlags WrapType;
1331   //
1332   // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
1333   //
1334   // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1335   //                                           ICmpInst::Predicate *Pred,
1336   //                                           ScalarEvolution *SE);
1337 };
1338 
1339 template <>
1340 struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
1341   static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;
1342 
1343   static const GetExtendExprTy GetExtendExpr;
1344 
1345   static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1346                                              ICmpInst::Predicate *Pred,
1347                                              ScalarEvolution *SE) {
1348     return getSignedOverflowLimitForStep(Step, Pred, SE);
1349   }
1350 };
1351 
1352 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1353     SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;
1354 
1355 template <>
1356 struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
1357   static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;
1358 
1359   static const GetExtendExprTy GetExtendExpr;
1360 
1361   static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1362                                              ICmpInst::Predicate *Pred,
1363                                              ScalarEvolution *SE) {
1364     return getUnsignedOverflowLimitForStep(Step, Pred, SE);
1365   }
1366 };
1367 
1368 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1369     SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
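
// A minimal usage sketch of the traits (mirroring how the helpers below
// consume them; `SE', `S' and `Ty' are assumed to be in scope):
//
//   auto GetExtendExpr = ExtendOpTraits<SCEVSignExtendExpr>::GetExtendExpr;
//   const SCEV *Ext = (SE->*GetExtendExpr)(S, Ty, /*Depth=*/0);
//
// Substituting SCEVZeroExtendExpr selects getZeroExtendExpr, FlagNUW and the
// unsigned overflow limit instead.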
1370 
1371 } // end anonymous namespace
1372 
1373 // The recurrence AR has been shown to have no signed/unsigned wrap or something
1374 // close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
1375 // easily prove NSW/NUW for its preincrement or postincrement sibling. This
1376 // allows normalizing a sign/zero extended AddRec as such:
1377 // {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}.
1378 // As a result, the expression "Step + sext/zext(PreIncAR)" is congruent
1379 // with "sext/zext(PostIncAR)".
1380 template <typename ExtendOpTy>
1381 static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
1382                                         ScalarEvolution *SE, unsigned Depth) {
1383   auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1384   auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1385 
1386   const Loop *L = AR->getLoop();
1387   const SCEV *Start = AR->getStart();
1388   const SCEV *Step = AR->getStepRecurrence(*SE);
1389 
1390   // Check for a simple-looking step prior to loop entry.
1391   const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1392   if (!SA)
1393     return nullptr;
1394 
1395   // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
1396   // subtraction is expensive. For this purpose, perform a quick and dirty
1397   // difference by checking for Step in the operand list.
1398   SmallVector<const SCEV *, 4> DiffOps;
1399   for (const SCEV *Op : SA->operands())
1400     if (Op != Step)
1401       DiffOps.push_back(Op);
1402 
1403   if (DiffOps.size() == SA->getNumOperands())
1404     return nullptr;
1405 
1406   // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
1407   // `Step`:
1408 
1409   // 1. NSW/NUW flags on the step increment.
1410   auto PreStartFlags =
1411     ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
1412   const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
1413   const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
1414       SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
1415 
1416   // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
1417   // "S+X does not sign/unsign-overflow".
1418   //
1419 
1420   const SCEV *BECount = SE->getBackedgeTakenCount(L);
1421   if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
1422       !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
1423     return PreStart;
1424 
1425   // 2. Direct overflow check on the step operation's expression.
1426   unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
1427   Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
1428   const SCEV *OperandExtendedStart =
1429       SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
1430                      (SE->*GetExtendExpr)(Step, WideTy, Depth));
1431   if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
1432     if (PreAR && AR->getNoWrapFlags(WrapType)) {
1433       // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
1434       // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
1435       // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
1436       SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
1437     }
1438     return PreStart;
1439   }
1440 
1441   // 3. Loop precondition.
1442   ICmpInst::Predicate Pred;
1443   const SCEV *OverflowLimit =
1444       ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1445 
1446   if (OverflowLimit &&
1447       SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
1448     return PreStart;
1449 
1450   return nullptr;
1451 }
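
// Worked instance (hypothetical operands): for AR == {%x + 4,+,4}, the quick
// difference above drops the literal Step from Start, giving PreStart == %x
// and PreAR == {%x,+,4}.  If any of proofs (1)-(3) succeeds, the caller may
// treat sext/zext({%x + 4,+,4}) as Step + sext/zext({%x,+,4}).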
1452 
1453 // Get the normalized zero or sign extended expression for this AddRec's Start.
1454 template <typename ExtendOpTy>
1455 static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
1456                                         ScalarEvolution *SE,
1457                                         unsigned Depth) {
1458   auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1459 
1460   const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
1461   if (!PreStart)
1462     return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);
1463 
1464   return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
1465                                              Depth),
1466                         (SE->*GetExtendExpr)(PreStart, Ty, Depth));
1467 }
1468 
1469 // Try to prove away overflow by looking at "nearby" add recurrences.  A
1470 // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
1471 // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
1472 //
1473 // Formally:
1474 //
1475 //     {S,+,X} == {S-T,+,X} + T
1476 //  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
1477 //
1478 // If ({S-T,+,X} + T) does not overflow  ... (1)
1479 //
1480 //  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
1481 //
1482 // If {S-T,+,X} does not overflow  ... (2)
1483 //
1484 //  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
1485 //      == {Ext(S-T)+Ext(T),+,Ext(X)}
1486 //
1487 // If (S-T)+T does not overflow  ... (3)
1488 //
1489 //  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
1490 //      == {Ext(S),+,Ext(X)} == LHS
1491 //
1492 // Thus, if (1), (2) and (3) are true for some T, then
1493 //   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
1494 //
1495 // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
1496 // does not overflow" restricted to the 0th iteration.  Therefore we only need
1497 // to check for (1) and (2).
1498 //
1499 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
1500 // is `Delta` (defined below).
1501 template <typename ExtendOpTy>
1502 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
1503                                                 const SCEV *Step,
1504                                                 const Loop *L) {
1505   auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1506 
1507   // We restrict `Start` to a constant to prevent SCEV from spending too much
1508   // time here.  It is correct (but more expensive) to continue with a
1509   // non-constant `Start` and do a general SCEV subtraction to compute
1510   // `PreStart` below.
1511   const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1512   if (!StartC)
1513     return false;
1514 
1515   APInt StartAI = StartC->getAPInt();
1516 
1517   for (unsigned Delta : {-2, -1, 1, 2}) {
1518     const SCEV *PreStart = getConstant(StartAI - Delta);
1519 
1520     FoldingSetNodeID ID;
1521     ID.AddInteger(scAddRecExpr);
1522     ID.AddPointer(PreStart);
1523     ID.AddPointer(Step);
1524     ID.AddPointer(L);
1525     void *IP = nullptr;
1526     const auto *PreAR =
1527       static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1528 
1529     // Give up if we don't already have the add recurrence we need because
1530     // actually constructing an add recurrence is relatively expensive.
1531     if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
1532       const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
1533       ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
1534       const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1535           DeltaS, &Pred, this);
1536       if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
1537         return true;
1538     }
1539   }
1540 
1541   return false;
1542 }
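
// Worked instance of the motivating example above (hypothetical values): for
// Start == 1 and Step == 4, the Delta == 1 probe looks up {0,+,4}.  If that
// recurrence is already cached as <nuw> (proving (2)), the overflow limit for
// DeltaS == 1 is UINT_MAX with ICMP_ULT, so showing {0,+,4} u< -1 proves (1)
// and lets the caller conclude <nuw> for {1,+,4}.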
1543 
1544 // Finds an integer D for an expression (C + x + y + ...) such that the top
1545 // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
1546 // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
1547 // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
1548 // the (C + x + y + ...) expression is \p WholeAddExpr.
1549 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
1550                                             const SCEVConstant *ConstantTerm,
1551                                             const SCEVAddExpr *WholeAddExpr) {
1552   const APInt &C = ConstantTerm->getAPInt();
1553   const unsigned BitWidth = C.getBitWidth();
1554   // Find number of trailing zeros of (x + y + ...) w/o the C first:
1555   uint32_t TZ = BitWidth;
1556   for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
1557     TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
1558   if (TZ) {
1559     // Set D to be as many least significant bits of C as possible while still
1560     // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
1561     return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
1562   }
1563   return APInt(BitWidth, 0);
1564 }
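
// Worked instance (hypothetical values): for (5 + 4 * %x), the non-constant
// operand contributes two trailing zero bits, so D == trunc(5 to i2) == 1 and
// the caller may rewrite the expression as 1 + (4 + 4 * %x), whose residual
// part keeps both trailing zeros.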
1565 
1566 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top
1567 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
1568 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
1569 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
1570 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
1571                                             const APInt &ConstantStart,
1572                                             const SCEV *Step) {
1573   const unsigned BitWidth = ConstantStart.getBitWidth();
1574   const uint32_t TZ = SE.GetMinTrailingZeros(Step);
1575   if (TZ)
1576     return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
1577                          : ConstantStart;
1578   return APInt(BitWidth, 0);
1579 }
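
// Analogous worked instance (hypothetical values): for {5,+,4}, Step has two
// trailing zero bits, so D == 1 and the extend of {5,+,4} may be split as
// ext(1) + ext({4,+,4}) once the split is proven not to wrap.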
1580 
1581 const SCEV *
1582 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1583   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1584          "This is not an extending conversion!");
1585   assert(isSCEVable(Ty) &&
1586          "This is not a conversion to a SCEVable type!");
1587   assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1588   Ty = getEffectiveSCEVType(Ty);
1589 
1590   // Fold if the operand is constant.
1591   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1592     return getConstant(
1593       cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
1594 
1595   // zext(zext(x)) --> zext(x)
1596   if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1597     return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1598 
1599   // Before doing any expensive analysis, check to see if we've already
1600   // computed a SCEV for this Op and Ty.
1601   FoldingSetNodeID ID;
1602   ID.AddInteger(scZeroExtend);
1603   ID.AddPointer(Op);
1604   ID.AddPointer(Ty);
1605   void *IP = nullptr;
1606   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1607   if (Depth > MaxCastDepth) {
1608     SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1609                                                      Op, Ty);
1610     UniqueSCEVs.InsertNode(S, IP);
1611     registerUser(S, Op);
1612     return S;
1613   }
1614 
1615   // zext(trunc(x)) --> zext(x) or x or trunc(x)
1616   if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1617     // It's possible the bits taken off by the truncate were all zero bits. If
1618     // so, we should be able to simplify this further.
1619     const SCEV *X = ST->getOperand();
1620     ConstantRange CR = getUnsignedRange(X);
1621     unsigned TruncBits = getTypeSizeInBits(ST->getType());
1622     unsigned NewBits = getTypeSizeInBits(Ty);
1623     if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1624             CR.zextOrTrunc(NewBits)))
1625       return getTruncateOrZeroExtend(X, Ty, Depth);
1626   }
1627 
1628   // If the input value is a chrec scev, and we can prove that the value
1629   // did not overflow the old, smaller value, we can zero extend all of the
1630   // operands (often constants).  This allows analysis of something like
1631   // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1632   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1633     if (AR->isAffine()) {
1634       const SCEV *Start = AR->getStart();
1635       const SCEV *Step = AR->getStepRecurrence(*this);
1636       unsigned BitWidth = getTypeSizeInBits(AR->getType());
1637       const Loop *L = AR->getLoop();
1638 
1639       if (!AR->hasNoUnsignedWrap()) {
1640         auto NewFlags = proveNoWrapViaConstantRanges(AR);
1641         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1642       }
1643 
1644       // If we have special knowledge that this addrec won't overflow,
1645       // we don't need to do any further analysis.
1646       if (AR->hasNoUnsignedWrap())
1647         return getAddRecExpr(
1648             getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1649             getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1650 
1651       // Check whether the backedge-taken count is SCEVCouldNotCompute.
1652       // Note that this serves two purposes: It filters out loops that are
1653       // simply not analyzable, and it covers the case where this code is
1654       // being called from within backedge-taken count analysis, such that
1655       // attempting to ask for the backedge-taken count would likely result
1656       // in infinite recursion. In the latter case, the analysis code will
1657       // cope with a conservative value, and it will take care to purge
1658       // that value once it has finished.
1659       const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1660       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1661         // Manually compute the final value for AR, checking for overflow.
1662 
1663         // Check whether the backedge-taken count can be losslessly cast to
1664         // the addrec's type. The count is always unsigned.
1665         const SCEV *CastedMaxBECount =
1666             getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1667         const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1668             CastedMaxBECount, MaxBECount->getType(), Depth);
1669         if (MaxBECount == RecastedMaxBECount) {
1670           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1671           // Check whether Start+Step*MaxBECount has no unsigned overflow.
1672           const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1673                                         SCEV::FlagAnyWrap, Depth + 1);
1674           const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1675                                                           SCEV::FlagAnyWrap,
1676                                                           Depth + 1),
1677                                                WideTy, Depth + 1);
1678           const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1679           const SCEV *WideMaxBECount =
1680             getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1681           const SCEV *OperandExtendedAdd =
1682             getAddExpr(WideStart,
1683                        getMulExpr(WideMaxBECount,
1684                                   getZeroExtendExpr(Step, WideTy, Depth + 1),
1685                                   SCEV::FlagAnyWrap, Depth + 1),
1686                        SCEV::FlagAnyWrap, Depth + 1);
1687           if (ZAdd == OperandExtendedAdd) {
1688             // Cache knowledge of AR NUW, which is propagated to this AddRec.
1689             setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1690             // Return the expression with the addrec on the outside.
1691             return getAddRecExpr(
1692                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1693                                                          Depth + 1),
1694                 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1695                 AR->getNoWrapFlags());
1696           }
1697           // Similar to above, only this time treat the step value as signed.
1698           // This covers loops that count down.
1699           OperandExtendedAdd =
1700             getAddExpr(WideStart,
1701                        getMulExpr(WideMaxBECount,
1702                                   getSignExtendExpr(Step, WideTy, Depth + 1),
1703                                   SCEV::FlagAnyWrap, Depth + 1),
1704                        SCEV::FlagAnyWrap, Depth + 1);
1705           if (ZAdd == OperandExtendedAdd) {
1706             // Cache knowledge of AR NW, which is propagated to this AddRec.
1707             // Negative step causes unsigned wrap, but it still can't self-wrap.
1708             setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1709             // Return the expression with the addrec on the outside.
1710             return getAddRecExpr(
1711                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1712                                                          Depth + 1),
1713                 getSignExtendExpr(Step, Ty, Depth + 1), L,
1714                 AR->getNoWrapFlags());
1715           }
1716         }
1717       }
1718 
1719       // Normally, in the cases we can prove no-overflow via a
1720       // backedge guarding condition, we can also compute a backedge
1721       // taken count for the loop.  The exceptions are assumptions and
1722       // guards present in the loop -- SCEV is not great at exploiting
1723       // these to compute max backedge taken counts, but can still use
1724       // these to prove lack of overflow.  Use this fact to avoid
1725       // doing extra work that may not pay off.
1726       if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1727           !AC.assumptions().empty()) {
1728 
1729         auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1730         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1731         if (AR->hasNoUnsignedWrap()) {
1732           // Same as nuw case above - duplicated here to avoid a
1733           // compile-time issue.  It's not clear that the order of checks
1734           // matters, but it's one of two possible causes for a change
1735           // which was reverted.  Be conservative for the moment.
1736           return getAddRecExpr(
1737                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1738                                                          Depth + 1),
1739                 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1740                 AR->getNoWrapFlags());
1741         }
1742 
1743         // For a negative step, we can extend the operands iff doing so only
1744         // traverses values in the range zext([0,UINT_MAX]).
1745         if (isKnownNegative(Step)) {
1746           const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1747                                       getSignedRangeMin(Step));
1748           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1749               isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
1750             // Cache knowledge of AR NW, which is propagated to this
1751             // AddRec.  Negative step causes unsigned wrap, but it
1752             // still can't self-wrap.
1753             setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1754             // Return the expression with the addrec on the outside.
1755             return getAddRecExpr(
1756                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1757                                                          Depth + 1),
1758                 getSignExtendExpr(Step, Ty, Depth + 1), L,
1759                 AR->getNoWrapFlags());
1760           }
1761         }
1762       }
1763 
1764       // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1765       // if D + (C - D + Step * n) could be proven to not unsigned wrap
1766       // where D maximizes the number of trailing zeros of (C - D + Step * n)
1767       if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1768         const APInt &C = SC->getAPInt();
1769         const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1770         if (D != 0) {
1771           const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1772           const SCEV *SResidual =
1773               getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1774           const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1775           return getAddExpr(SZExtD, SZExtR,
1776                             (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1777                             Depth + 1);
1778         }
1779       }
1780 
1781       if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1782         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1783         return getAddRecExpr(
1784             getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1785             getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1786       }
1787     }
1788 
1789   // zext(A % B) --> zext(A) % zext(B)
1790   {
1791     const SCEV *LHS;
1792     const SCEV *RHS;
1793     if (matchURem(Op, LHS, RHS))
1794       return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1795                          getZeroExtendExpr(RHS, Ty, Depth + 1));
1796   }
1797 
1798   // zext(A / B) --> zext(A) / zext(B).
1799   if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1800     return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1801                        getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1802 
1803   if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1804     // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1805     if (SA->hasNoUnsignedWrap()) {
1806       // If the addition does not unsign overflow then we can, by definition,
1807       // commute the zero extension with the addition operation.
1808       SmallVector<const SCEV *, 4> Ops;
1809       for (const auto *Op : SA->operands())
1810         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1811       return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1812     }
1813 
1814     // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1815     // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1816     // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1817     //
1818     // Address arithmetic often contains expressions like
1819     // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1820     // This transformation is useful while proving that such expressions are
1821     // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
1822     if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1823       const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1824       if (D != 0) {
1825         const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1826         const SCEV *SResidual =
1827             getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1828         const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1829         return getAddExpr(SZExtD, SZExtR,
1830                           (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1831                           Depth + 1);
1832       }
1833     }
1834   }
1835 
1836   if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
1837     // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
1838     if (SM->hasNoUnsignedWrap()) {
1839       // If the multiply does not unsign overflow then we can, by definition,
1840       // commute the zero extension with the multiply operation.
1841       SmallVector<const SCEV *, 4> Ops;
1842       for (const auto *Op : SM->operands())
1843         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1844       return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
1845     }
1846 
1847     // zext(2^K * (trunc X to iN)) to iM ->
1848     // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
1849     //
1850     // Proof:
1851     //
1852     //     zext(2^K * (trunc X to iN)) to iM
1853     //   = zext((trunc X to iN) << K) to iM
1854     //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
1855     //     (because shl removes the top K bits)
1856     //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
1857     //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
1858     //
1859     if (SM->getNumOperands() == 2)
1860       if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1861         if (MulLHS->getAPInt().isPowerOf2())
1862           if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1863             int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
1864                                MulLHS->getAPInt().logBase2();
1865             Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
1866             return getMulExpr(
1867                 getZeroExtendExpr(MulLHS, Ty),
1868                 getZeroExtendExpr(
1869                     getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
1870                 SCEV::FlagNUW, Depth + 1);
1871           }
1872   }
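
  // Concrete instance of the proof above (hypothetical widths): with K == 2,
  // N == 16 and M == 32, zext(4 * (trunc X to i16)) to i32 folds to
  // 4 * (zext(trunc X to i14) to i32), since NewTruncBits == 16 - 2 == 14.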
1873 
1874   // The cast wasn't folded; create an explicit cast node.
1875   // Recompute the insert position, as it may have been invalidated.
1876   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1877   SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1878                                                    Op, Ty);
1879   UniqueSCEVs.InsertNode(S, IP);
1880   registerUser(S, Op);
1881   return S;
1882 }
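
// Usage sketch (illustrative; `SE', `IndVar' and `Int32Ty' are assumed to be
// in scope): extending a non-wrapping recurrence commutes with the recurrence
// itself, e.g.
//
//   const SCEV *AR  = SE.getSCEV(IndVar);           // {0,+,1}<nuw> in i8
//   const SCEV *Ext = SE.getZeroExtendExpr(AR, Int32Ty);
//   // Ext is {0,+,1}<nuw> in i32 rather than an opaque zext node.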
1883 
1884 const SCEV *
1885 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1886   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1887          "This is not an extending conversion!");
1888   assert(isSCEVable(Ty) &&
1889          "This is not a conversion to a SCEVable type!");
1890   assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1891   Ty = getEffectiveSCEVType(Ty);
1892 
1893   // Fold if the operand is constant.
1894   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1895     return getConstant(
1896       cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
1897 
1898   // sext(sext(x)) --> sext(x)
1899   if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1900     return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);
1901 
1902   // sext(zext(x)) --> zext(x)
1903   if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1904     return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1905 
1906   // Before doing any expensive analysis, check to see if we've already
1907   // computed a SCEV for this Op and Ty.
1908   FoldingSetNodeID ID;
1909   ID.AddInteger(scSignExtend);
1910   ID.AddPointer(Op);
1911   ID.AddPointer(Ty);
1912   void *IP = nullptr;
1913   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1914   // Limit recursion depth.
1915   if (Depth > MaxCastDepth) {
1916     SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1917                                                      Op, Ty);
1918     UniqueSCEVs.InsertNode(S, IP);
1919     registerUser(S, Op);
1920     return S;
1921   }
1922 
1923   // sext(trunc(x)) --> sext(x) or x or trunc(x)
1924   if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1925     // It's possible the bits taken off by the truncate were all sign bits. If
1926     // so, we should be able to simplify this further.
1927     const SCEV *X = ST->getOperand();
1928     ConstantRange CR = getSignedRange(X);
1929     unsigned TruncBits = getTypeSizeInBits(ST->getType());
1930     unsigned NewBits = getTypeSizeInBits(Ty);
1931     if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1932             CR.sextOrTrunc(NewBits)))
1933       return getTruncateOrSignExtend(X, Ty, Depth);
1934   }
1935 
1936   if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1937     // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
1938     if (SA->hasNoSignedWrap()) {
1939       // If the addition does not sign overflow then we can, by definition,
1940       // commute the sign extension with the addition operation.
1941       SmallVector<const SCEV *, 4> Ops;
1942       for (const auto *Op : SA->operands())
1943         Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
1944       return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
1945     }
1946 
1947     // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
1948     // if D + (C - D + x + y + ...) could be proven to not signed wrap
1949     // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1950     //
1951     // For instance, this will bring two seemingly different expressions:
1952     //     1 + sext(5 + 20 * %x + 24 * %y)  and
1953     //         sext(6 + 20 * %x + 24 * %y)
1954     // to the same form:
1955     //     2 + sext(4 + 20 * %x + 24 * %y)
1956     if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1957       const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1958       if (D != 0) {
1959         const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
1960         const SCEV *SResidual =
1961             getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1962         const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
1963         return getAddExpr(SSExtD, SSExtR,
1964                           (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1965                           Depth + 1);
1966       }
1967     }
1968   }
1969   // If the input value is a chrec scev, and we can prove that the value
1970   // did not overflow the old, smaller value, we can sign extend all of the
1971   // operands (often constants).  This allows analysis of something like
1972   // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
1973   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1974     if (AR->isAffine()) {
1975       const SCEV *Start = AR->getStart();
1976       const SCEV *Step = AR->getStepRecurrence(*this);
1977       unsigned BitWidth = getTypeSizeInBits(AR->getType());
1978       const Loop *L = AR->getLoop();
1979 
1980       if (!AR->hasNoSignedWrap()) {
1981         auto NewFlags = proveNoWrapViaConstantRanges(AR);
1982         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1983       }
1984 
1985       // If we have special knowledge that this addrec won't overflow,
1986       // we don't need to do any further analysis.
1987       if (AR->hasNoSignedWrap())
1988         return getAddRecExpr(
1989             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1990             getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
1991 
1992       // Check whether the backedge-taken count is SCEVCouldNotCompute.
1993       // Note that this serves two purposes: It filters out loops that are
1994       // simply not analyzable, and it covers the case where this code is
1995       // being called from within backedge-taken count analysis, such that
1996       // attempting to ask for the backedge-taken count would likely result
1997       // in infinite recursion. In the latter case, the analysis code will
1998       // cope with a conservative value, and it will take care to purge
1999       // that value once it has finished.
2000       const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
2001       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2002         // Manually compute the final value for AR, checking for
2003         // overflow.
2004 
2005         // Check whether the backedge-taken count can be losslessly cast to
2006         // the addrec's type. The count is always unsigned.
2007         const SCEV *CastedMaxBECount =
2008             getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
2009         const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
2010             CastedMaxBECount, MaxBECount->getType(), Depth);
2011         if (MaxBECount == RecastedMaxBECount) {
2012           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
2013           // Check whether Start+Step*MaxBECount has no signed overflow.
2014           const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2015                                         SCEV::FlagAnyWrap, Depth + 1);
2016           const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2017                                                           SCEV::FlagAnyWrap,
2018                                                           Depth + 1),
2019                                                WideTy, Depth + 1);
2020           const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2021           const SCEV *WideMaxBECount =
2022             getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2023           const SCEV *OperandExtendedAdd =
2024             getAddExpr(WideStart,
2025                        getMulExpr(WideMaxBECount,
2026                                   getSignExtendExpr(Step, WideTy, Depth + 1),
2027                                   SCEV::FlagAnyWrap, Depth + 1),
2028                        SCEV::FlagAnyWrap, Depth + 1);
2029           if (SAdd == OperandExtendedAdd) {
2030             // Cache knowledge of AR NSW, which is propagated to this AddRec.
2031             setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2032             // Return the expression with the addrec on the outside.
2033             return getAddRecExpr(
2034                 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2035                                                          Depth + 1),
2036                 getSignExtendExpr(Step, Ty, Depth + 1), L,
2037                 AR->getNoWrapFlags());
2038           }
2039           // Similar to above, only this time treat the step value as unsigned.
2040           // This covers loops that count up with an unsigned step.
2041           OperandExtendedAdd =
2042             getAddExpr(WideStart,
2043                        getMulExpr(WideMaxBECount,
2044                                   getZeroExtendExpr(Step, WideTy, Depth + 1),
2045                                   SCEV::FlagAnyWrap, Depth + 1),
2046                        SCEV::FlagAnyWrap, Depth + 1);
2047           if (SAdd == OperandExtendedAdd) {
2048             // If AR wraps around then
2049             //
2050             //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
2051             // => SAdd != OperandExtendedAdd
2052             //
2053             // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2054             // (SAdd == OperandExtendedAdd => AR is NW)
2055 
2056             setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
2057 
2058             // Return the expression with the addrec on the outside.
2059             return getAddRecExpr(
2060                 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2061                                                          Depth + 1),
2062                 getZeroExtendExpr(Step, Ty, Depth + 1), L,
2063                 AR->getNoWrapFlags());
2064           }
2065         }
2066       }
2067 
2068       auto NewFlags = proveNoSignedWrapViaInduction(AR);
2069       setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
2070       if (AR->hasNoSignedWrap()) {
2071         // Same as nsw case above - duplicated here to avoid a
2072         // compile-time issue.  It's not clear that the order of checks
2073         // matters, but it's one of two possible causes for a change
2074         // which was reverted.  Be conservative for the moment.
2075         return getAddRecExpr(
2076             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2077             getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2078       }
2079 
2080       // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2081       // if D + (C - D + Step * n) could be proven to not signed wrap
2082       // where D maximizes the number of trailing zeros of (C - D + Step * n)
2083       if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2084         const APInt &C = SC->getAPInt();
2085         const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2086         if (D != 0) {
2087           const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2088           const SCEV *SResidual =
2089               getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2090           const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2091           return getAddExpr(SSExtD, SSExtR,
2092                             (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2093                             Depth + 1);
2094         }
2095       }
2096 
2097       if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2098         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2099         return getAddRecExpr(
2100             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2101             getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2102       }
2103     }
2104 
2105   // If the input value is provably non-negative and we could not simplify
2106   // away the sext, build a zext instead.
2107   if (isKnownNonNegative(Op))
2108     return getZeroExtendExpr(Op, Ty, Depth + 1);
2109 
2110   // The cast wasn't folded; create an explicit cast node.
2111   // Recompute the insert position, as it may have been invalidated.
2112   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2113   SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2114                                                    Op, Ty);
2115   UniqueSCEVs.InsertNode(S, IP);
2116   registerUser(S, { Op });
2117   return S;
2118 }
2119 
2120 const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op,
2121                                          Type *Ty) {
2122   switch (Kind) {
2123   case scTruncate:
2124     return getTruncateExpr(Op, Ty);
2125   case scZeroExtend:
2126     return getZeroExtendExpr(Op, Ty);
2127   case scSignExtend:
2128     return getSignExtendExpr(Op, Ty);
2129   case scPtrToInt:
2130     return getPtrToIntExpr(Op, Ty);
2131   default:
2132     llvm_unreachable("Not a SCEV cast expression!");
2133   }
2134 }
2135 
2136 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2137 /// unspecified bits out to the given type.
2138 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2139                                               Type *Ty) {
2140   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2141          "This is not an extending conversion!");
2142   assert(isSCEVable(Ty) &&
2143          "This is not a conversion to a SCEVable type!");
2144   Ty = getEffectiveSCEVType(Ty);
2145 
2146   // Sign-extend negative constants.
2147   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2148     if (SC->getAPInt().isNegative())
2149       return getSignExtendExpr(Op, Ty);
2150 
2151   // Peel off a truncate cast.
2152   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2153     const SCEV *NewOp = T->getOperand();
2154     if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2155       return getAnyExtendExpr(NewOp, Ty);
2156     return getTruncateOrNoop(NewOp, Ty);
2157   }
2158 
2159   // Next try a zext cast. If the cast is folded, use it.
2160   const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2161   if (!isa<SCEVZeroExtendExpr>(ZExt))
2162     return ZExt;
2163 
2164   // Next try a sext cast. If the cast is folded, use it.
2165   const SCEV *SExt = getSignExtendExpr(Op, Ty);
2166   if (!isa<SCEVSignExtendExpr>(SExt))
2167     return SExt;
2168 
2169   // Force the cast to be folded into the operands of an addrec.
2170   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2171     SmallVector<const SCEV *, 4> Ops;
2172     for (const SCEV *Op : AR->operands())
2173       Ops.push_back(getAnyExtendExpr(Op, Ty));
2174     return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2175   }
2176 
2177   // If the expression is obviously signed, use the sext cast value.
2178   if (isa<SCEVSMaxExpr>(Op))
2179     return SExt;
2180 
2181   // Absent any other information, use the zext cast value.
2182   return ZExt;
2183 }
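
// Illustrative behavior (hypothetical operands): an i8 constant -1 is
// sign-extended, preserving its value; a truncate is peeled back to its
// source; and an operand with no better information falls through to the
// zext form computed above.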
2184 
2185 /// Process the given Ops list, which is a list of operands to be added under
2186 /// the given scale, and update the given map. This is a helper function for
2187 /// getAddExpr. As an example of what it does, given a sequence of operands
2188 /// that would form an add expression like this:
2189 ///
2190 ///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2191 ///
2192 /// where A and B are constants, update the map with these values:
2193 ///
2194 ///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2195 ///
2196 /// and add 13 + A*B*29 to AccumulatedConstant.
2197 /// This will allow getAddExpr to produce this:
2198 ///
2199 ///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2200 ///
2201 /// This form often exposes folding opportunities that are hidden in
2202 /// the original operand list.
2203 ///
2204 /// Return true iff it appears that any interesting folding opportunities
2205 /// may be exposed. This helps getAddExpr short-circuit extra work in
2206 /// the common case where no interesting opportunities are present, and
2207 /// is also used as a check to avoid infinite recursion.
2208 static bool
2209 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2210                              SmallVectorImpl<const SCEV *> &NewOps,
2211                              APInt &AccumulatedConstant,
2212                              const SCEV *const *Ops, size_t NumOperands,
2213                              const APInt &Scale,
2214                              ScalarEvolution &SE) {
2215   bool Interesting = false;
2216 
2217   // Iterate over the add operands. They are sorted, with constants first.
2218   unsigned i = 0;
2219   while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2220     ++i;
2221     // Pull a buried constant out to the outside.
2222     if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2223       Interesting = true;
2224     AccumulatedConstant += Scale * C->getAPInt();
2225   }
2226 
2227   // Next comes everything else. We're especially interested in multiplies
2228   // here, but they're in the middle, so just visit the rest with one loop.
2229   for (; i != NumOperands; ++i) {
2230     const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2231     if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2232       APInt NewScale =
2233           Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2234       if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2235         // A multiplication of a constant with another add; recurse.
2236         const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2237         Interesting |=
2238           CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2239                                        Add->op_begin(), Add->getNumOperands(),
2240                                        NewScale, SE);
2241       } else {
2242         // A multiplication of a constant with some other value. Update
2243         // the map.
2244         SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
2245         const SCEV *Key = SE.getMulExpr(MulOps);
2246         auto Pair = M.insert({Key, NewScale});
2247         if (Pair.second) {
2248           NewOps.push_back(Pair.first->first);
2249         } else {
2250           Pair.first->second += NewScale;
2251           // The map already had an entry for this value, which may indicate
2252           // a folding opportunity.
2253           Interesting = true;
2254         }
2255       }
2256     } else {
2257       // An ordinary operand. Update the map.
2258       std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
2259           M.insert({Ops[i], Scale});
2260       if (Pair.second) {
2261         NewOps.push_back(Pair.first->first);
2262       } else {
2263         Pair.first->second += Scale;
2264         // The map already had an entry for this value, which may indicate
2265         // a folding opportunity.
2266         Interesting = true;
2267       }
2268     }
2269   }
2270 
2271   return Interesting;
2272 }
2273 
2274 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
2275                                       const SCEV *LHS, const SCEV *RHS) {
2276   const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
2277                                             SCEV::NoWrapFlags, unsigned);
2278   switch (BinOp) {
2279   default:
2280     llvm_unreachable("Unsupported binary op");
2281   case Instruction::Add:
2282     Operation = &ScalarEvolution::getAddExpr;
2283     break;
2284   case Instruction::Sub:
2285     Operation = &ScalarEvolution::getMinusSCEV;
2286     break;
2287   case Instruction::Mul:
2288     Operation = &ScalarEvolution::getMulExpr;
2289     break;
2290   }
2291 
2292   const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) =
2293       Signed ? &ScalarEvolution::getSignExtendExpr
2294              : &ScalarEvolution::getZeroExtendExpr;
2295 
2296   // Check ext(LHS op RHS) == ext(LHS) op ext(RHS)
2297   auto *NarrowTy = cast<IntegerType>(LHS->getType());
2298   auto *WideTy =
2299       IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
2300 
2301   const SCEV *A = (this->*Extension)(
2302       (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0);
2303   const SCEV *B = (this->*Operation)((this->*Extension)(LHS, WideTy, 0),
2304                                      (this->*Extension)(RHS, WideTy, 0),
2305                                      SCEV::FlagAnyWrap, 0);
2306   return A == B;
2307 }
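
// Sketch of the check above for a hypothetical i8 signed add; it compares
//
//   A == sext(LHS + RHS) to i16
//   B == sext(LHS) + sext(RHS)   (computed in i16)
//
// The i16 sum of two sign-extended i8 values cannot overflow, so the two
// expressions fold to the same SCEV precisely when the narrow add can be
// proven not to sign-overflow.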
2308 
2309 std::pair<SCEV::NoWrapFlags, bool /*Deduced*/>
2310 ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
2311     const OverflowingBinaryOperator *OBO) {
2312   SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;
2313 
2314   if (OBO->hasNoUnsignedWrap())
2315     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2316   if (OBO->hasNoSignedWrap())
2317     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2318 
2319   bool Deduced = false;
2320 
2321   if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
2322     return {Flags, Deduced};
2323 
2324   if (OBO->getOpcode() != Instruction::Add &&
2325       OBO->getOpcode() != Instruction::Sub &&
2326       OBO->getOpcode() != Instruction::Mul)
2327     return {Flags, Deduced};
2328 
2329   const SCEV *LHS = getSCEV(OBO->getOperand(0));
2330   const SCEV *RHS = getSCEV(OBO->getOperand(1));
2331 
2332   if (!OBO->hasNoUnsignedWrap() &&
2333       willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
2334                       /* Signed */ false, LHS, RHS)) {
2335     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2336     Deduced = true;
2337   }
2338 
2339   if (!OBO->hasNoSignedWrap() &&
2340       willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
2341                       /* Signed */ true, LHS, RHS)) {
2342     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2343     Deduced = true;
2344   }
2345 
2346   return {Flags, Deduced};
2347 }
2348 
2349 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2350 // `Flags' as can't-wrap behavior.  Infer a more aggressive set of
2351 // can't-overflow flags for the operation if possible.
2352 static SCEV::NoWrapFlags
2353 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2354                       const ArrayRef<const SCEV *> Ops,
2355                       SCEV::NoWrapFlags Flags) {
2356   using namespace std::placeholders;
2357 
2358   using OBO = OverflowingBinaryOperator;
2359 
2360   bool CanAnalyze =
2361       Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2362   (void)CanAnalyze;
2363   assert(CanAnalyze && "don't call from other places!");
2364 
2365   int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2366   SCEV::NoWrapFlags SignOrUnsignWrap =
2367       ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2368 
2369   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2370   auto IsKnownNonNegative = [&](const SCEV *S) {
2371     return SE->isKnownNonNegative(S);
2372   };
2373 
2374   if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2375     Flags =
2376         ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2377 
2378   SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2379 
2380   if (SignOrUnsignWrap != SignOrUnsignMask &&
2381       (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2382       isa<SCEVConstant>(Ops[0])) {
2383 
2384     auto Opcode = [&] {
2385       switch (Type) {
2386       case scAddExpr:
2387         return Instruction::Add;
2388       case scMulExpr:
2389         return Instruction::Mul;
2390       default:
2391         llvm_unreachable("Unexpected SCEV op.");
2392       }
2393     }();
2394 
2395     const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2396 
2397     // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2398     if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2399       auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2400           Opcode, C, OBO::NoSignedWrap);
2401       if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2402         Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2403     }
2404 
2405     // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
2406     if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2407       auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2408           Opcode, C, OBO::NoUnsignedWrap);
2409       if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2410         Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2411     }
2412   }
2413 
2414   // <0,+,nonnegative><nw> is also nuw
2415   // TODO: Add corresponding nsw case
2416   if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) &&
2417       !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 &&
2418       Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2419     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2420 
2421   // both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW
2422   if (Type == scMulExpr && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) &&
2423       Ops.size() == 2) {
2424     if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
2425       if (UDiv->getOperand(1) == Ops[1])
2426         Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2427     if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
2428       if (UDiv->getOperand(1) == Ops[0])
2429         Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2430   }
2431 
2432   return Flags;
2433 }
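
// Illustrative instance of the constant-operand rule above (hypothetical
// values): for (1 + %x) in i8, makeGuaranteedNoWrapRegion(Add, 1, NoSignedWrap)
// yields the region [-128, 127), so FlagNSW may be set whenever the signed
// range of %x excludes 127.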
2434 
2435 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2436   return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2437 }
2438 
2439 /// Get a canonical add expression, or something simpler if possible.
2440 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2441                                         SCEV::NoWrapFlags OrigFlags,
2442                                         unsigned Depth) {
2443   assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2444          "only nuw or nsw allowed");
2445   assert(!Ops.empty() && "Cannot get empty add!");
2446   if (Ops.size() == 1) return Ops[0];
2447 #ifndef NDEBUG
2448   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2449   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2450     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2451            "SCEVAddExpr operand types don't match!");
2452   unsigned NumPtrs = count_if(
2453       Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
2454   assert(NumPtrs <= 1 && "add has at most one pointer operand");
2455 #endif
2456 
2457   // Sort by complexity; this groups all similar expression types together.
2458   GroupByComplexity(Ops, &LI, DT);
2459 
2460   // If there are any constants, fold them together.
2461   unsigned Idx = 0;
2462   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2463     ++Idx;
2464     assert(Idx < Ops.size());
2465     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2466       // We found two constants, fold them together!
2467       Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2468       if (Ops.size() == 2) return Ops[0];
2469       Ops.erase(Ops.begin()+1);  // Erase the folded element
2470       LHSC = cast<SCEVConstant>(Ops[0]);
2471     }
2472 
2473     // If we are left with a constant zero being added, strip it off.
2474     if (LHSC->getValue()->isZero()) {
2475       Ops.erase(Ops.begin());
2476       --Idx;
2477     }
2478 
2479     if (Ops.size() == 1) return Ops[0];
2480   }
2481 
2482   // Delay expensive flag strengthening until necessary.
2483   auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2484     return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
2485   };
2486 
2487   // Limit recursion calls depth.
2488   if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2489     return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2490 
2491   if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) {
2492     // Don't strengthen flags if we have no new information.
2493     SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
2494     if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2495       Add->setNoWrapFlags(ComputeFlags(Ops));
2496     return S;
2497   }
2498 
2499   // Okay, check to see if the same value occurs in the operand list more than
2500   // once.  If so, merge them together into a multiply expression.  Since we
2501   // sorted the list, these values are required to be adjacent.
2502   Type *Ty = Ops[0]->getType();
2503   bool FoundMatch = false;
2504   for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2505     if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
2506       // Scan ahead to count how many equal operands there are.
2507       unsigned Count = 2;
2508       while (i+Count != e && Ops[i+Count] == Ops[i])
2509         ++Count;
2510       // Merge the values into a multiply.
2511       const SCEV *Scale = getConstant(Ty, Count);
2512       const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2513       if (Ops.size() == Count)
2514         return Mul;
2515       Ops[i] = Mul;
2516       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2517       --i; e -= Count - 1;
2518       FoundMatch = true;
2519     }
2520   if (FoundMatch)
2521     return getAddExpr(Ops, OrigFlags, Depth + 1);
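
  // For illustration (hypothetical operands): (%x + %y + %y + %y) folds here
  // to %x + 3 * %y before the remaining transforms below run.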
2522 
2523   // Check for truncates. If all the operands are truncated from the same
2524   // type, see if factoring out the truncate would permit the result to be
2525   // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2526   // if the contents of the resulting outer trunc fold to something simple.
2527   auto FindTruncSrcType = [&]() -> Type * {
2528     // We're ultimately looking to fold an add of truncs and muls of only
2529     // constants and truncs, so if we find any other types of SCEV
2530     // as operands of the add then we bail and return nullptr here.
2531     // Otherwise, we return the type of the operand of a trunc that we find.
2532     if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2533       return T->getOperand()->getType();
2534     if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2535       const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2536       if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2537         return T->getOperand()->getType();
2538     }
2539     return nullptr;
2540   };
2541   if (auto *SrcType = FindTruncSrcType()) {
2542     SmallVector<const SCEV *, 8> LargeOps;
2543     bool Ok = true;
2544     // Check all the operands to see if they can be represented in the
2545     // source type of the truncate.
2546     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2547       if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2548         if (T->getOperand()->getType() != SrcType) {
2549           Ok = false;
2550           break;
2551         }
2552         LargeOps.push_back(T->getOperand());
2553       } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2554         LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2555       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2556         SmallVector<const SCEV *, 8> LargeMulOps;
2557         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2558           if (const SCEVTruncateExpr *T =
2559                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2560             if (T->getOperand()->getType() != SrcType) {
2561               Ok = false;
2562               break;
2563             }
2564             LargeMulOps.push_back(T->getOperand());
2565           } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2566             LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2567           } else {
2568             Ok = false;
2569             break;
2570           }
2571         }
2572         if (Ok)
2573           LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2574       } else {
2575         Ok = false;
2576         break;
2577       }
2578     }
2579     if (Ok) {
2580       // Evaluate the expression in the larger type.
2581       const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2582       // If it folds to something simple, use it. Otherwise, don't.
2583       if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2584         return getTruncateExpr(Fold, Ty);
2585     }
2586   }
2587 
2588   if (Ops.size() == 2) {
2589     // Check if we have an expression of the form ((X + C1) - C2), where C1 and
2590     // C2 can be folded in a way that allows retaining wrapping flags of (X +
2591     // C1).
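    // Illustrative sketch (not from the source): with Ops = {-2, (5 + X)<nuw>}
    // the combined constant is 3; since 3 u<= 5, (3 + X) cannot unsigned-wrap
    // either, and NUW is retained on the folded expression.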
2592     const SCEV *A = Ops[0];
2593     const SCEV *B = Ops[1];
2594     auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
2595     auto *C = dyn_cast<SCEVConstant>(A);
2596     if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2597       auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2598       auto C2 = C->getAPInt();
2599       SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap;
2600 
2601       APInt ConstAdd = C1 + C2;
2602       auto AddFlags = AddExpr->getNoWrapFlags();
2603       // Adding a smaller constant is NUW if the original AddExpr was NUW.
2604       if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) &&
2605           ConstAdd.ule(C1)) {
2606         PreservedFlags =
2607             ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW);
2608       }
2609 
2610       // Adding a constant with the same sign and no greater magnitude is NSW
2611       // if the original AddExpr was NSW.
2612       if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) &&
2613           C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
2614           ConstAdd.abs().ule(C1.abs())) {
2615         PreservedFlags =
2616             ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
2617       }
2618 
2619       if (PreservedFlags != SCEV::FlagAnyWrap) {
2620         SmallVector<const SCEV *, 4> NewOps(AddExpr->operands());
2621         NewOps[0] = getConstant(ConstAdd);
2622         return getAddExpr(NewOps, PreservedFlags);
2623       }
2624     }
2625   }
2626 
2627   // Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y)
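  // For example (illustrative): with X = 17 and Y = 5, X - (X urem Y) is
  // 17 - 2 == 15, which equals Y * (X udiv Y) == 5 * 3.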
2628   if (Ops.size() == 2) {
2629     const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[0]);
2630     if (Mul && Mul->getNumOperands() == 2 &&
2631         Mul->getOperand(0)->isAllOnesValue()) {
2632       const SCEV *X;
2633       const SCEV *Y;
2634       if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) {
2635         return getMulExpr(Y, getUDivExpr(X, Y));
2636       }
2637     }
2638   }
2639 
2640   // Skip past any other cast SCEVs.
2641   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2642     ++Idx;
2643 
2644   // If there are add operands they would be next.
2645   if (Idx < Ops.size()) {
2646     bool DeletedAdd = false;
2647     // If the original flags and all inlined SCEVAddExprs are NUW, use the
2648     // common NUW flag for the expression after inlining. Other flags cannot be
2649     // preserved, because they may depend on the original order of operations.
2650     SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
2651     while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2652       if (Ops.size() > AddOpsInlineThreshold ||
2653           Add->getNumOperands() > AddOpsInlineThreshold)
2654         break;
2655       // If we have an add, expand the add operands onto the end of the operands
2656       // list.
2657       Ops.erase(Ops.begin()+Idx);
2658       Ops.append(Add->op_begin(), Add->op_end());
2659       DeletedAdd = true;
2660       CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
2661     }
2662 
2663     // If we deleted at least one add, we added operands to the end of the list,
2664     // and they are not necessarily sorted.  Recurse to resort and resimplify
2665     // any operands we just acquired.
2666     if (DeletedAdd)
2667       return getAddExpr(Ops, CommonFlags, Depth + 1);
2668   }
2669 
2670   // Skip over the add expression until we get to a multiply.
2671   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2672     ++Idx;
2673 
2674   // Check to see if there are any folding opportunities present with
2675   // operands multiplied by constant values.
2676   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2677     uint64_t BitWidth = getTypeSizeInBits(Ty);
2678     DenseMap<const SCEV *, APInt> M;
2679     SmallVector<const SCEV *, 8> NewOps;
2680     APInt AccumulatedConstant(BitWidth, 0);
2681     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2682                                      Ops.data(), Ops.size(),
2683                                      APInt(BitWidth, 1), *this)) {
2684       struct APIntCompare {
2685         bool operator()(const APInt &LHS, const APInt &RHS) const {
2686           return LHS.ult(RHS);
2687         }
2688       };
2689 
2690       // Some interesting folding opportunity is present, so it's worthwhile to
2691       // re-generate the operands list. Group the operands by constant scale,
2692       // to avoid multiplying by the same constant scale multiple times.
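      // For example (a sketch): 2*X + 4*Y + 2*Z groups into the scale lists
      // {2: [X, Z], 4: [Y]} and is re-emitted as 2*(X + Z) + 4*Y.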
2693       std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2694       for (const SCEV *NewOp : NewOps)
2695         MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2696       // Re-generate the operands list.
2697       Ops.clear();
2698       if (AccumulatedConstant != 0)
2699         Ops.push_back(getConstant(AccumulatedConstant));
2700       for (auto &MulOp : MulOpLists) {
2701         if (MulOp.first == 1) {
2702           Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1));
2703         } else if (MulOp.first != 0) {
2704           Ops.push_back(getMulExpr(
2705               getConstant(MulOp.first),
2706               getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2707               SCEV::FlagAnyWrap, Depth + 1));
2708         }
2709       }
2710       if (Ops.empty())
2711         return getZero(Ty);
2712       if (Ops.size() == 1)
2713         return Ops[0];
2714       return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2715     }
2716   }
2717 
2718   // If we are adding something to a multiply expression, make sure the
2719   // something is not already an operand of the multiply.  If so, merge it into
2720   // the multiply.
2721   for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2722     const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2723     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2724       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2725       if (isa<SCEVConstant>(MulOpSCEV))
2726         continue;
2727       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2728         if (MulOpSCEV == Ops[AddOp]) {
2729           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
2730           const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2731           if (Mul->getNumOperands() != 2) {
2732             // If the multiply has more than two operands, we must get the
2733             // Y*Z term.
2734             SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2735                                                 Mul->op_begin()+MulOp);
2736             MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2737             InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2738           }
2739           SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2740           const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2741           const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2742                                             SCEV::FlagAnyWrap, Depth + 1);
2743           if (Ops.size() == 2) return OuterMul;
2744           if (AddOp < Idx) {
2745             Ops.erase(Ops.begin()+AddOp);
2746             Ops.erase(Ops.begin()+Idx-1);
2747           } else {
2748             Ops.erase(Ops.begin()+Idx);
2749             Ops.erase(Ops.begin()+AddOp-1);
2750           }
2751           Ops.push_back(OuterMul);
2752           return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2753         }
2754 
2755       // Check this multiply against other multiplies being added together.
2756       for (unsigned OtherMulIdx = Idx+1;
2757            OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2758            ++OtherMulIdx) {
2759         const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2760         // If MulOp occurs in OtherMul, we can fold the two multiplies
2761         // together.
2762         for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2763              OMulOp != e; ++OMulOp)
2764           if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2765             // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2766             const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2767             if (Mul->getNumOperands() != 2) {
2768               SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2769                                                   Mul->op_begin()+MulOp);
2770               MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2771               InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2772             }
2773             const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2774             if (OtherMul->getNumOperands() != 2) {
2775               SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
2776                                                   OtherMul->op_begin()+OMulOp);
2777               MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2778               InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2779             }
2780             SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
2781             const SCEV *InnerMulSum =
2782                 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2783             const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
2784                                               SCEV::FlagAnyWrap, Depth + 1);
2785             if (Ops.size() == 2) return OuterMul;
2786             Ops.erase(Ops.begin()+Idx);
2787             Ops.erase(Ops.begin()+OtherMulIdx-1);
2788             Ops.push_back(OuterMul);
2789             return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2790           }
2791       }
2792     }
2793   }
2794 
2795   // If there are any add recurrences in the operands list, see if any other
2796   // added values are loop invariant.  If so, we can fold them into the
2797   // recurrence.
2798   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2799     ++Idx;
2800 
2801   // Scan over all recurrences, trying to fold loop invariants into them.
2802   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2803     // Scan all of the other operands to this add and add them to the vector if
2804     // they are loop invariant w.r.t. the recurrence.
2805     SmallVector<const SCEV *, 8> LIOps;
2806     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2807     const Loop *AddRecLoop = AddRec->getLoop();
2808     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2809       if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2810         LIOps.push_back(Ops[i]);
2811         Ops.erase(Ops.begin()+i);
2812         --i; --e;
2813       }
2814 
2815     // If we found some loop invariants, fold them into the recurrence.
2816     if (!LIOps.empty()) {
2817       // Compute nowrap flags for the addition of the loop-invariant ops and
2818       // the addrec. Temporarily push it as an operand for that purpose. These
2819       // flags are valid in the scope of the addrec only.
2820       LIOps.push_back(AddRec);
2821       SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
2822       LIOps.pop_back();
2823 
2824       //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
2825       LIOps.push_back(AddRec->getStart());
2826 
2827       SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2828 
2829       // It is not in general safe to propagate flags valid on an add within
2830       // the addrec scope to one outside it.  We must prove that the inner
2831       // scope is guaranteed to execute if the outer one does to be able to
2832       // safely propagate.  We know the program is undefined if poison is
2833       // produced on the inner scoped addrec.  We also know that *for this use*
2834       // the outer scoped add can't overflow (because of the flags we just
2835       // computed for the inner scoped add) without the program being undefined.
2836       // Proving that entry to the outer scope necessitates entry to the inner
2837       // scope thus proves the program undefined if the flags would be violated
2838       // in the outer scope.
2839       SCEV::NoWrapFlags AddFlags = Flags;
2840       if (AddFlags != SCEV::FlagAnyWrap) {
2841         auto *DefI = getDefiningScopeBound(LIOps);
2842         auto *ReachI = &*AddRecLoop->getHeader()->begin();
2843         if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2844           AddFlags = SCEV::FlagAnyWrap;
2845       }
2846       AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1);
2847 
2848       // Build the new addrec. Propagate the NUW and NSW flags if both the
2849       // outer add and the inner addrec are guaranteed to have no overflow.
2850       // Always propagate NW.
2851       Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2852       const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2853 
2854       // If all of the other operands were loop invariant, we are done.
2855       if (Ops.size() == 1) return NewRec;
2856 
2857       // Otherwise, add the folded AddRec to the non-invariant parts.
2858       for (unsigned i = 0;; ++i)
2859         if (Ops[i] == AddRec) {
2860           Ops[i] = NewRec;
2861           break;
2862         }
2863       return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2864     }
2865 
2866     // Okay, if there weren't any loop invariants to be folded, check to see if
2867     // there are multiple AddRecs with the same loop induction variable being
2868     // added together.  If so, we can fold them.
2869     for (unsigned OtherIdx = Idx+1;
2870          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2871          ++OtherIdx) {
2872       // We expect the AddRecExprs to be sorted in reverse dominance order,
2873       // so that the first AddRecExpr found is dominated by all the others.
2874       assert(DT.dominates(
2875            cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2876            AddRec->getLoop()->getHeader()) &&
2877         "AddRecExprs are not sorted in reverse dominance order?");
2878       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2879         // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
2880         SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2881         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2882              ++OtherIdx) {
2883           const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2884           if (OtherAddRec->getLoop() == AddRecLoop) {
2885             for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2886                  i != e; ++i) {
2887               if (i >= AddRecOps.size()) {
2888                 AddRecOps.append(OtherAddRec->op_begin()+i,
2889                                  OtherAddRec->op_end());
2890                 break;
2891               }
2892               SmallVector<const SCEV *, 2> TwoOps = {
2893                   AddRecOps[i], OtherAddRec->getOperand(i)};
2894               AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2895             }
2896             Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2897           }
2898         }
2899         // Step size has changed, so we cannot guarantee no self-wraparound.
2900         Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2901         return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2902       }
2903     }
2904 
2905     // Otherwise couldn't fold anything into this recurrence.  Move onto the
2906     // next one.
2907   }
2908 
2909   // Okay, it looks like we really DO need an add expr.  Check to see if we
2910   // already have one, otherwise create a new one.
2911   return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2912 }
2913 
2914 const SCEV *
2915 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2916                                     SCEV::NoWrapFlags Flags) {
2917   FoldingSetNodeID ID;
2918   ID.AddInteger(scAddExpr);
2919   for (const SCEV *Op : Ops)
2920     ID.AddPointer(Op);
2921   void *IP = nullptr;
2922   SCEVAddExpr *S =
2923       static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2924   if (!S) {
2925     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2926     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2927     S = new (SCEVAllocator)
2928         SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2929     UniqueSCEVs.InsertNode(S, IP);
2930     registerUser(S, Ops);
2931   }
2932   S->setNoWrapFlags(Flags);
2933   return S;
2934 }
2935 
2936 const SCEV *
2937 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
2938                                        const Loop *L, SCEV::NoWrapFlags Flags) {
2939   FoldingSetNodeID ID;
2940   ID.AddInteger(scAddRecExpr);
2941   for (const SCEV *Op : Ops)
2942     ID.AddPointer(Op);
2943   ID.AddPointer(L);
2944   void *IP = nullptr;
2945   SCEVAddRecExpr *S =
2946       static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2947   if (!S) {
2948     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2949     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2950     S = new (SCEVAllocator)
2951         SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
2952     UniqueSCEVs.InsertNode(S, IP);
2953     LoopUsers[L].push_back(S);
2954     registerUser(S, Ops);
2955   }
2956   setNoWrapFlags(S, Flags);
2957   return S;
2958 }
2959 
2960 const SCEV *
2961 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
2962                                     SCEV::NoWrapFlags Flags) {
2963   FoldingSetNodeID ID;
2964   ID.AddInteger(scMulExpr);
2965   for (const SCEV *Op : Ops)
2966     ID.AddPointer(Op);
2967   void *IP = nullptr;
2968   SCEVMulExpr *S =
2969     static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2970   if (!S) {
2971     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2972     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2973     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2974                                         O, Ops.size());
2975     UniqueSCEVs.InsertNode(S, IP);
2976     registerUser(S, Ops);
2977   }
2978   S->setNoWrapFlags(Flags);
2979   return S;
2980 }
2981 
2982 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2983   uint64_t k = i*j;
2984   if (j > 1 && k / j != i) Overflow = true;
2985   return k;
2986 }
2987 
2988 /// Compute the result of "n choose k", the binomial coefficient.  If an
2989 /// intermediate computation overflows, Overflow is set and the returned value
2990 /// is garbage. Overflow is never cleared, so callers must initialize it.
2991 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2992   // We use the multiplicative formula:
2993   //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2994   // At each iteration i, we multiply by the i-th factor of the numerator,
2995   // n-(i-1), and divide by i.  This division will always produce an
2996   // integral result, and helps reduce the chance of overflow in the
2997   // intermediate computations. However, we can still overflow even when the
2998   // final result would fit.
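  // For example, Choose(5, 2) iterates r = (1*5)/1 = 5, then r = (5*4)/2 = 10,
  // matching C(5,2) = 10.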
2999 
3000   if (n == 0 || n == k) return 1;
3001   if (k > n) return 0;
3002 
3003   if (k > n/2)
3004     k = n-k;
3005 
3006   uint64_t r = 1;
3007   for (uint64_t i = 1; i <= k; ++i) {
3008     r = umul_ov(r, n-(i-1), Overflow);
3009     r /= i;
3010   }
3011   return r;
3012 }
3013 
3014 /// Determine if any of the operands in this SCEV are a constant or if
3015 /// any of the add or multiply expressions in this SCEV contain a constant.
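/// For example, (3 + X) * Y contains a constant in its add/mul chain, while
/// (X /u 3) * Y does not, because the traversal does not look through the udiv.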
3016 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
3017   struct FindConstantInAddMulChain {
3018     bool FoundConstant = false;
3019 
3020     bool follow(const SCEV *S) {
3021       FoundConstant |= isa<SCEVConstant>(S);
3022       return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
3023     }
3024 
3025     bool isDone() const {
3026       return FoundConstant;
3027     }
3028   };
3029 
3030   FindConstantInAddMulChain F;
3031   SCEVTraversal<FindConstantInAddMulChain> ST(F);
3032   ST.visitAll(StartExpr);
3033   return F.FoundConstant;
3034 }
3035 
3036 /// Get a canonical multiply expression, or something simpler if possible.
3037 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
3038                                         SCEV::NoWrapFlags OrigFlags,
3039                                         unsigned Depth) {
3040   assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
3041          "only nuw or nsw allowed");
3042   assert(!Ops.empty() && "Cannot get empty mul!");
3043   if (Ops.size() == 1) return Ops[0];
3044 #ifndef NDEBUG
3045   Type *ETy = Ops[0]->getType();
3046   assert(!ETy->isPointerTy());
3047   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3048     assert(Ops[i]->getType() == ETy &&
3049            "SCEVMulExpr operand types don't match!");
3050 #endif
3051 
3052   // Sort by complexity, this groups all similar expression types together.
3053   GroupByComplexity(Ops, &LI, DT);
3054 
3055   // If there are any constants, fold them together.
3056   unsigned Idx = 0;
3057   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3058     ++Idx;
3059     assert(Idx < Ops.size());
3060     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3061       // We found two constants, fold them together!
3062       Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3063       if (Ops.size() == 2) return Ops[0];
3064       Ops.erase(Ops.begin()+1);  // Erase the folded element
3065       LHSC = cast<SCEVConstant>(Ops[0]);
3066     }
3067 
3068     // If we have a multiply of zero, it will always be zero.
3069     if (LHSC->getValue()->isZero())
3070       return LHSC;
3071 
3072     // If we are left with a constant one being multiplied, strip it off.
3073     if (LHSC->getValue()->isOne()) {
3074       Ops.erase(Ops.begin());
3075       --Idx;
3076     }
3077 
3078     if (Ops.size() == 1)
3079       return Ops[0];
3080   }
3081 
3082   // Delay expensive flag strengthening until necessary.
3083   auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
3084     return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
3085   };
3086 
3087   // Limit recursion depth.
3088   if (Depth > MaxArithDepth || hasHugeExpression(Ops))
3089     return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3090 
3091   if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
3092     // Don't strengthen flags if we have no new information.
3093     SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
3094     if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3095       Mul->setNoWrapFlags(ComputeFlags(Ops));
3096     return S;
3097   }
3098 
3099   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3100     if (Ops.size() == 2) {
3101       // C1*(C2+V) -> C1*C2 + C1*V
3102       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3103         // If any of Add's ops are Adds or Muls with a constant, apply this
3104         // transformation as well.
3105         //
3106         // TODO: There are some cases where this transformation is not
3107         // profitable; for example, Add = (C0 + X) * Y + Z.  Maybe the scope of
3108         // this transformation should be narrowed down.
3109         if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
3110           return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
3111                                        SCEV::FlagAnyWrap, Depth + 1),
3112                             getMulExpr(LHSC, Add->getOperand(1),
3113                                        SCEV::FlagAnyWrap, Depth + 1),
3114                             SCEV::FlagAnyWrap, Depth + 1);
3115 
3116       if (Ops[0]->isAllOnesValue()) {
3117         // If we have a mul by -1 of an add, try distributing the -1 among the
3118         // add operands.
3119         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3120           SmallVector<const SCEV *, 4> NewOps;
3121           bool AnyFolded = false;
3122           for (const SCEV *AddOp : Add->operands()) {
3123             const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
3124                                          Depth + 1);
3125             if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
3126             NewOps.push_back(Mul);
3127           }
3128           if (AnyFolded)
3129             return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
3130         } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3131           // Negation preserves a recurrence's no self-wrap property.
3132           SmallVector<const SCEV *, 4> Operands;
3133           for (const SCEV *AddRecOp : AddRec->operands())
3134             Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
3135                                           Depth + 1));
3136 
3137           return getAddRecExpr(Operands, AddRec->getLoop(),
3138                                AddRec->getNoWrapFlags(SCEV::FlagNW));
3139         }
3140       }
3141     }
3142   }
3143 
3144   // Skip over the add expression until we get to a multiply.
3145   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3146     ++Idx;
3147 
3148   // If there are mul operands inline them all into this expression.
3149   if (Idx < Ops.size()) {
3150     bool DeletedMul = false;
3151     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3152       if (Ops.size() > MulOpsInlineThreshold)
3153         break;
3154       // If we have a mul, expand the mul operands onto the end of the
3155       // operands list.
3156       Ops.erase(Ops.begin()+Idx);
3157       Ops.append(Mul->op_begin(), Mul->op_end());
3158       DeletedMul = true;
3159     }
3160 
3161     // If we deleted at least one mul, we added operands to the end of the
3162     // list, and they are not necessarily sorted.  Recurse to resort and
3163     // resimplify any operands we just acquired.
3164     if (DeletedMul)
3165       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3166   }
3167 
3168   // If there are any add recurrences in the operands list, see if any other
3169   // multiplied values are loop invariant.  If so, we can fold them into the
3170   // recurrence.
3171   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
3172     ++Idx;
3173 
3174   // Scan over all recurrences, trying to fold loop invariants into them.
3175   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3176     // Scan all of the other operands to this mul and add them to the vector
3177     // if they are loop invariant w.r.t. the recurrence.
3178     SmallVector<const SCEV *, 8> LIOps;
3179     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
3180     const Loop *AddRecLoop = AddRec->getLoop();
3181     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3182       if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
3183         LIOps.push_back(Ops[i]);
3184         Ops.erase(Ops.begin()+i);
3185         --i; --e;
3186       }
3187 
3188     // If we found some loop invariants, fold them into the recurrence.
3189     if (!LIOps.empty()) {
3190       //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
3191       SmallVector<const SCEV *, 4> NewOps;
3192       NewOps.reserve(AddRec->getNumOperands());
3193       const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
3194       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
3195         NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
3196                                     SCEV::FlagAnyWrap, Depth + 1));
3197 
3198       // Build the new addrec. Propagate the NUW and NSW flags if both the
3199       // outer mul and the inner addrec are guaranteed to have no overflow.
3200       //
3201       // No-self-wrap cannot be guaranteed after changing the step size, but
3202       // it will be inferred if either NUW or NSW is true.
3203       SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec});
3204       const SCEV *NewRec = getAddRecExpr(
3205           NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags));
3206 
3207       // If all of the other operands were loop invariant, we are done.
3208       if (Ops.size() == 1) return NewRec;
3209 
3210       // Otherwise, multiply the folded AddRec by the non-invariant parts.
3211       for (unsigned i = 0;; ++i)
3212         if (Ops[i] == AddRec) {
3213           Ops[i] = NewRec;
3214           break;
3215         }
3216       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3217     }
3218 
3219     // Okay, if there weren't any loop invariants to be folded, check to see
3220     // if there are multiple AddRecs with the same loop induction variable
3221     // being multiplied together.  If so, we can fold them.
3222 
3223     // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
3224     // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
3225     //       choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
3226     //   ]]],+,...up to x=2n}.
3227     // Note that the arguments to choose() are always integers with values
3228     // known at compile time, never SCEV objects.
3229     //
3230     // The implementation avoids pointless extra computations when the two
3231     // addrec's are of different length (mathematically, it's equivalent to
3232     // an infinite stream of zeros on the right).
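    // In the linear case this reduces to the familiar chrec identity:
    //   {a,+,b}<L> * {c,+,d}<L> = {a*c,+,a*d+b*c+b*d,+,2*b*d}<L>,
    // since (a+b*i)*(c+d*i) = a*c + (a*d+b*c)*i + b*d*i^2, and i^2 = 2*C(i,2) + i.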
3233     bool OpsModified = false;
3234     for (unsigned OtherIdx = Idx+1;
3235          OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3236          ++OtherIdx) {
3237       const SCEVAddRecExpr *OtherAddRec =
3238         dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3239       if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
3240         continue;
3241 
3242       // Limit max number of arguments to avoid creation of unreasonably big
3243       // SCEVAddRecs with very complex operands.
3244       if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3245           MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3246         continue;
3247 
3248       bool Overflow = false;
3249       Type *Ty = AddRec->getType();
3250       bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3251       SmallVector<const SCEV*, 7> AddRecOps;
3252       for (int x = 0, xe = AddRec->getNumOperands() +
3253              OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3254         SmallVector <const SCEV *, 7> SumOps;
3255         for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3256           uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3257           for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3258                  ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3259                z < ze && !Overflow; ++z) {
3260             uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3261             uint64_t Coeff;
3262             if (LargerThan64Bits)
3263               Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3264             else
3265               Coeff = Coeff1*Coeff2;
3266             const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3267             const SCEV *Term1 = AddRec->getOperand(y-z);
3268             const SCEV *Term2 = OtherAddRec->getOperand(z);
3269             SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3270                                         SCEV::FlagAnyWrap, Depth + 1));
3271           }
3272         }
3273         if (SumOps.empty())
3274           SumOps.push_back(getZero(Ty));
3275         AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3276       }
3277       if (!Overflow) {
3278         const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3279                                               SCEV::FlagAnyWrap);
3280         if (Ops.size() == 2) return NewAddRec;
3281         Ops[Idx] = NewAddRec;
3282         Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3283         OpsModified = true;
3284         AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3285         if (!AddRec)
3286           break;
3287       }
3288     }
3289     if (OpsModified)
3290       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3291 
3292     // Otherwise couldn't fold anything into this recurrence.  Move onto the
3293     // next one.
3294   }
3295 
3296   // Okay, it looks like we really DO need a mul expr.  Check to see if we
3297   // already have one, otherwise create a new one.
3298   return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3299 }
3300 
3301 /// Represents an unsigned remainder expression based on unsigned division.
3302 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3303                                          const SCEV *RHS) {
3304   assert(getEffectiveSCEVType(LHS->getType()) ==
3305          getEffectiveSCEVType(RHS->getType()) &&
3306          "SCEVURemExpr operand types don't match!");
3307 
3308   // Short-circuit easy cases
3309   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3310     // If the constant is one, the result is trivial.
3311     if (RHSC->getValue()->isOne())
3312       return getZero(LHS->getType()); // X urem 1 --> 0
3313 
3314     // If the constant is a power of two, fold into a zext(trunc(LHS)).
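    // For example (illustrative): for an i8 value %X, %X urem 8 becomes
    // zext i3 (trunc %X to i3) to i8, i.e. the low three bits of %X.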
3315     if (RHSC->getAPInt().isPowerOf2()) {
3316       Type *FullTy = LHS->getType();
3317       Type *TruncTy =
3318           IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3319       return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3320     }
3321   }
3322 
3323   // Fall back to the identity: %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
3324   const SCEV *UDiv = getUDivExpr(LHS, RHS);
3325   const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3326   return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3327 }
3328 
3329 /// Get a canonical unsigned division expression, or something simpler if
3330 /// possible.
3331 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3332                                          const SCEV *RHS) {
3333   assert(!LHS->getType()->isPointerTy() &&
3334          "SCEVUDivExpr operand can't be pointer!");
3335   assert(LHS->getType() == RHS->getType() &&
3336          "SCEVUDivExpr operand types don't match!");
3337 
3338   FoldingSetNodeID ID;
3339   ID.AddInteger(scUDivExpr);
3340   ID.AddPointer(LHS);
3341   ID.AddPointer(RHS);
3342   void *IP = nullptr;
3343   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3344     return S;
3345 
3346   // 0 udiv Y == 0
3347   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
3348     if (LHSC->getValue()->isZero())
3349       return LHS;
3350 
3351   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3352     if (RHSC->getValue()->isOne())
3353       return LHS;                               // X udiv 1 --> X
3354     // If the denominator is zero, the result of the udiv is undefined. Don't
3355     // try to analyze it, because the resolution chosen here may differ from
3356     // the resolution chosen in other parts of the compiler.
3357     if (!RHSC->getValue()->isZero()) {
3358       // Determine if the division can be folded into the operands of the
3359       // LHS expression.
3360       // TODO: Generalize this to non-constants by using known-bits information.
3361       Type *Ty = LHS->getType();
3362       unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3363       unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3364       // For non-power-of-two values, effectively round the value up to the
3365       // nearest power of two.
3366       if (!RHSC->getAPInt().isPowerOf2())
3367         ++MaxShiftAmt;
3368       IntegerType *ExtTy =
3369         IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3370       if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3371         if (const SCEVConstant *Step =
3372             dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3373           // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3374           const APInt &StepInt = Step->getAPInt();
3375           const APInt &DivInt = RHSC->getAPInt();
3376           if (!StepInt.urem(DivInt) &&
3377               getZeroExtendExpr(AR, ExtTy) ==
3378               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3379                             getZeroExtendExpr(Step, ExtTy),
3380                             AR->getLoop(), SCEV::FlagAnyWrap)) {
3381             SmallVector<const SCEV *, 4> Operands;
3382             for (const SCEV *Op : AR->operands())
3383               Operands.push_back(getUDivExpr(Op, RHS));
3384             return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3385           }
3386           // Get a canonical UDivExpr for a recurrence.
3387           // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3388           // We can currently only fold X%N if X is constant.
3389           const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3390           if (StartC && !DivInt.urem(StepInt) &&
3391               getZeroExtendExpr(AR, ExtTy) ==
3392               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3393                             getZeroExtendExpr(Step, ExtTy),
3394                             AR->getLoop(), SCEV::FlagAnyWrap)) {
3395             const APInt &StartInt = StartC->getAPInt();
3396             const APInt &StartRem = StartInt.urem(StepInt);
3397             if (StartRem != 0) {
3398               const SCEV *NewLHS =
3399                   getAddRecExpr(getConstant(StartInt - StartRem), Step,
3400                                 AR->getLoop(), SCEV::FlagNW);
3401               if (LHS != NewLHS) {
3402                 LHS = NewLHS;
3403 
3404                 // Reset the ID to include the new LHS, and check if it is
3405                 // already cached.
3406                 ID.clear();
3407                 ID.AddInteger(scUDivExpr);
3408                 ID.AddPointer(LHS);
3409                 ID.AddPointer(RHS);
3410                 IP = nullptr;
3411                 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3412                   return S;
3413               }
3414             }
3415           }
3416         }
3417       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3418       if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
3419         SmallVector<const SCEV *, 4> Operands;
3420         for (const SCEV *Op : M->operands())
3421           Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3422         if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
3423           // Find an operand that's safely divisible.
3424           for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3425             const SCEV *Op = M->getOperand(i);
3426             const SCEV *Div = getUDivExpr(Op, RHSC);
3427             if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
3428               Operands = SmallVector<const SCEV *, 4>(M->operands());
3429               Operands[i] = Div;
3430               return getMulExpr(Operands);
3431             }
3432           }
3433       }
3434 
3435       // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
3436       if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
3437         if (auto *DivisorConstant =
3438                 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3439           bool Overflow = false;
3440           APInt NewRHS =
3441               DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
3442           if (Overflow) {
3443             return getConstant(RHSC->getType(), 0, false);
3444           }
3445           return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
3446         }
3447       }
3448 
3449       // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
3450       if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
3451         SmallVector<const SCEV *, 4> Operands;
3452         for (const SCEV *Op : A->operands())
3453           Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3454         if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
3455           Operands.clear();
3456           for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
3457             const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
3458             if (isa<SCEVUDivExpr>(Op) ||
3459                 getMulExpr(Op, RHS) != A->getOperand(i))
3460               break;
3461             Operands.push_back(Op);
3462           }
3463           if (Operands.size() == A->getNumOperands())
3464             return getAddExpr(Operands);
3465         }
3466       }
3467 
3468       // Fold if both operands are constant.
3469       if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
3470         Constant *LHSCV = LHSC->getValue();
3471         Constant *RHSCV = RHSC->getValue();
3472         return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
3473                                                                    RHSCV)));
3474       }
3475     }
3476   }
3477 
3478   // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
3479   // changes). Make sure we get a new one.
3480   IP = nullptr;
3481   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3482   SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
3483                                              LHS, RHS);
3484   UniqueSCEVs.InsertNode(S, IP);
3485   registerUser(S, {LHS, RHS});
3486   return S;
3487 }
3488 
3489 APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
3490   APInt A = C1->getAPInt().abs();
3491   APInt B = C2->getAPInt().abs();
3492   uint32_t ABW = A.getBitWidth();
3493   uint32_t BBW = B.getBitWidth();
3494 
3495   if (ABW > BBW)
3496     B = B.zext(ABW);
3497   else if (ABW < BBW)
3498     A = A.zext(BBW);
3499 
3500   return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
3501 }
3502 
3503 /// Get a canonical unsigned division expression, or something simpler if
3504 /// possible. There is no representation for an exact udiv in SCEV IR, but we
3505 /// can attempt to remove factors from the LHS and RHS.  We can't do this when
3506 /// it's not exact because the udiv may be clearing bits.
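/// For example (a sketch of one path through this routine): an exact
/// (6 * X)<nuw> /u 3 simplifies to 2 * X, cancelling the common factor found
/// via the gcd of the constants; a plain udiv may discard low bits instead.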
3507 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
3508                                               const SCEV *RHS) {
3509   // TODO: we could try to find factors in all sorts of things, but for now we
3510   // just deal with u/exact (multiply, constant). See SCEVDivision towards the
3511   // end of this file for inspiration.
3512 
3513   const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3514   if (!Mul || !Mul->hasNoUnsignedWrap())
3515     return getUDivExpr(LHS, RHS);
3516 
3517   if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3518     // If the mulexpr multiplies by a constant, then that constant must be the
3519     // first element of the mulexpr.
3520     if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3521       if (LHSCst == RHSCst) {
3522         SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
3523         return getMulExpr(Operands);
3524       }
3525 
3526       // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3527       // that there's a factor provided by one of the other terms. We need to
3528       // check.
3529       APInt Factor = gcd(LHSCst, RHSCst);
3530       if (!Factor.isIntN(1)) {
3531         LHSCst =
3532             cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3533         RHSCst =
3534             cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3535         SmallVector<const SCEV *, 2> Operands;
3536         Operands.push_back(LHSCst);
3537         Operands.append(Mul->op_begin() + 1, Mul->op_end());
3538         LHS = getMulExpr(Operands);
3539         RHS = RHSCst;
3540         Mul = dyn_cast<SCEVMulExpr>(LHS);
3541         if (!Mul)
3542           return getUDivExactExpr(LHS, RHS);
3543       }
3544     }
3545   }
3546 
3547   for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3548     if (Mul->getOperand(i) == RHS) {
3549       SmallVector<const SCEV *, 2> Operands;
3550       Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3551       Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3552       return getMulExpr(Operands);
3553     }
3554   }
3555 
3556   return getUDivExpr(LHS, RHS);
3557 }
3558 
3559 /// Get an add recurrence expression for the specified loop.  Simplify the
3560 /// expression as much as possible.
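/// For example, a step that is itself an addrec over the same loop, as in
/// {S,+,{Y,+,Z}<L>}<L>, is flattened into the single chain {S,+,Y,+,Z}<L>.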
3561 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3562                                            const Loop *L,
3563                                            SCEV::NoWrapFlags Flags) {
3564   SmallVector<const SCEV *, 4> Operands;
3565   Operands.push_back(Start);
3566   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3567     if (StepChrec->getLoop() == L) {
3568       Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3569       return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3570     }
3571 
3572   Operands.push_back(Step);
3573   return getAddRecExpr(Operands, L, Flags);
3574 }
3575 
3576 /// Get an add recurrence expression for the specified loop.  Simplify the
3577 /// expression as much as possible.
3578 const SCEV *
3579 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3580                                const Loop *L, SCEV::NoWrapFlags Flags) {
3581   if (Operands.size() == 1) return Operands[0];
3582 #ifndef NDEBUG
3583   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3584   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3585     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3586            "SCEVAddRecExpr operand types don't match!");
3587     assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
3588   }
3589   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3590     assert(isLoopInvariant(Operands[i], L) &&
3591            "SCEVAddRecExpr operand is not loop-invariant!");
3592 #endif
3593 
3594   if (Operands.back()->isZero()) {
3595     Operands.pop_back();
3596     return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
3597   }
3598 
3599   // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
3600   // use that information to infer NUW and NSW flags. However, computing a
3601   // BE count requires calling getAddRecExpr, so we may not yet have a
3602   // meaningful BE count at this point (and if we don't, we'd be stuck
3603   // with a SCEVCouldNotCompute as the cached BE count).
3604 
3605   Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3606 
3607   // Canonicalize nested AddRecs by nesting them in order of loop depth.
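  // For example (a sketch, linear case): {{A,+,B}<Inner>,+,C}<Outer> becomes
  // {{A,+,C}<Outer>,+,B}<Inner> when Outer is the shallower loop; both forms
  // denote A + B*i_inner + C*i_outer, given the invariance checks below.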
3608   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3609     const Loop *NestedLoop = NestedAR->getLoop();
3610     if (L->contains(NestedLoop)
3611             ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3612             : (!NestedLoop->contains(L) &&
3613                DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3614       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3615       Operands[0] = NestedAR->getStart();
3616       // AddRecs require their operands be loop-invariant with respect to their
3617       // loops. Don't perform this transformation if it would break this
3618       // requirement.
3619       bool AllInvariant = all_of(
3620           Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3621 
3622       if (AllInvariant) {
3623         // Create a recurrence for the outer loop with the same step size.
3624         //
3625         // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3626         // inner recurrence has the same property.
3627         SCEV::NoWrapFlags OuterFlags =
3628           maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3629 
3630         NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3631         AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3632           return isLoopInvariant(Op, NestedLoop);
3633         });
3634 
3635         if (AllInvariant) {
3636           // Ok, both add recurrences are valid after the transformation.
3637           //
3638           // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3639           // the outer recurrence has the same property.
3640           SCEV::NoWrapFlags InnerFlags =
3641             maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3642           return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3643         }
3644       }
3645       // Reset Operands to its original state.
3646       Operands[0] = NestedAR;
3647     }
3648   }
3649 
3650   // Okay, it looks like we really DO need an addrec expr.  Check to see if we
3651   // already have one, otherwise create a new one.
3652   return getOrCreateAddRecExpr(Operands, L, Flags);
3653 }
3654 
3655 const SCEV *
3656 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3657                             const SmallVectorImpl<const SCEV *> &IndexExprs) {
3658   const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3659   // getSCEV(Base)->getType() has the same address space as Base->getType()
3660   // because SCEV::getType() preserves the address space.
3661   Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3662   const bool AssumeInBoundsFlags = [&]() {
3663     if (!GEP->isInBounds())
3664       return false;
3665 
3666     // We'd like to propagate flags from the IR to the corresponding SCEV nodes,
3667     // but to do that, we have to ensure that said flag is valid in the entire
3668     // defined scope of the SCEV.
3669     auto *GEPI = dyn_cast<Instruction>(GEP);
3670     // TODO: non-instructions have global scope.  We might be able to prove
3671     // some global scope cases
3672     return GEPI && isSCEVExprNeverPoison(GEPI);
3673   }();
3674 
3675   SCEV::NoWrapFlags OffsetWrap =
3676     AssumeInBoundsFlags ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3677 
3678   Type *CurTy = GEP->getType();
3679   bool FirstIter = true;
3680   SmallVector<const SCEV *, 4> Offsets;
3681   for (const SCEV *IndexExpr : IndexExprs) {
3682     // Compute the (potentially symbolic) offset in bytes for this index.
3683     if (StructType *STy = dyn_cast<StructType>(CurTy)) {
3684       // For a struct, add the member offset.
3685       ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3686       unsigned FieldNo = Index->getZExtValue();
3687       const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
3688       Offsets.push_back(FieldOffset);
3689 
3690       // Update CurTy to the type of the field at Index.
3691       CurTy = STy->getTypeAtIndex(Index);
3692     } else {
3693       // Update CurTy to its element type.
3694       if (FirstIter) {
3695         assert(isa<PointerType>(CurTy) &&
3696                "The first index of a GEP indexes a pointer");
3697         CurTy = GEP->getSourceElementType();
3698         FirstIter = false;
3699       } else {
3700         CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
3701       }
3702       // For an array, add the element offset, explicitly scaled.
3703       const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
3704       // Getelementptr indices are signed.
3705       IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
3706 
3707       // Multiply the index by the element size to compute the element offset.
3708       const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3709       Offsets.push_back(LocalOffset);
3710     }
3711   }
3712 
3713   // Handle degenerate case of GEP without offsets.
3714   if (Offsets.empty())
3715     return BaseExpr;
3716 
3717   // Add the offsets together, assuming nsw if inbounds.
3718   const SCEV *Offset = getAddExpr(Offsets, OffsetWrap);
3719   // Add the base address and the offset. We cannot use the nsw flag, as the
3720   // base address is unsigned. However, if we know that the offset is
3721   // non-negative, we can use nuw.
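  // For example (illustrative): for "getelementptr inbounds i32, ptr %p, i64 %i"
  // the byte offset 4 * %i is formed with <nsw>, while the final base + offset
  // add gets <nuw> only when the offset is known to be non-negative.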
3722   SCEV::NoWrapFlags BaseWrap = AssumeInBoundsFlags && isKnownNonNegative(Offset)
3723                                    ? SCEV::FlagNUW : SCEV::FlagAnyWrap;
3724   auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap);
3725   assert(BaseExpr->getType() == GEPExpr->getType() &&
3726          "GEP should not change type mid-flight.");
3727   return GEPExpr;
3728 }
3729 
3730 SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
3731                                                ArrayRef<const SCEV *> Ops) {
3732   FoldingSetNodeID ID;
3733   ID.AddInteger(SCEVType);
3734   for (const SCEV *Op : Ops)
3735     ID.AddPointer(Op);
3736   void *IP = nullptr;
3737   return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
3738 }
3739 
3740 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
3741   SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3742   return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
3743 }
3744 
3745 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
3746                                            SmallVectorImpl<const SCEV *> &Ops) {
3747   assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
3748   assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
3749   if (Ops.size() == 1) return Ops[0];
3750 #ifndef NDEBUG
3751   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3752   for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
3753     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3754            "Operand types don't match!");
3755     assert(Ops[0]->getType()->isPointerTy() ==
3756                Ops[i]->getType()->isPointerTy() &&
3757            "min/max should be consistently pointerish");
3758   }
3759 #endif
3760 
3761   bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
3762   bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;
3763 
3764   // Sort by complexity, this groups all similar expression types together.
3765   GroupByComplexity(Ops, &LI, DT);
3766 
3767   // Check if we have created the same expression before.
3768   if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {
3769     return S;
3770   }
3771 
3772   // If there are any constants, fold them together.
3773   unsigned Idx = 0;
3774   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3775     ++Idx;
3776     assert(Idx < Ops.size());
3777     auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
3778       if (Kind == scSMaxExpr)
3779         return APIntOps::smax(LHS, RHS);
3780       else if (Kind == scSMinExpr)
3781         return APIntOps::smin(LHS, RHS);
3782       else if (Kind == scUMaxExpr)
3783         return APIntOps::umax(LHS, RHS);
3784       else if (Kind == scUMinExpr)
3785         return APIntOps::umin(LHS, RHS);
3786       llvm_unreachable("Unknown SCEV min/max opcode");
3787     };
3788 
3789     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3790       // We found two constants, fold them together!
3791       ConstantInt *Fold = ConstantInt::get(
3792           getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
3793       Ops[0] = getConstant(Fold);
3794       Ops.erase(Ops.begin()+1);  // Erase the folded element
3795       if (Ops.size() == 1) return Ops[0];
3796       LHSC = cast<SCEVConstant>(Ops[0]);
3797     }
3798 
3799     bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
3800     bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);
3801 
3802     if (IsMax ? IsMinV : IsMaxV) {
3803       // If we are left with a constant minimum(/maximum)-int, strip it off.
3804       Ops.erase(Ops.begin());
3805       --Idx;
3806     } else if (IsMax ? IsMaxV : IsMinV) {
3807       // If we have a max(/min) with a constant maximum(/minimum)-int,
3808       // it will always be the extremum.
3809       return LHSC;
3810     }
3811 
3812     if (Ops.size() == 1) return Ops[0];
3813   }
3814 
3815   // Find the first operand of the same kind.
3816   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
3817     ++Idx;
3818 
3819   // Check to see if one of the operands is of the same kind. If so, expand its
3820   // operands onto our operand list, and recurse to simplify.
3821   if (Idx < Ops.size()) {
3822     bool DeletedAny = false;
3823     while (Ops[Idx]->getSCEVType() == Kind) {
3824       const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
3825       Ops.erase(Ops.begin()+Idx);
3826       Ops.append(SMME->op_begin(), SMME->op_end());
3827       DeletedAny = true;
3828     }
3829 
3830     if (DeletedAny)
3831       return getMinMaxExpr(Kind, Ops);
3832   }
3833 
3834   // Okay, check to see if the same value occurs in the operand list twice.  If
3835   // so, delete one.  Since we sorted the list, these values are required to
3836   // be adjacent.
3837   llvm::CmpInst::Predicate GEPred =
3838       IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
3839   llvm::CmpInst::Predicate LEPred =
3840       IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
3841   llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
3842   llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
3843   for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
3844     if (Ops[i] == Ops[i + 1] ||
3845         isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3846       //  X op Y op Y  -->  X op Y
3847       //  X op Y       -->  X, if we know X, Y are ordered appropriately
3848       Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
3849       --i;
3850       --e;
3851     } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3852                                                Ops[i + 1])) {
3853       //  X op Y       -->  Y, if we know X, Y are ordered appropriately
3854       Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
3855       --i;
3856       --e;
3857     }
3858   }
3859 
3860   if (Ops.size() == 1) return Ops[0];
3861 
3862   assert(!Ops.empty() && "Reduced min/max down to nothing!");
3863 
3864   // Okay, it looks like we really DO need an expr.  Check to see if we
3865   // already have one, otherwise create a new one.
3866   FoldingSetNodeID ID;
3867   ID.AddInteger(Kind);
3868   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3869     ID.AddPointer(Ops[i]);
3870   void *IP = nullptr;
3871   const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
3872   if (ExistingSCEV)
3873     return ExistingSCEV;
3874   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3875   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3876   SCEV *S = new (SCEVAllocator)
3877       SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
3878 
3879   UniqueSCEVs.InsertNode(S, IP);
3880   registerUser(S, Ops);
3881   return S;
3882 }
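
// Illustration (editor's note; %x is a hypothetical value): the folds
// above compose, e.g.
//   smax(%x, 5, smax(%x, 3))
//     -> smax(5, 3, %x, %x)   (inline the nested smax, sort by complexity)
//     -> smax(5, %x, %x)      (fold the constants 5 and 3)
//     -> smax(5, %x)          (drop the adjacent duplicate)
// and umin(0, %x) folds directly to 0, since 0 is the unsigned minimum.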
3883 
3884 namespace {
3885 
3886 class SCEVSequentialMinMaxDeduplicatingVisitor final
3887     : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
3888                          Optional<const SCEV *>> {
3889   using RetVal = Optional<const SCEV *>;
3890   using Base = SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, RetVal>;
3891 
3892   ScalarEvolution &SE;
3893   const SCEVTypes RootKind; // Must be a sequential min/max expression.
3894   const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind.
3895   SmallPtrSet<const SCEV *, 16> SeenOps;
3896 
3897   bool canRecurseInto(SCEVTypes Kind) const {
3898     // We can only recurse into SCEV expressions whose kind matches our
3899     // root expression's kind or its non-sequential variant.
3900     return RootKind == Kind || NonSequentialRootKind == Kind;
3901   }
3902 
3903   RetVal visitAnyMinMaxExpr(const SCEV *S) {
3904     assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
3905            "Only for min/max expressions.");
3906     SCEVTypes Kind = S->getSCEVType();
3907 
3908     if (!canRecurseInto(Kind))
3909       return S;
3910 
3911     auto *NAry = cast<SCEVNAryExpr>(S);
3912     SmallVector<const SCEV *> NewOps;
3913     bool Changed =
3914         visit(Kind, makeArrayRef(NAry->op_begin(), NAry->op_end()), NewOps);
3915 
3916     if (!Changed)
3917       return S;
3918     if (NewOps.empty())
3919       return None;
3920 
3921     return isa<SCEVSequentialMinMaxExpr>(S)
3922                ? SE.getSequentialMinMaxExpr(Kind, NewOps)
3923                : SE.getMinMaxExpr(Kind, NewOps);
3924   }
3925 
3926   RetVal visit(const SCEV *S) {
3927     // Has the whole operand been seen already?
3928     if (!SeenOps.insert(S).second)
3929       return None;
3930     return Base::visit(S);
3931   }
3932 
3933 public:
3934   SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
3935                                            SCEVTypes RootKind)
3936       : SE(SE), RootKind(RootKind),
3937         NonSequentialRootKind(
3938             SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
3939                 RootKind)) {}
3940 
3941   bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<const SCEV *> OrigOps,
3942                          SmallVectorImpl<const SCEV *> &NewOps) {
3943     bool Changed = false;
3944     SmallVector<const SCEV *> Ops;
3945     Ops.reserve(OrigOps.size());
3946 
3947     for (const SCEV *Op : OrigOps) {
3948       RetVal NewOp = visit(Op);
3949       if (NewOp != Op)
3950         Changed = true;
3951       if (NewOp)
3952         Ops.emplace_back(*NewOp);
3953     }
3954 
3955     if (Changed)
3956       NewOps = std::move(Ops);
3957     return Changed;
3958   }
3959 
3960   RetVal visitConstant(const SCEVConstant *Constant) { return Constant; }
3961 
3962   RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; }
3963 
3964   RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; }
3965 
3966   RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; }
3967 
3968   RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; }
3969 
3970   RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; }
3971 
3972   RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; }
3973 
3974   RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; }
3975 
3976   RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
3977 
3978   RetVal visitSMaxExpr(const SCEVSMaxExpr *Expr) {
3979     return visitAnyMinMaxExpr(Expr);
3980   }
3981 
3982   RetVal visitUMaxExpr(const SCEVUMaxExpr *Expr) {
3983     return visitAnyMinMaxExpr(Expr);
3984   }
3985 
3986   RetVal visitSMinExpr(const SCEVSMinExpr *Expr) {
3987     return visitAnyMinMaxExpr(Expr);
3988   }
3989 
3990   RetVal visitUMinExpr(const SCEVUMinExpr *Expr) {
3991     return visitAnyMinMaxExpr(Expr);
3992   }
3993 
3994   RetVal visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) {
3995     return visitAnyMinMaxExpr(Expr);
3996   }
3997 
3998   RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; }
3999 
4000   RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; }
4001 };
4002 
4003 } // namespace
4004 
4005 const SCEV *
4006 ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
4007                                          SmallVectorImpl<const SCEV *> &Ops) {
4008   assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4009          "Not a SCEVSequentialMinMaxExpr!");
4010   assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
4011   if (Ops.size() == 1)
4012     return Ops[0];
4013   if (Ops.size() == 2 &&
4014       any_of(Ops, [](const SCEV *Op) { return isa<SCEVConstant>(Op); }))
4015     return getMinMaxExpr(
4016         SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind),
4017         Ops);
4018 #ifndef NDEBUG
4019   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
4020   for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
4021     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
4022            "Operand types don't match!");
4023     assert(Ops[0]->getType()->isPointerTy() ==
4024                Ops[i]->getType()->isPointerTy() &&
4025            "min/max should be consistently pointerish");
4026   }
4027 #endif
4028 
4029   // Note that SCEVSequentialMinMaxExpr is *NOT* commutative,
4030   // so we can *NOT* do any kind of sorting of the expressions!
4031 
4032   // Check if we have created the same expression before.
4033   if (const SCEV *S = findExistingSCEVInCache(Kind, Ops))
4034     return S;
4035 
4036   // FIXME: there are *some* simplifications that we can do here.
4037 
4038   // Keep only the first instance of an operand.
4039   {
4040     SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind);
4041     bool Changed = Deduplicator.visit(Kind, Ops, Ops);
4042     if (Changed)
4043       return getSequentialMinMaxExpr(Kind, Ops);
4044   }
4045 
4046   // Check to see if one of the operands is of the same kind. If so, expand its
4047   // operands onto our operand list, and recurse to simplify.
4048   {
4049     unsigned Idx = 0;
4050     bool DeletedAny = false;
4051     while (Idx < Ops.size()) {
4052       if (Ops[Idx]->getSCEVType() != Kind) {
4053         ++Idx;
4054         continue;
4055       }
4056       const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]);
4057       Ops.erase(Ops.begin() + Idx);
4058       Ops.insert(Ops.begin() + Idx, SMME->op_begin(), SMME->op_end());
4059       DeletedAny = true;
4060     }
4061 
4062     if (DeletedAny)
4063       return getSequentialMinMaxExpr(Kind, Ops);
4064   }
4065 
4066   // Okay, it looks like we really DO need an expr.  Check to see if we
4067   // already have one, otherwise create a new one.
4068   FoldingSetNodeID ID;
4069   ID.AddInteger(Kind);
4070   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
4071     ID.AddPointer(Ops[i]);
4072   void *IP = nullptr;
4073   const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
4074   if (ExistingSCEV)
4075     return ExistingSCEV;
4076 
4077   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
4078   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
4079   SCEV *S = new (SCEVAllocator)
4080       SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
4081 
4082   UniqueSCEVs.InsertNode(S, IP);
4083   registerUser(S, Ops);
4084   return S;
4085 }
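
// Illustration (editor's note; %x, %y are hypothetical values): since
// umin_seq is not commutative, the operands keep their order, but the
// deduplicating visitor drops repeated operands and nested expressions of
// the same kind are inlined, e.g.
//   umin_seq(%x, umin_seq(%y, %x)) -> umin_seq(%x, %y)
// A two-operand umin_seq with a constant operand is canonicalized to the
// plain umin per the early exit above.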
4086 
4087 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
4088   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4089   return getSMaxExpr(Ops);
4090 }
4091 
4092 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
4093   return getMinMaxExpr(scSMaxExpr, Ops);
4094 }
4095 
4096 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
4097   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4098   return getUMaxExpr(Ops);
4099 }
4100 
4101 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
4102   return getMinMaxExpr(scUMaxExpr, Ops);
4103 }
4104 
4105 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
4106                                          const SCEV *RHS) {
4107   SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4108   return getSMinExpr(Ops);
4109 }
4110 
4111 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
4112   return getMinMaxExpr(scSMinExpr, Ops);
4113 }
4114 
4115 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS,
4116                                          bool Sequential) {
4117   SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4118   return getUMinExpr(Ops, Sequential);
4119 }
4120 
4121 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops,
4122                                          bool Sequential) {
4123   return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops)
4124                     : getMinMaxExpr(scUMinExpr, Ops);
4125 }
4126 
4127 const SCEV *
4128 ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
4129                                              ScalableVectorType *ScalableTy) {
4130   Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
4131   Constant *One = ConstantInt::get(IntTy, 1);
4132   Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
4133   // Note that the expression we created is the final expression; we don't
4134   // want to simplify it any further. Also, if we call a normal getSCEV(),
4135   // we'll end up in an endless recursion. So just create an SCEVUnknown.
4136   return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
4137 }
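
// Illustration (editor's note): for <vscale x 4 x i32> this builds the
// classic "sizeof" idiom
//   ptrtoint (<vscale x 4 x i32>* getelementptr (<vscale x 4 x i32>,
//             <vscale x 4 x i32>* null, i32 1) to i64)
// i.e. the byte distance from null to element 1, which lowers to
// vscale * 16. Wrapping it in a SCEVUnknown keeps SCEV from trying to
// look through it.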
4138 
4139 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
4140   if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
4141     return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
4142   // We can bypass creating a target-independent constant expression and then
4143   // folding it back into a ConstantInt. This is just a compile-time
4144   // optimization.
4145   return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
4146 }
4147 
4148 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
4149   if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
4150     return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
4151   // We can bypass creating a target-independent constant expression and then
4152   // folding it back into a ConstantInt. This is just a compile-time
4153   // optimization.
4154   return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
4155 }
4156 
4157 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
4158                                              StructType *STy,
4159                                              unsigned FieldNo) {
4160   // We can bypass creating a target-independent constant expression and then
4161   // folding it back into a ConstantInt. This is just a compile-time
4162   // optimization.
4163   return getConstant(
4164       IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
4165 }
4166 
4167 const SCEV *ScalarEvolution::getUnknown(Value *V) {
4168   // Don't attempt to do anything other than create a SCEVUnknown object
4169   // here.  createSCEV only calls getUnknown after checking for all other
4170   // interesting possibilities, and any other code that calls getUnknown
4171   // is doing so in order to hide a value from SCEV canonicalization.
4172 
4173   FoldingSetNodeID ID;
4174   ID.AddInteger(scUnknown);
4175   ID.AddPointer(V);
4176   void *IP = nullptr;
4177   if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
4178     assert(cast<SCEVUnknown>(S)->getValue() == V &&
4179            "Stale SCEVUnknown in uniquing map!");
4180     return S;
4181   }
4182   SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
4183                                             FirstUnknown);
4184   FirstUnknown = cast<SCEVUnknown>(S);
4185   UniqueSCEVs.InsertNode(S, IP);
4186   return S;
4187 }
4188 
4189 //===----------------------------------------------------------------------===//
4190 //            Basic SCEV Analysis and PHI Idiom Recognition Code
4191 //
4192 
4193 /// Test if values of the given type are analyzable within the SCEV
4194 /// framework. This primarily includes integer types, and it can optionally
4195 /// include pointer types if the ScalarEvolution class has access to
4196 /// target-specific information.
4197 bool ScalarEvolution::isSCEVable(Type *Ty) const {
4198   // Integers and pointers are always SCEVable.
4199   return Ty->isIntOrPtrTy();
4200 }
4201 
4202 /// Return the size in bits of the specified type, for which isSCEVable must
4203 /// return true.
4204 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
4205   assert(isSCEVable(Ty) && "Type is not SCEVable!");
4206   if (Ty->isPointerTy())
4207     return getDataLayout().getIndexTypeSizeInBits(Ty);
4208   return getDataLayout().getTypeSizeInBits(Ty);
4209 }
4210 
4211 /// Return a type with the same bitwidth as the given type and which represents
4212 /// how SCEV will treat the given type, for which isSCEVable must return
4213 /// true. For pointer types, this is the pointer index sized integer type.
4214 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
4215   assert(isSCEVable(Ty) && "Type is not SCEVable!");
4216 
4217   if (Ty->isIntegerTy())
4218     return Ty;
4219 
4220   // The only other supported type is a pointer.
4221   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
4222   return getDataLayout().getIndexType(Ty);
4223 }
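
// Example (editor's note): under a data layout with 64-bit pointers and a
// 64-bit index type, the effective SCEV type of any pointer type is i64,
// so pointer-typed SCEVs and their index arithmetic share one 64-bit
// integer domain.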
4224 
4225 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
4226   return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
4227 }
4228 
4229 bool ScalarEvolution::instructionCouldExistWithOperands(const SCEV *A,
4230                                                         const SCEV *B) {
4231   // For a valid use point to exist, the defining scope of one operand
4232   // must dominate the other.
4233   bool PreciseA, PreciseB;
4234   auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
4235   auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
4236   if (!PreciseA || !PreciseB)
4237     // Can't tell.
4238     return false;
4239   return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4240     DT.dominates(ScopeB, ScopeA);
4241 }
4242 
4243 
4244 const SCEV *ScalarEvolution::getCouldNotCompute() {
4245   return CouldNotCompute.get();
4246 }
4247 
4248 bool ScalarEvolution::checkValidity(const SCEV *S) const {
4249   bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
4250     auto *SU = dyn_cast<SCEVUnknown>(S);
4251     return SU && SU->getValue() == nullptr;
4252   });
4253 
4254   return !ContainsNulls;
4255 }
4256 
4257 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
4258   HasRecMapType::iterator I = HasRecMap.find(S);
4259   if (I != HasRecMap.end())
4260     return I->second;
4261 
4262   bool FoundAddRec =
4263       SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
4264   HasRecMap.insert({S, FoundAddRec});
4265   return FoundAddRec;
4266 }
4267 
4268 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
4269 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
4270 /// offset I, then return {S', I}, else return {\p S, nullptr}.
4271 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
4272   const auto *Add = dyn_cast<SCEVAddExpr>(S);
4273   if (!Add)
4274     return {S, nullptr};
4275 
4276   if (Add->getNumOperands() != 2)
4277     return {S, nullptr};
4278 
4279   auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
4280   if (!ConstOp)
4281     return {S, nullptr};
4282 
4283   return {Add->getOperand(1), ConstOp->getValue()};
4284 }
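
// Example (editor's note; %x, %y are hypothetical values):
//   splitAddExpr(5 + %x)      yields {%x, 5}
//   splitAddExpr(%x + %y)     yields {(%x + %y), nullptr}
//   splitAddExpr(1 + %x + %y) yields {(1 + %x + %y), nullptr}
// since the add must have exactly two operands with the constant first,
// which is where canonicalization places it.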
4285 
4286 /// Return the ValueOffsetPair set for \p S. \p S can be represented
4287 /// by the value and offset from any ValueOffsetPair in the set.
4288 ScalarEvolution::ValueOffsetPairSetVector *
4289 ScalarEvolution::getSCEVValues(const SCEV *S) {
4290   ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
4291   if (SI == ExprValueMap.end())
4292     return nullptr;
4293 #ifndef NDEBUG
4294   if (VerifySCEVMap) {
4295     // Check there is no dangling Value in the set returned.
4296     for (const auto &VE : SI->second)
4297       assert(ValueExprMap.count(VE.first));
4298   }
4299 #endif
4300   return &SI->second;
4301 }
4302 
4303 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
4304 /// cannot be used separately. eraseValueFromMap should be used to remove
4305 /// V from ValueExprMap and ExprValueMap at the same time.
4306 void ScalarEvolution::eraseValueFromMap(Value *V) {
4307   ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4308   if (I != ValueExprMap.end()) {
4309     const SCEV *S = I->second;
4310     // Remove {V, 0} from the set of ExprValueMap[S]
4311     if (auto *SV = getSCEVValues(S))
4312       SV->remove({V, nullptr});
4313 
4314     // Remove {V, Offset} from the set of ExprValueMap[Stripped]
4315     const SCEV *Stripped;
4316     ConstantInt *Offset;
4317     std::tie(Stripped, Offset) = splitAddExpr(S);
4318     if (Offset != nullptr) {
4319       if (auto *SV = getSCEVValues(Stripped))
4320         SV->remove({V, Offset});
4321     }
4322     ValueExprMap.erase(V);
4323   }
4324 }
4325 
4326 void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
4327   // A recursive query may have already computed the SCEV. It should be
4328   // equivalent, but may not necessarily be exactly the same, e.g. due to lazily
4329   // inferred nowrap flags.
4330   auto It = ValueExprMap.find_as(V);
4331   if (It == ValueExprMap.end()) {
4332     ValueExprMap.insert({SCEVCallbackVH(V, this), S});
4333     ExprValueMap[S].insert({V, nullptr});
4334   }
4335 }
4336 
4337 /// Return an existing SCEV if it exists, otherwise analyze the expression and
4338 /// create a new one.
4339 const SCEV *ScalarEvolution::getSCEV(Value *V) {
4340   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4341 
4342   const SCEV *S = getExistingSCEV(V);
4343   if (S == nullptr) {
4344     S = createSCEV(V);
4345     // During PHI resolution, it is possible to create two SCEVs for the same
4346     // V, so we need to double-check whether V->S has been inserted into
4347     // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
4348     std::pair<ValueExprMapType::iterator, bool> Pair =
4349         ValueExprMap.insert({SCEVCallbackVH(V, this), S});
4350     if (Pair.second) {
4351       ExprValueMap[S].insert({V, nullptr});
4352 
4353       // If S == Stripped + Offset, add Stripped -> {V, Offset} into
4354       // ExprValueMap.
4355       const SCEV *Stripped = S;
4356       ConstantInt *Offset = nullptr;
4357       std::tie(Stripped, Offset) = splitAddExpr(S);
4358       // If Stripped is a SCEVUnknown, don't bother to save
4359       // Stripped -> {V, offset}. It doesn't simplify and sometimes even
4360       // increases the complexity of the expansion code.
4361       // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
4362       // because it may generate add/sub instead of GEP in SCEV expansion.
4363       if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
4364           !isa<GetElementPtrInst>(V))
4365         ExprValueMap[Stripped].insert({V, Offset});
4366     }
4367   }
4368   return S;
4369 }
4370 
4371 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
4372   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4373 
4374   ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4375   if (I != ValueExprMap.end()) {
4376     const SCEV *S = I->second;
4377     assert(checkValidity(S) &&
4378            "existing SCEV has not been properly invalidated");
4379     return S;
4380   }
4381   return nullptr;
4382 }
4383 
4384 /// Return a SCEV corresponding to -V = -1*V
4385 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
4386                                              SCEV::NoWrapFlags Flags) {
4387   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4388     return getConstant(
4389                cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
4390 
4391   Type *Ty = V->getType();
4392   Ty = getEffectiveSCEVType(Ty);
4393   return getMulExpr(V, getMinusOne(Ty), Flags);
4394 }
4395 
4396 /// If Expr computes ~A, return A else return nullptr
4397 static const SCEV *MatchNotExpr(const SCEV *Expr) {
4398   const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
4399   if (!Add || Add->getNumOperands() != 2 ||
4400       !Add->getOperand(0)->isAllOnesValue())
4401     return nullptr;
4402 
4403   const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
4404   if (!AddRHS || AddRHS->getNumOperands() != 2 ||
4405       !AddRHS->getOperand(0)->isAllOnesValue())
4406     return nullptr;
4407 
4408   return AddRHS->getOperand(1);
4409 }
4410 
4411 /// Return a SCEV corresponding to ~V = -1-V
4412 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
4413   assert(!V->getType()->isPointerTy() && "Can't negate pointer");
4414 
4415   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4416     return getConstant(
4417                 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
4418 
4419   // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
4420   if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
4421     auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
4422       SmallVector<const SCEV *, 2> MatchedOperands;
4423       for (const SCEV *Operand : MME->operands()) {
4424         const SCEV *Matched = MatchNotExpr(Operand);
4425         if (!Matched)
4426           return (const SCEV *)nullptr;
4427         MatchedOperands.push_back(Matched);
4428       }
4429       return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
4430                            MatchedOperands);
4431     };
4432     if (const SCEV *Replaced = MatchMinMaxNegation(MME))
4433       return Replaced;
4434   }
4435 
4436   Type *Ty = V->getType();
4437   Ty = getEffectiveSCEVType(Ty);
4438   return getMinusSCEV(getMinusOne(Ty), V);
4439 }
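
// Derivation (editor's note): with ~V defined as -1 - V, the min/max fold
// above is De Morgan's law; e.g. for smin:
//   ~smin(~x, ~y) = -1 - smin(-1 - x, -1 - y)
//                 = -1 - (-1 - smax(x, y))
//                 = smax(x, y)
// MatchNotExpr recognizes each ~ operand structurally as (-1 + (-1 * A)).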
4440 
4441 const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
4442   assert(P->getType()->isPointerTy());
4443 
4444   if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) {
4445     // The base of an AddRec is the first operand.
4446     SmallVector<const SCEV *> Ops{AddRec->operands()};
4447     Ops[0] = removePointerBase(Ops[0]);
4448     // Don't try to transfer nowrap flags for now. We could in some cases
4449     // (for example, if pointer operand of the AddRec is a SCEVUnknown).
4450     return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap);
4451   }
4452   if (auto *Add = dyn_cast<SCEVAddExpr>(P)) {
4453     // The base of an Add is the pointer operand.
4454     SmallVector<const SCEV *> Ops{Add->operands()};
4455     const SCEV **PtrOp = nullptr;
4456     for (const SCEV *&AddOp : Ops) {
4457       if (AddOp->getType()->isPointerTy()) {
4458         assert(!PtrOp && "Cannot have multiple pointer ops");
4459         PtrOp = &AddOp;
4460       }
4461     }
4462     *PtrOp = removePointerBase(*PtrOp);
4463     // Don't try to transfer nowrap flags for now. We could in some cases
4464     // (for example, if the pointer operand of the Add is a SCEVUnknown).
4465     return getAddExpr(Ops);
4466   }
4467   // Any other expression must be a pointer base.
4468   return getZero(P->getType());
4469 }
4470 
4471 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
4472                                           SCEV::NoWrapFlags Flags,
4473                                           unsigned Depth) {
4474   // Fast path: X - X --> 0.
4475   if (LHS == RHS)
4476     return getZero(LHS->getType());
4477 
4478   // If we subtract two pointers with different pointer bases, bail.
4479   // Eventually, we're going to add an assertion to getMulExpr that we
4480   // can't multiply by a pointer.
4481   if (RHS->getType()->isPointerTy()) {
4482     if (!LHS->getType()->isPointerTy() ||
4483         getPointerBase(LHS) != getPointerBase(RHS))
4484       return getCouldNotCompute();
4485     LHS = removePointerBase(LHS);
4486     RHS = removePointerBase(RHS);
4487   }
4488 
4489   // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
4490   // makes it so that we cannot make much use of NUW.
4491   auto AddFlags = SCEV::FlagAnyWrap;
4492   const bool RHSIsNotMinSigned =
4493       !getSignedRangeMin(RHS).isMinSignedValue();
4494   if (hasFlags(Flags, SCEV::FlagNSW)) {
4495     // Let M be the minimum representable signed value. Then (-1)*RHS
4496     // signed-wraps if and only if RHS is M. That can happen even for
4497     // a NSW subtraction because e.g. (-1)*M signed-wraps even though
4498     // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
4499     // (-1)*RHS, we need to prove that RHS != M.
4500     //
4501     // If LHS is non-negative and we know that LHS - RHS does not
4502     // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
4503     // either by proving that RHS > M or that LHS >= 0.
4504     if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
4505       AddFlags = SCEV::FlagNSW;
4506     }
4507   }
4508 
4509   // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
4510   // RHS is NSW and LHS >= 0.
4511   //
4512   // The difficulty here is that the NSW flag may have been proven
4513   // relative to a loop that is to be found in a recurrence in LHS and
4514   // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
4515   // larger scope than intended.
4516   auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
4517 
4518   return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4519 }
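
// Example (editor's note): in i8, M = -128. The subtraction -100 - (-128)
// = 28 is exact, yet the rewritten form -100 + (-1 * -128) contains a
// multiply that signed-wraps, so the negation cannot be marked nsw unless
// RHS != M is known. Conversely, if LHS >= 0 then LHS - M would exceed
// 127, so a no-signed-wrap subtraction already implies RHS != M.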
4520 
4521 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4522                                                      unsigned Depth) {
4523   Type *SrcTy = V->getType();
4524   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4525          "Cannot truncate or zero extend with non-integer arguments!");
4526   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4527     return V;  // No conversion
4528   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4529     return getTruncateExpr(V, Ty, Depth);
4530   return getZeroExtendExpr(V, Ty, Depth);
4531 }
4532 
4533 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4534                                                      unsigned Depth) {
4535   Type *SrcTy = V->getType();
4536   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4537          "Cannot truncate or sign extend with non-integer arguments!");
4538   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4539     return V;  // No conversion
4540   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4541     return getTruncateExpr(V, Ty, Depth);
4542   return getSignExtendExpr(V, Ty, Depth);
4543 }
4544 
4545 const SCEV *
4546 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4547   Type *SrcTy = V->getType();
4548   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4549          "Cannot noop or zero extend with non-integer arguments!");
4550   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4551          "getNoopOrZeroExtend cannot truncate!");
4552   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4553     return V;  // No conversion
4554   return getZeroExtendExpr(V, Ty);
4555 }
4556 
4557 const SCEV *
4558 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4559   Type *SrcTy = V->getType();
4560   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4561          "Cannot noop or sign extend with non-integer arguments!");
4562   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4563          "getNoopOrSignExtend cannot truncate!");
4564   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4565     return V;  // No conversion
4566   return getSignExtendExpr(V, Ty);
4567 }
4568 
4569 const SCEV *
4570 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4571   Type *SrcTy = V->getType();
4572   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4573          "Cannot noop or any extend with non-integer arguments!");
4574   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4575          "getNoopOrAnyExtend cannot truncate!");
4576   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4577     return V;  // No conversion
4578   return getAnyExtendExpr(V, Ty);
4579 }
4580 
4581 const SCEV *
4582 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4583   Type *SrcTy = V->getType();
4584   assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4585          "Cannot truncate or noop with non-integer arguments!");
4586   assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
4587          "getTruncateOrNoop cannot extend!");
4588   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4589     return V;  // No conversion
4590   return getTruncateExpr(V, Ty);
4591 }
4592 
4593 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
4594                                                         const SCEV *RHS) {
4595   const SCEV *PromotedLHS = LHS;
4596   const SCEV *PromotedRHS = RHS;
4597 
4598   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
4599     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
4600   else
4601     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
4602 
4603   return getUMaxExpr(PromotedLHS, PromotedRHS);
4604 }
4605 
4606 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
4607                                                         const SCEV *RHS,
4608                                                         bool Sequential) {
4609   SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4610   return getUMinFromMismatchedTypes(Ops, Sequential);
4611 }
4612 
4613 const SCEV *
4614 ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
4615                                             bool Sequential) {
4616   assert(!Ops.empty() && "At least one operand must be!");
4617   // Trivial case.
4618   if (Ops.size() == 1)
4619     return Ops[0];
4620 
4621   // Find the max type first.
4622   Type *MaxType = nullptr;
4623   for (auto *S : Ops)
4624     if (MaxType)
4625       MaxType = getWiderType(MaxType, S->getType());
4626     else
4627       MaxType = S->getType();
4628   assert(MaxType && "Failed to find maximum type!");
4629 
4630   // Extend all ops to max type.
4631   SmallVector<const SCEV *, 2> PromotedOps;
4632   for (auto *S : Ops)
4633     PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
4634 
4635   // Generate umin.
4636   return getUMinExpr(PromotedOps, Sequential);
4637 }
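
// Example (editor's note; %a, %b are hypothetical values): for %a : i32
// and %b : i64, MaxType is i64 and the result is
//   umin((zext i32 %a to i64), %b)
// With Sequential=true the same promoted operands feed umin_seq instead.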
4638 
4639 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
4640   // A pointer operand may evaluate to a nonpointer expression, such as null.
4641   if (!V->getType()->isPointerTy())
4642     return V;
4643 
4644   while (true) {
4645     if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4646       V = AddRec->getStart();
4647     } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
4648       const SCEV *PtrOp = nullptr;
4649       for (const SCEV *AddOp : Add->operands()) {
4650         if (AddOp->getType()->isPointerTy()) {
4651           assert(!PtrOp && "Cannot have multiple pointer ops");
4652           PtrOp = AddOp;
4653         }
4654       }
4655       assert(PtrOp && "Must have pointer op");
4656       V = PtrOp;
4657     } else // Not something we can look further into.
4658       return V;
4659   }
4660 }
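
// Example (editor's note; %base, %i are hypothetical values): for
//   ((4 * %i) + {%base,+,16}<%loop>)
// the loop above steps from the add to its pointer operand, then from the
// AddRec to its start, returning the SCEVUnknown %base.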
4661 
4662 /// Push users of the given Instruction onto the given Worklist.
4663 static void PushDefUseChildren(Instruction *I,
4664                                SmallVectorImpl<Instruction *> &Worklist,
4665                                SmallPtrSetImpl<Instruction *> &Visited) {
4666   // Push the def-use children onto the Worklist stack.
4667   for (User *U : I->users()) {
4668     auto *UserInsn = cast<Instruction>(U);
4669     if (Visited.insert(UserInsn).second)
4670       Worklist.push_back(UserInsn);
4671   }
4672 }
4673 
4674 namespace {
4675 
4676 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
4677 /// expression if its loop is L. If its loop is not L, then use the AddRec
4678 /// itself when IgnoreOtherLoops is true; otherwise the rewrite cannot be
4679 /// done. If the SCEV contains a loop-variant SCEVUnknown, the rewrite
4680 /// cannot be done either.
4681 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4682 public:
4683   static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
4684                              bool IgnoreOtherLoops = true) {
4685     SCEVInitRewriter Rewriter(L, SE);
4686     const SCEV *Result = Rewriter.visit(S);
4687     if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4688       return SE.getCouldNotCompute();
4689     return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4690                ? SE.getCouldNotCompute()
4691                : Result;
4692   }
4693 
4694   const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4695     if (!SE.isLoopInvariant(Expr, L))
4696       SeenLoopVariantSCEVUnknown = true;
4697     return Expr;
4698   }
4699 
4700   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4701     // Only re-write AddRecExprs for this loop.
4702     if (Expr->getLoop() == L)
4703       return Expr->getStart();
4704     SeenOtherLoops = true;
4705     return Expr;
4706   }
4707 
4708   bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4709 
4710   bool hasSeenOtherLoops() { return SeenOtherLoops; }
4711 
4712 private:
4713   explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4714       : SCEVRewriteVisitor(SE), L(L) {}
4715 
4716   const Loop *L;
4717   bool SeenLoopVariantSCEVUnknown = false;
4718   bool SeenOtherLoops = false;
4719 };
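
// Example (editor's note; %a, %n are hypothetical values): rewriting
//   ({%a,+,4}<%L> + %n)
// with respect to %L replaces the AddRec with its start, yielding
// (%a + %n), the expression's value on entry to %L. An AddRec for some
// other loop is kept as-is if IgnoreOtherLoops is true, and otherwise
// defeats the rewrite.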
4720 
4721 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
4722 /// post-increment expression if its loop is L; if it is not L, use the
4723 /// AddRec itself. If the SCEV contains a loop-variant SCEVUnknown, the
4724 /// rewrite cannot be done.
4725 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
4726 public:
4727   static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
4728     SCEVPostIncRewriter Rewriter(L, SE);
4729     const SCEV *Result = Rewriter.visit(S);
4730     return Rewriter.hasSeenLoopVariantSCEVUnknown()
4731         ? SE.getCouldNotCompute()
4732         : Result;
4733   }
4734 
4735   const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4736     if (!SE.isLoopInvariant(Expr, L))
4737       SeenLoopVariantSCEVUnknown = true;
4738     return Expr;
4739   }
4740 
4741   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4742     // Only re-write AddRecExprs for this loop.
4743     if (Expr->getLoop() == L)
4744       return Expr->getPostIncExpr(SE);
4745     SeenOtherLoops = true;
4746     return Expr;
4747   }
4748 
4749   bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4750 
4751   bool hasSeenOtherLoops() { return SeenOtherLoops; }
4752 
4753 private:
4754   explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
4755       : SCEVRewriteVisitor(SE), L(L) {}
4756 
4757   const Loop *L;
4758   bool SeenLoopVariantSCEVUnknown = false;
4759   bool SeenOtherLoops = false;
4760 };
4761 
4762 /// This class evaluates the compare condition by matching it against the
4763 /// condition of the loop latch. If there is a match, we assume a true value
4764 /// for the condition while building SCEV nodes.
4765 class SCEVBackedgeConditionFolder
4766     : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
4767 public:
4768   static const SCEV *rewrite(const SCEV *S, const Loop *L,
4769                              ScalarEvolution &SE) {
4770     bool IsPosBECond = false;
4771     Value *BECond = nullptr;
4772     if (BasicBlock *Latch = L->getLoopLatch()) {
4773       BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
4774       if (BI && BI->isConditional()) {
4775         assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
4776                "Both outgoing branches should not target the same header!");
4777         BECond = BI->getCondition();
4778         IsPosBECond = BI->getSuccessor(0) == L->getHeader();
4779       } else {
4780         return S;
4781       }
4782     }
4783     SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
4784     return Rewriter.visit(S);
4785   }
4786 
4787   const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4788     const SCEV *Result = Expr;
4789     bool InvariantF = SE.isLoopInvariant(Expr, L);
4790 
4791     if (!InvariantF) {
4792       Instruction *I = cast<Instruction>(Expr->getValue());
4793       switch (I->getOpcode()) {
4794       case Instruction::Select: {
4795         SelectInst *SI = cast<SelectInst>(I);
4796         Optional<const SCEV *> Res =
4797             compareWithBackedgeCondition(SI->getCondition());
4798         if (Res.hasValue()) {
4799           bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
4800           Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
4801         }
4802         break;
4803       }
4804       default: {
4805         Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
4806         if (Res.hasValue())
4807           Result = Res.getValue();
4808         break;
4809       }
4810       }
4811     }
4812     return Result;
4813   }
4814 
4815 private:
4816   explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
4817                                        bool IsPosBECond, ScalarEvolution &SE)
4818       : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
4819         IsPositiveBECond(IsPosBECond) {}
4820 
4821   Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);
4822 
4823   const Loop *L;
4824   /// Loop back condition.
4825   Value *BackedgeCond = nullptr;
4826   /// Set to true if loop back is on positive branch condition.
4827   bool IsPositiveBECond;
4828 };
4829 
4830 Optional<const SCEV *>
4831 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
4832 
4833   // If the value matches the backedge condition for the loop latch,
4834   // return a constant evolution node based on the branch that is
4835   // taken to loop back.
4836   if (BackedgeCond == IC)
4837     return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
4838                             : SE.getZero(Type::getInt1Ty(SE.getContext()));
4839   return None;
4840 }
4841 
4842 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
4843 public:
4844   static const SCEV *rewrite(const SCEV *S, const Loop *L,
4845                              ScalarEvolution &SE) {
4846     SCEVShiftRewriter Rewriter(L, SE);
4847     const SCEV *Result = Rewriter.visit(S);
4848     return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4849   }
4850 
4851   const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4852     // Only allow AddRecExprs for this loop.
4853     if (!SE.isLoopInvariant(Expr, L))
4854       Valid = false;
4855     return Expr;
4856   }
4857 
4858   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4859     if (Expr->getLoop() == L && Expr->isAffine())
4860       return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
4861     Valid = false;
4862     return Expr;
4863   }
4864 
4865   bool isValid() { return Valid; }
4866 
4867 private:
4868   explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
4869       : SCEVRewriteVisitor(SE), L(L) {}
4870 
4871   const Loop *L;
4872   bool Valid = true;
4873 };
4874 
4875 } // end anonymous namespace
4876 
4877 SCEV::NoWrapFlags
4878 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
4879   if (!AR->isAffine())
4880     return SCEV::FlagAnyWrap;
4881 
4882   using OBO = OverflowingBinaryOperator;
4883 
4884   SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
4885 
4886   if (!AR->hasNoSignedWrap()) {
4887     ConstantRange AddRecRange = getSignedRange(AR);
4888     ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
4889 
4890     auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4891         Instruction::Add, IncRange, OBO::NoSignedWrap);
4892     if (NSWRegion.contains(AddRecRange))
4893       Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
4894   }
4895 
4896   if (!AR->hasNoUnsignedWrap()) {
4897     ConstantRange AddRecRange = getUnsignedRange(AR);
4898     ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
4899 
4900     auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4901         Instruction::Add, IncRange, OBO::NoUnsignedWrap);
4902     if (NUWRegion.contains(AddRecRange))
4903       Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
4904   }
4905 
4906   return Result;
4907 }
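
// Example (editor's note): for an i8 AddRec whose step is known to lie in
// [1, 2], makeGuaranteedNoWrapRegion(Add, [1,2], NSW) is [-128, 125]: any
// value there can be incremented by at most 2 without exceeding 127. If
// the AddRec's own signed range is, say, [0, 100], it is contained in the
// region and FlagNSW is set.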
4908 
4909 SCEV::NoWrapFlags
4910 ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4911   SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4912 
4913   if (AR->hasNoSignedWrap())
4914     return Result;
4915 
4916   if (!AR->isAffine())
4917     return Result;
4918 
4919   const SCEV *Step = AR->getStepRecurrence(*this);
4920   const Loop *L = AR->getLoop();
4921 
4922   // Check whether the backedge-taken count is SCEVCouldNotCompute.
4923   // Note that this serves two purposes: It filters out loops that are
4924   // simply not analyzable, and it covers the case where this code is
4925   // being called from within backedge-taken count analysis, such that
4926   // attempting to ask for the backedge-taken count would likely result
4927   // in infinite recursion. In the later case, the analysis code will
4928   // cope with a conservative value, and it will take care to purge
4929   // that value once it has finished.
4930   const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4931 
4932   // Normally, in the cases we can prove no-overflow via a
4933   // backedge guarding condition, we can also compute a backedge
4934   // taken count for the loop.  The exceptions are assumptions and
4935   // guards present in the loop -- SCEV is not great at exploiting
4936   // these to compute max backedge taken counts, but can still use
4937   // these to prove lack of overflow.  Use this fact to avoid
4938   // doing extra work that may not pay off.
4939 
4940   if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4941       AC.assumptions().empty())
4942     return Result;
4943 
4944   // If the backedge is guarded by a comparison with the pre-inc value, the
4945   // addrec is safe. Also, if the entry is guarded by a comparison with the
4946   // start value and the backedge is guarded by a comparison with the post-inc
4947   // value, the addrec is safe.
4948   ICmpInst::Predicate Pred;
4949   const SCEV *OverflowLimit =
4950     getSignedOverflowLimitForStep(Step, &Pred, this);
4951   if (OverflowLimit &&
4952       (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
4953        isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
4954     Result = setFlags(Result, SCEV::FlagNSW);
4955   }
4956   return Result;
4957 }

4958 SCEV::NoWrapFlags
4959 ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4960   SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4961 
4962   if (AR->hasNoUnsignedWrap())
4963     return Result;
4964 
4965   if (!AR->isAffine())
4966     return Result;
4967 
4968   const SCEV *Step = AR->getStepRecurrence(*this);
4969   unsigned BitWidth = getTypeSizeInBits(AR->getType());
4970   const Loop *L = AR->getLoop();
4971 
4972   // Check whether the backedge-taken count is SCEVCouldNotCompute.
4973   // Note that this serves two purposes: It filters out loops that are
4974   // simply not analyzable, and it covers the case where this code is
4975   // being called from within backedge-taken count analysis, such that
4976   // attempting to ask for the backedge-taken count would likely result
4977   // in infinite recursion. In the latter case, the analysis code will
4978   // cope with a conservative value, and it will take care to purge
4979   // that value once it has finished.
4980   const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4981 
4982   // Normally, in the cases we can prove no-overflow via a
4983   // backedge guarding condition, we can also compute a backedge
4984   // taken count for the loop.  The exceptions are assumptions and
4985   // guards present in the loop -- SCEV is not great at exploiting
4986   // these to compute max backedge taken counts, but can still use
4987   // these to prove lack of overflow.  Use this fact to avoid
4988   // doing extra work that may not pay off.
4989 
4990   if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4991       AC.assumptions().empty())
4992     return Result;
4993 
4994   // If the backedge is guarded by a comparison with the pre-inc value, the
4995   // addrec is safe. Also, if the entry is guarded by a comparison with the
4996   // start value and the backedge is guarded by a comparison with the post-inc
4997   // value, the addrec is safe.
4998   if (isKnownPositive(Step)) {
4999     const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
5000                                 getUnsignedRangeMax(Step));
5001     if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
5002         isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
5003       Result = setFlags(Result, SCEV::FlagNUW);
5004     }
5005   }
5006 
5007   return Result;
5008 }
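
// Example (editor's note): the limit N above is APInt::getMinValue minus
// the maximum step, which wraps to 2^BitWidth - max(Step). For an i8
// AddRec with Step in [1, 4], N = 252: if the backedge only runs while
// AR <u 252, adding at most 4 can never wrap past 255, so FlagNUW is
// justified.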
5009 
5010 namespace {
5011 
5012 /// Represents an abstract binary operation.  This may exist as a
5013 /// normal instruction or constant expression, or may have been
5014 /// derived from an expression tree.
5015 struct BinaryOp {
5016   unsigned Opcode;
5017   Value *LHS;
5018   Value *RHS;
5019   bool IsNSW = false;
5020   bool IsNUW = false;
5021 
5022   /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
5023   /// constant expression.
5024   Operator *Op = nullptr;
5025 
5026   explicit BinaryOp(Operator *Op)
5027       : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
5028         Op(Op) {
5029     if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
5030       IsNSW = OBO->hasNoSignedWrap();
5031       IsNUW = OBO->hasNoUnsignedWrap();
5032     }
5033   }
5034 
5035   explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
5036                     bool IsNUW = false)
5037       : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5038 };
5039 
5040 } // end anonymous namespace
5041 
5042 /// Try to map \p V into a BinaryOp, and return \c None on failure.
5043 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
5044   auto *Op = dyn_cast<Operator>(V);
5045   if (!Op)
5046     return None;
5047 
5048   // Implementation detail: all the cleverness here should happen without
5049   // creating new SCEV expressions -- our caller knows tricks to avoid creating
5050   // SCEV expressions when possible, and we should not break that.
5051 
5052   switch (Op->getOpcode()) {
5053   case Instruction::Add:
5054   case Instruction::Sub:
5055   case Instruction::Mul:
5056   case Instruction::UDiv:
5057   case Instruction::URem:
5058   case Instruction::And:
5059   case Instruction::Or:
5060   case Instruction::AShr:
5061   case Instruction::Shl:
5062     return BinaryOp(Op);
5063 
5064   case Instruction::Xor:
5065     if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
5066       // If the RHS of the xor is a signmask, then this is just an add.
5067       // Instcombine turns an add of a signmask into an xor as a strength reduction.
5068       if (RHSC->getValue().isSignMask())
5069         return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
5070     return BinaryOp(Op);
5071 
5072   case Instruction::LShr:
5073     // Turn a logical shift right by a constant into an unsigned divide.
5074     if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
5075       uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
5076 
5077       // If the shift count is not less than the bitwidth, the result of
5078       // the shift is undefined. Don't try to analyze it, because the
5079       // resolution chosen here may differ from the resolution chosen in
5080       // other parts of the compiler.
5081       if (SA->getValue().ult(BitWidth)) {
5082         Constant *X =
5083             ConstantInt::get(SA->getContext(),
5084                              APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
5085         return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
5086       }
5087     }
5088     return BinaryOp(Op);
5089 
5090   case Instruction::ExtractValue: {
5091     auto *EVI = cast<ExtractValueInst>(Op);
5092     if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5093       break;
5094 
5095     auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
5096     if (!WO)
5097       break;
5098 
5099     Instruction::BinaryOps BinOp = WO->getBinaryOp();
5100     bool Signed = WO->isSigned();
5101     // TODO: Should add nuw/nsw flags for mul as well.
5102     if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
5103       return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5104 
5105     // Now that we know that all uses of the arithmetic-result component of
5106     // CI are guarded by the overflow check, we can go ahead and pretend
5107     // that the arithmetic is non-overflowing.
5108     return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5109                     /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
5110   }
5111 
5112   default:
5113     break;
5114   }
5115 
5116   // Recognize the loop.decrement.reg intrinsic; as it has exactly the same
5117   // semantics as a Sub, return a binary sub expression.
5118   if (auto *II = dyn_cast<IntrinsicInst>(V))
5119     if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5120       return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));
5121 
5122   return None;
5123 }
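
// Examples (editor's note; %x, %a, %b are hypothetical values):
//   xor i32 %x, -2147483648          -> BinaryOp(Add,  %x, signmask)
//   lshr i32 %x, 3                   -> BinaryOp(UDiv, %x, 8)
//   llvm.loop.decrement.reg(%a, %b)  -> BinaryOp(Sub,  %a, %b)
// per the special cases above.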
5124 
5125 /// Helper function to createAddRecFromPHIWithCasts. We have a phi
5126 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
5127 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
5128 /// way. This function checks if \p Op, an operand of this SCEVAddExpr,
5129 /// follows one of the following patterns:
5130 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
5131 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
5132 /// If the SCEV expression of \p Op conforms with one of the expected patterns
5133 /// we return the type of the truncation operation, and indicate whether the
5134 /// truncated type should be treated as signed/unsigned by setting
5135 /// \p Signed to true/false, respectively.
5136 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
5137                                bool &Signed, ScalarEvolution &SE) {
5138   // The case where Op == SymbolicPHI (that is, with no type conversions on
5139   // the way) is handled by the regular add recurrence creating logic and
5140   // would have already been triggered in createAddRecFromPHI. Reaching it here
5141   // means that createAddRecFromPHI had failed for this PHI before (e.g.,
5142   // because one of the other operands of the SCEVAddExpr updating this PHI is
5143   // not invariant).
5144   //
5145   // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
5146   // this case predicates that allow us to prove that Op == SymbolicPHI will
5147   // be added.
5148   if (Op == SymbolicPHI)
5149     return nullptr;
5150 
5151   unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
5152   unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
5153   if (SourceBits != NewBits)
5154     return nullptr;
5155 
5156   const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
5157   const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
5158   if (!SExt && !ZExt)
5159     return nullptr;
5160   const SCEVTruncateExpr *Trunc =
5161       SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
5162            : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
5163   if (!Trunc)
5164     return nullptr;
5165   const SCEV *X = Trunc->getOperand();
5166   if (X != SymbolicPHI)
5167     return nullptr;
5168   Signed = SExt != nullptr;
5169   return Trunc->getType();
5170 }
5171 
5172 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
5173   if (!PN->getType()->isIntegerTy())
5174     return nullptr;
5175   const Loop *L = LI.getLoopFor(PN->getParent());
5176   if (!L || L->getHeader() != PN->getParent())
5177     return nullptr;
5178   return L;
5179 }
5180 
5181 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
5182 // computation that updates the phi matches the following pattern:
5183 //   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
5184 // which corresponds to a phi->trunc->sext/zext->add->phi update chain.
5185 // If so, try to see if it can be rewritten as an AddRecExpr under some
5186 // Predicates. If successful, return them as a pair. Also cache the results
5187 // of the analysis.
5188 //
5189 // Example usage scenario:
5190 //    Say the Rewriter is called for the following SCEV:
5191 //         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
5192 //    where:
5193 //         %X = phi i64 (%Start, %BEValue)
5194 //    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
5195 //    and call this function with %SymbolicPHI = %X.
5196 //
5197 //    The analysis will find that the value coming around the backedge has
5198 //    the following SCEV:
5199 //         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
5200 //    Upon concluding that this matches the desired pattern, the function
5201 //    will return the pair {NewAddRec, SmallPredsVec} where:
5202 //         NewAddRec = {%Start,+,%Step}
5203 //         SmallPredsVec = {P1, P2, P3} as follows:
5204 //           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
5205 //           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
5206 //           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
5207 //    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
5208 //    under the predicates {P1,P2,P3}.
5209 //    This predicated rewrite will be cached in PredicatedSCEVRewrites:
5210 //         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
5211 //
5212 // TODO's:
5213 //
5214 // 1) Extend the Induction descriptor to also support inductions that involve
5215 //    casts: When needed (namely, when we are called in the context of the
5216 //    vectorizer induction analysis), a Set of cast instructions will be
5217 //    populated by this method, and provided back to isInductionPHI. This is
5218 //    needed to allow the vectorizer to properly record them to be ignored by
5219 //    the cost model and to avoid vectorizing them (otherwise these casts,
5220 //    which are redundant under the runtime overflow checks, will be
5221 //    vectorized, which can be costly).
5222 //
5223 // 2) Support additional induction/PHISCEV patterns: We also want to support
5224 //    inductions where the sext-trunc / zext-trunc operations (partly) occur
5225 //    after the induction update operation (the induction increment):
5226 //
5227 //      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
5228 //    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
5229 //
5230 //      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
5231 //    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
5232 //
5233 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
5234 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5235 ScalarEvolution::createAddRecFromPHIWithCastsImpl(
    const SCEVUnknown *SymbolicPHI) {
5236   SmallVector<const SCEVPredicate *, 3> Predicates;
5237 
5238   // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
5239   // return an AddRec expression under some predicate.
5240 
5241   auto *PN = cast<PHINode>(SymbolicPHI->getValue());
5242   const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
5243   assert(L && "Expecting an integer loop header phi");
5244 
5245   // The loop may have multiple entrances or multiple exits; we can analyze
5246   // this phi as an addrec if it has a unique entry value and a unique
5247   // backedge value.
5248   Value *BEValueV = nullptr, *StartValueV = nullptr;
5249   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5250     Value *V = PN->getIncomingValue(i);
5251     if (L->contains(PN->getIncomingBlock(i))) {
5252       if (!BEValueV) {
5253         BEValueV = V;
5254       } else if (BEValueV != V) {
5255         BEValueV = nullptr;
5256         break;
5257       }
5258     } else if (!StartValueV) {
5259       StartValueV = V;
5260     } else if (StartValueV != V) {
5261       StartValueV = nullptr;
5262       break;
5263     }
5264   }
5265   if (!BEValueV || !StartValueV)
5266     return None;
5267 
5268   const SCEV *BEValue = getSCEV(BEValueV);
5269 
5270   // If the value coming around the backedge is an add with the symbolic
5271   // value we just inserted, possibly with casts that we can ignore under
5272   // an appropriate runtime guard, then we found a simple induction variable!
5273   const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
5274   if (!Add)
5275     return None;
5276 
5277   // If there is a single occurrence of the symbolic value, possibly
5278   // casted, replace it with a recurrence.
5279   unsigned FoundIndex = Add->getNumOperands();
5280   Type *TruncTy = nullptr;
5281   bool Signed;
5282   for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5283     if ((TruncTy =
5284              isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
5285       if (FoundIndex == e) {
5286         FoundIndex = i;
5287         break;
5288       }
5289 
5290   if (FoundIndex == Add->getNumOperands())
5291     return None;
5292 
5293   // Create an add with everything but the specified operand.
5294   SmallVector<const SCEV *, 8> Ops;
5295   for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5296     if (i != FoundIndex)
5297       Ops.push_back(Add->getOperand(i));
5298   const SCEV *Accum = getAddExpr(Ops);
5299 
5300   // The runtime checks will not be valid if the step amount is
5301   // varying inside the loop.
5302   if (!isLoopInvariant(Accum, L))
5303     return None;
5304 
5305   // *** Part2: Create the predicates
5306 
5307   // Analysis was successful: we have a phi-with-cast pattern for which we
5308   // can return an AddRec expression under the following predicates:
5309   //
5310   // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
5311   //     fits within the truncated type (does not overflow) for i = 0 to n-1.
5312   // P2: An Equal predicate that guarantees that
5313   //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
5314   // P3: An Equal predicate that guarantees that
5315   //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
5316   //
5317   // As we next prove, the above predicates guarantee that:
5318   //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
5319   //
5320   //
5321   // More formally, we want to prove that:
5322   //     Expr(i+1) = Start + (i+1) * Accum
5323   //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
5324   //
5325   // Given that:
5326   // 1) Expr(0) = Start
5327   // 2) Expr(1) = Start + Accum
5328   //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
5329   // 3) Induction hypothesis (step i):
5330   //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
5331   //
5332   // Proof:
5333   //  Expr(i+1) =
5334   //   = Start + (i+1)*Accum
5335   //   = (Start + i*Accum) + Accum
5336   //   = Expr(i) + Accum
5337   //   = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
5338   //                                                             :: from step i
5339   //
5340   //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
5341   //
5342   //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
5343   //     + (Ext ix (Trunc iy (Accum) to ix) to iy)
5344   //     + Accum                                                     :: from P3
5345   //
5346   //   = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
5347   //     + Accum                            :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
5348   //
5349   //   = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
5350   //   = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
5351   //
5352   // By induction, the same applies to all iterations 1<=i<n.
5354 
5355   // Create a truncated addrec for which we will add a no overflow check (P1).
5356   const SCEV *StartVal = getSCEV(StartValueV);
5357   const SCEV *PHISCEV =
5358       getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
5359                     getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
5360 
5361   // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
5362   // E.g., if the truncated Accum is 0 and StartVal is a constant, then
5363   // PHISCEV will be constant.
5364   //
5365   // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
5366   // add P1.
5367   if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5368     SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
5369         Signed ? SCEVWrapPredicate::IncrementNSSW
5370                : SCEVWrapPredicate::IncrementNUSW;
5371     const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
5372     Predicates.push_back(AddRecPred);
5373   }
5374 
5375   // Create the Equal Predicates P2,P3:
5376 
5377   // It is possible that the predicates P2 and/or P3 are computable at
5378   // compile time due to StartVal and/or Accum being constants.
5379   // If either one is, then we can check that now and escape if either P2
5380   // or P3 is false.
5381 
5382   // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
5383   // for each of StartVal and Accum
5384   auto getExtendedExpr = [&](const SCEV *Expr,
5385                              bool CreateSignExtend) -> const SCEV * {
5386     assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
5387     const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
5388     const SCEV *ExtendedExpr =
5389         CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
5390                          : getZeroExtendExpr(TruncatedExpr, Expr->getType());
5391     return ExtendedExpr;
5392   };
5393 
5394   // Given:
5395   //  ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
5396   //               = getExtendedExpr(Expr)
5397   // Determine whether the predicate P: Expr == ExtendedExpr
5398   // is known to be false at compile time.
5399   auto PredIsKnownFalse = [&](const SCEV *Expr,
5400                               const SCEV *ExtendedExpr) -> bool {
5401     return Expr != ExtendedExpr &&
5402            isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
5403   };
5404 
5405   const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
5406   if (PredIsKnownFalse(StartVal, StartExtended)) {
5407     LLVM_DEBUG(dbgs() << "P2 is compile-time false\n");
5408     return None;
5409   }
5410 
5411   // The Step is always Signed (because the overflow checks are either
5412   // NSSW or NUSW).
5413   const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
5414   if (PredIsKnownFalse(Accum, AccumExtended)) {
5415     LLVM_DEBUG(dbgs() << "P3 is compile-time false\n");
5416     return None;
5417   }
5418 
5419   auto AppendPredicate = [&](const SCEV *Expr,
5420                              const SCEV *ExtendedExpr) -> void {
5421     if (Expr != ExtendedExpr &&
5422         !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
5423       const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
5424       LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
5425       Predicates.push_back(Pred);
5426     }
5427   };
5428 
5429   AppendPredicate(StartVal, StartExtended);
5430   AppendPredicate(Accum, AccumExtended);
5431 
5432   // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
5433   // which the casts have been folded away. The caller can rewrite SymbolicPHI
5434   // into NewAR if it will also add the runtime overflow checks specified in
5435   // Predicates.
5436   auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
5437 
5438   std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5439       std::make_pair(NewAR, Predicates);
5440   // Remember the result of the analysis for this SCEV at this location.
5441   PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
5442   return PredRewrite;
5443 }
5444 
5445 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5446 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
5447   auto *PN = cast<PHINode>(SymbolicPHI->getValue());
5448   const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
5449   if (!L)
5450     return None;
5451 
5452   // Check to see if we already analyzed this PHI.
5453   auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5454   if (I != PredicatedSCEVRewrites.end()) {
5455     std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5456         I->second;
5457     // Analysis was done before and failed to create an AddRec:
5458     if (Rewrite.first == SymbolicPHI)
5459       return None;
5460     // Analysis was done before and succeeded in creating an AddRec under
5461     // a predicate:
5462     assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
5463     assert(!(Rewrite.second).empty() && "Expected to find Predicates");
5464     return Rewrite;
5465   }
5466 
5467   Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5468     Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5469 
5470   // Record in the cache that the analysis failed
5471   if (!Rewrite) {
5472     SmallVector<const SCEVPredicate *, 3> Predicates;
5473     PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5474     return None;
5475   }
5476 
5477   return Rewrite;
5478 }
5479 
5480 // FIXME: This utility is currently required because the Rewriter does not
5481 // rewrite this expression:
5482 // {0, +, (sext ix (trunc iy to ix) to iy)}
5483 // into {0, +, %step},
5484 // even when the following Equal predicate exists:
5485 // "%step == (sext ix (trunc iy to ix) to iy)".
5486 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
5487     const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
5488   if (AR1 == AR2)
5489     return true;
5490 
5491   auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
5492     if (Expr1 != Expr2 && !Preds->implies(SE.getEqualPredicate(Expr1, Expr2)) &&
5493         !Preds->implies(SE.getEqualPredicate(Expr2, Expr1)))
5494       return false;
5495     return true;
5496   };
5497 
5498   if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
5499       !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
5500     return false;
5501   return true;
5502 }
5503 
5504 /// A helper function for createAddRecFromPHI to handle simple cases.
5505 ///
5506 /// This function tries to find an AddRec expression for the simplest (yet most
5507 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
5508 /// If it fails, createAddRecFromPHI will use a more general, but slow,
5509 /// technique for finding the AddRec expression.
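///
/// For example (illustrative IR), for
///   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
///   %iv.next = add nsw i32 %iv, 1
/// this returns the AddRec {0,+,1}<nsw> for %iv.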
5510 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
5511                                                       Value *BEValueV,
5512                                                       Value *StartValueV) {
5513   const Loop *L = LI.getLoopFor(PN->getParent());
5514   assert(L && L->getHeader() == PN->getParent());
5515   assert(BEValueV && StartValueV);
5516 
5517   auto BO = MatchBinaryOp(BEValueV, DT);
5518   if (!BO)
5519     return nullptr;
5520 
5521   if (BO->Opcode != Instruction::Add)
5522     return nullptr;
5523 
5524   const SCEV *Accum = nullptr;
5525   if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5526     Accum = getSCEV(BO->RHS);
5527   else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5528     Accum = getSCEV(BO->LHS);
5529 
5530   if (!Accum)
5531     return nullptr;
5532 
5533   SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5534   if (BO->IsNUW)
5535     Flags = setFlags(Flags, SCEV::FlagNUW);
5536   if (BO->IsNSW)
5537     Flags = setFlags(Flags, SCEV::FlagNSW);
5538 
5539   const SCEV *StartVal = getSCEV(StartValueV);
5540   const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5541   insertValueToMap(PN, PHISCEV);
5542 
5543   // We can add Flags to the post-inc expression only if we
5544   // know that it is *undefined behavior* for BEValueV to
5545   // overflow.
5546   if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) {
5547     assert(isLoopInvariant(Accum, L) &&
5548            "Accum is defined outside L, but is not invariant?");
5549     if (isAddRecNeverPoison(BEInst, L))
5550       (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5551   }
5552 
5553   return PHISCEV;
5554 }
5555 
5556 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5557   const Loop *L = LI.getLoopFor(PN->getParent());
5558   if (!L || L->getHeader() != PN->getParent())
5559     return nullptr;
5560 
5561   // The loop may have multiple entrances or multiple exits; we can analyze
5562   // this phi as an addrec if it has a unique entry value and a unique
5563   // backedge value.
5564   Value *BEValueV = nullptr, *StartValueV = nullptr;
5565   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5566     Value *V = PN->getIncomingValue(i);
5567     if (L->contains(PN->getIncomingBlock(i))) {
5568       if (!BEValueV) {
5569         BEValueV = V;
5570       } else if (BEValueV != V) {
5571         BEValueV = nullptr;
5572         break;
5573       }
5574     } else if (!StartValueV) {
5575       StartValueV = V;
5576     } else if (StartValueV != V) {
5577       StartValueV = nullptr;
5578       break;
5579     }
5580   }
5581   if (!BEValueV || !StartValueV)
5582     return nullptr;
5583 
5584   assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5585          "PHI node already processed?");
5586 
5587   // First, try to find an AddRec expression without creating a fictitious
5588   // symbolic value for PN.
5589   if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5590     return S;
5591 
5592   // Handle PHI node value symbolically.
5593   const SCEV *SymbolicName = getUnknown(PN);
5594   insertValueToMap(PN, SymbolicName);
5595 
5596   // Using this symbolic name for the PHI, analyze the value coming around
5597   // the back-edge.
5598   const SCEV *BEValue = getSCEV(BEValueV);
5599 
5600   // NOTE: If BEValue is loop invariant, we know that the PHI node just
5601   // has a special value for the first iteration of the loop.
5602 
5603   // If the value coming around the backedge is an add with the symbolic
5604   // value we just inserted, then we found a simple induction variable!
5605   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5606     // If there is a single occurrence of the symbolic value, replace it
5607     // with a recurrence.
5608     unsigned FoundIndex = Add->getNumOperands();
5609     for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5610       if (Add->getOperand(i) == SymbolicName)
5611         if (FoundIndex == e) {
5612           FoundIndex = i;
5613           break;
5614         }
5615 
5616     if (FoundIndex != Add->getNumOperands()) {
5617       // Create an add with everything but the specified operand.
5618       SmallVector<const SCEV *, 8> Ops;
5619       for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5620         if (i != FoundIndex)
5621           Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5622                                                              L, *this));
5623       const SCEV *Accum = getAddExpr(Ops);
5624 
5625       // This is not a valid addrec if the step amount varies on each loop
5626       // iteration and is not itself an addrec in this loop.
5627       if (isLoopInvariant(Accum, L) ||
5628           (isa<SCEVAddRecExpr>(Accum) &&
5629            cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5630         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5631 
5632         if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5633           if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5634             if (BO->IsNUW)
5635               Flags = setFlags(Flags, SCEV::FlagNUW);
5636             if (BO->IsNSW)
5637               Flags = setFlags(Flags, SCEV::FlagNSW);
5638           }
5639         } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5640           // If the increment is an inbounds GEP, then we know the address
5641           // space cannot be wrapped around. We cannot make any guarantee
5642           // about signed or unsigned overflow because pointers are
5643           // unsigned but we may have a negative index from the base
5644           // pointer. We can guarantee that no unsigned wrap occurs if the
5645           // indices form a positive value.
5646           if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5647             Flags = setFlags(Flags, SCEV::FlagNW);
5648 
5649             const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5650             if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5651               Flags = setFlags(Flags, SCEV::FlagNUW);
5652           }
5653 
5654           // We cannot transfer nuw and nsw flags from subtraction
5655           // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5656           // for instance.
5657         }
5658 
5659         const SCEV *StartVal = getSCEV(StartValueV);
5660         const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5661 
5662         // Okay, for the entire analysis of this edge we assumed the PHI
5663         // to be symbolic.  We now need to go back and purge all of the
5664         // entries for the scalars that use the symbolic expression.
5665         forgetMemoizedResults(SymbolicName);
5666         insertValueToMap(PN, PHISCEV);
5667 
5668         // We can add Flags to the post-inc expression only if we
5669         // know that it is *undefined behavior* for BEValueV to
5670         // overflow.
5671         if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5672           if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5673             (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5674 
5675         return PHISCEV;
5676       }
5677     }
5678   } else {
5679     // Otherwise, this could be a loop like this:
5680     //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
5681     // In this case, j = {1,+,1}  and BEValue is j.
5682     // Because the other in-value of i (0) fits the evolution of BEValue,
5683     // i really is an addrec evolution.
5684     //
5685     // We can generalize this by saying that i is the shifted value of BEValue
5686     // by one iteration:
5687     //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
5688     const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5689     const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5690     if (Shifted != getCouldNotCompute() &&
5691         Start != getCouldNotCompute()) {
5692       const SCEV *StartVal = getSCEV(StartValueV);
5693       if (Start == StartVal) {
5694         // Okay, for the entire analysis of this edge we assumed the PHI
5695         // to be symbolic.  We now need to go back and purge all of the
5696         // entries for the scalars that use the symbolic expression.
5697         forgetMemoizedResults(SymbolicName);
5698         insertValueToMap(PN, Shifted);
5699         return Shifted;
5700       }
5701     }
5702   }
5703 
5704   // Remove the temporary PHI node SCEV that has been inserted while intending
5705   // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
5706   // as it would prevent later (possibly simpler) SCEV expressions from being
5707   // added to the ValueExprMap.
5708   eraseValueFromMap(PN);
5709 
5710   return nullptr;
5711 }
5712 
5713 // Checks if the SCEV S is available at BB.  S is considered available at BB
5714 // if S can be materialized at BB without introducing a fault.
5715 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5716                                BasicBlock *BB) {
5717   struct CheckAvailable {
5718     bool TraversalDone = false;
5719     bool Available = true;
5720 
5721     const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
5722     BasicBlock *BB = nullptr;
5723     DominatorTree &DT;
5724 
5725     CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5726       : L(L), BB(BB), DT(DT) {}
5727 
5728     bool setUnavailable() {
5729       TraversalDone = true;
5730       Available = false;
5731       return false;
5732     }
5733 
5734     bool follow(const SCEV *S) {
5735       switch (S->getSCEVType()) {
5736       case scConstant:
5737       case scPtrToInt:
5738       case scTruncate:
5739       case scZeroExtend:
5740       case scSignExtend:
5741       case scAddExpr:
5742       case scMulExpr:
5743       case scUMaxExpr:
5744       case scSMaxExpr:
5745       case scUMinExpr:
5746       case scSMinExpr:
5747       case scSequentialUMinExpr:
5748         // These expressions are available if their operand(s) is/are.
5749         return true;
5750 
5751       case scAddRecExpr: {
5752         // We allow add recurrences on the loop that BB is in, or on some
5753         // outer loop.  This guarantees availability because the value of the
5754         // add recurrence at BB is simply the "current" value of the induction
5755         // variable.  We can relax this in the future; for instance an add
5756         // recurrence on a sibling dominating loop is also available at BB.
5757         const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5758         if (L && (ARLoop == L || ARLoop->contains(L)))
5759           return true;
5760 
5761         return setUnavailable();
5762       }
5763 
5764       case scUnknown: {
5765         // For SCEVUnknown, we check for simple dominance.
5766         const auto *SU = cast<SCEVUnknown>(S);
5767         Value *V = SU->getValue();
5768 
5769         if (isa<Argument>(V))
5770           return false;
5771 
5772         if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5773           return false;
5774 
5775         return setUnavailable();
5776       }
5777 
5778       case scUDivExpr:
5779       case scCouldNotCompute:
5780         // We do not try to be smart about these at all.
5781         return setUnavailable();
5782       }
5783       llvm_unreachable("Unknown SCEV kind!");
5784     }
5785 
5786     bool isDone() { return TraversalDone; }
5787   };
5788 
5789   CheckAvailable CA(L, BB, DT);
5790   SCEVTraversal<CheckAvailable> ST(CA);
5791 
5792   ST.visitAll(S);
5793   return CA.Available;
5794 }
5795 
5796 // Try to match a control flow sequence that branches out at BI and merges back
5797 // at Merge into a "C ? LHS : RHS" select pattern.  Return true on a successful
5798 // match.
5799 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
5800                           Value *&C, Value *&LHS, Value *&RHS) {
5801   C = BI->getCondition();
5802 
5803   BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
5804   BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));
5805 
5806   if (!LeftEdge.isSingleEdge())
5807     return false;
5808 
5809   assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");
5810 
5811   Use &LeftUse = Merge->getOperandUse(0);
5812   Use &RightUse = Merge->getOperandUse(1);
5813 
5814   if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
5815     LHS = LeftUse;
5816     RHS = RightUse;
5817     return true;
5818   }
5819 
5820   if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
5821     LHS = RightUse;
5822     RHS = LeftUse;
5823     return true;
5824   }
5825 
5826   return false;
5827 }
5828 
5829 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
5830   auto IsReachable =
5831       [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
5832   if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
5833     const Loop *L = LI.getLoopFor(PN->getParent());
5834 
5835     // We don't want to break LCSSA, even in a SCEV expression tree.
5836     for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
5837       if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
5838         return nullptr;
5839 
5840     // Try to match
5841     //
5842     //  br %cond, label %left, label %right
5843     // left:
5844     //  br label %merge
5845     // right:
5846     //  br label %merge
5847     // merge:
5848     //  V = phi [ %x, %left ], [ %y, %right ]
5849     //
5850     // as "select %cond, %x, %y"
5851 
5852     BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
5853     assert(IDom && "At least the entry block should dominate PN");
5854 
5855     auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
5856     Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;
5857 
5858     if (BI && BI->isConditional() &&
5859         BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
5860         IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
5861         IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
5862       return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
5863   }
5864 
5865   return nullptr;
5866 }
5867 
5868 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
5869   if (const SCEV *S = createAddRecFromPHI(PN))
5870     return S;
5871 
5872   if (const SCEV *S = createNodeFromSelectLikePHI(PN))
5873     return S;
5874 
5875   // If the PHI has a single incoming value, follow that value, unless the
5876   // PHI's incoming blocks are in a different loop, in which case doing so
5877   // risks breaking LCSSA form. Instcombine would normally zap these, but
5878   // it doesn't have DominatorTree information, so it may miss cases.
5879   if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
5880     if (LI.replacementPreservesLCSSAForm(PN, V))
5881       return getSCEV(V);
5882 
5883   // If it's not a loop phi, we can't handle it yet.
5884   return getUnknown(PN);
5885 }
5886 
5887 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
5888                                                       Value *Cond,
5889                                                       Value *TrueVal,
5890                                                       Value *FalseVal) {
5891   // Handle "constant" branch or select. This can occur for instance when a
5892   // loop pass transforms an inner loop and moves on to process the outer loop.
5893   if (auto *CI = dyn_cast<ConstantInt>(Cond))
5894     return getSCEV(CI->isOne() ? TrueVal : FalseVal);
5895 
5896   // Try to match some simple smax or umax patterns.
5897   auto *ICI = dyn_cast<ICmpInst>(Cond);
5898   if (!ICI)
5899     return getUnknown(I);
5900 
5901   Value *LHS = ICI->getOperand(0);
5902   Value *RHS = ICI->getOperand(1);
5903 
5904   switch (ICI->getPredicate()) {
5905   case ICmpInst::ICMP_SLT:
5906   case ICmpInst::ICMP_SLE:
5907   case ICmpInst::ICMP_ULT:
5908   case ICmpInst::ICMP_ULE:
5909     std::swap(LHS, RHS);
5910     LLVM_FALLTHROUGH;
5911   case ICmpInst::ICMP_SGT:
5912   case ICmpInst::ICMP_SGE:
5913   case ICmpInst::ICMP_UGT:
5914   case ICmpInst::ICMP_UGE:
5915     // a > b ? a+x : b+x  ->  max(a, b)+x
5916     // a > b ? b+x : a+x  ->  min(a, b)+x
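    // For example (illustrative IR):
    //   %cmp = icmp sgt i32 %a, %b
    //   %sel = select i1 %cmp, i32 %a, i32 %b
    // is recognized as smax(%a, %b), with the x offset above being zero.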
5917     if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
5918       bool Signed = ICI->isSigned();
5919       const SCEV *LA = getSCEV(TrueVal);
5920       const SCEV *RA = getSCEV(FalseVal);
5921       const SCEV *LS = getSCEV(LHS);
5922       const SCEV *RS = getSCEV(RHS);
5923       if (LA->getType()->isPointerTy()) {
5924         // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA.
5925         // Need to make sure we can't produce weird expressions involving
5926         // negated pointers.
5927         if (LA == LS && RA == RS)
5928           return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS);
5929         if (LA == RS && RA == LS)
5930           return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS);
5931       }
5932       auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * {
5933         if (Op->getType()->isPointerTy()) {
5934           Op = getLosslessPtrToIntExpr(Op);
5935           if (isa<SCEVCouldNotCompute>(Op))
5936             return Op;
5937         }
5938         if (Signed)
5939           Op = getNoopOrSignExtend(Op, I->getType());
5940         else
5941           Op = getNoopOrZeroExtend(Op, I->getType());
5942         return Op;
5943       };
5944       LS = CoerceOperand(LS);
5945       RS = CoerceOperand(RS);
5946       if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS))
5947         break;
5948       const SCEV *LDiff = getMinusSCEV(LA, LS);
5949       const SCEV *RDiff = getMinusSCEV(RA, RS);
5950       if (LDiff == RDiff)
5951         return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS),
5952                           LDiff);
5953       LDiff = getMinusSCEV(LA, RS);
5954       RDiff = getMinusSCEV(RA, LS);
5955       if (LDiff == RDiff)
5956         return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS),
5957                           LDiff);
5958     }
5959     break;
5960   case ICmpInst::ICMP_NE:
5961     // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
5962     if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5963         isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5964       const SCEV *One = getOne(I->getType());
5965       const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5966       const SCEV *LA = getSCEV(TrueVal);
5967       const SCEV *RA = getSCEV(FalseVal);
5968       const SCEV *LDiff = getMinusSCEV(LA, LS);
5969       const SCEV *RDiff = getMinusSCEV(RA, One);
5970       if (LDiff == RDiff)
5971         return getAddExpr(getUMaxExpr(One, LS), LDiff);
5972     }
5973     break;
5974   case ICmpInst::ICMP_EQ:
5975     // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
5976     if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5977         isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5978       const SCEV *One = getOne(I->getType());
5979       const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5980       const SCEV *LA = getSCEV(TrueVal);
5981       const SCEV *RA = getSCEV(FalseVal);
5982       const SCEV *LDiff = getMinusSCEV(LA, One);
5983       const SCEV *RDiff = getMinusSCEV(RA, LS);
5984       if (LDiff == RDiff)
5985         return getAddExpr(getUMaxExpr(One, LS), LDiff);
5986     }
5987     break;
5988   default:
5989     break;
5990   }
5991 
5992   return getUnknown(I);
5993 }
5994 
5995 /// Expand GEP instructions into add and multiply operations. This allows them
5996 /// to be analyzed by regular SCEV code.
5997 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
5998   // Don't attempt to analyze GEPs over unsized objects.
5999   if (!GEP->getSourceElementType()->isSized())
6000     return getUnknown(GEP);
6001 
6002   SmallVector<const SCEV *, 4> IndexExprs;
6003   for (Value *Index : GEP->indices())
6004     IndexExprs.push_back(getSCEV(Index));
6005   return getGEPExpr(GEP, IndexExprs);
6006 }
6007 
6008 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
6009   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
6010     return C->getAPInt().countTrailingZeros();
6011 
6012   if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S))
6013     return GetMinTrailingZeros(I->getOperand());
6014 
6015   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
6016     return std::min(GetMinTrailingZeros(T->getOperand()),
6017                     (uint32_t)getTypeSizeInBits(T->getType()));
6018 
6019   if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
6020     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
6021     return OpRes == getTypeSizeInBits(E->getOperand()->getType())
6022                ? getTypeSizeInBits(E->getType())
6023                : OpRes;
6024   }
6025 
6026   if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
6027     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
6028     return OpRes == getTypeSizeInBits(E->getOperand()->getType())
6029                ? getTypeSizeInBits(E->getType())
6030                : OpRes;
6031   }
6032 
6033   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
6034     // The result is the min of all the operands' results.
6035     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
6036     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
6037       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
6038     return MinOpRes;
6039   }
6040 
6041   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
6042     // The result is the sum of all the operands' results.
6043     uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
6044     uint32_t BitWidth = getTypeSizeInBits(M->getType());
6045     for (unsigned i = 1, e = M->getNumOperands();
6046          SumOpRes != BitWidth && i != e; ++i)
6047       SumOpRes =
6048           std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
6049     return SumOpRes;
6050   }
6051 
6052   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
6053     // The result is the min of all the operands' results.
6054     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
6055     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
6056       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
6057     return MinOpRes;
6058   }
6059 
6060   if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
6061     // The result is the min of all the operands' results.
6062     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
6063     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
6064       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
6065     return MinOpRes;
6066   }
6067 
6068   if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
6069     // The result is the min of all the operands' results.
6070     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
6071     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
6072       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
6073     return MinOpRes;
6074   }
6075 
6076   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
6077     // For a SCEVUnknown, ask ValueTracking.
6078     KnownBits Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
6079     return Known.countMinTrailingZeros();
6080   }
6081 
6082   // SCEVUDivExpr: conservatively return zero known trailing zeros.
6083   return 0;
6084 }
6085 
6086 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
6087   auto I = MinTrailingZerosCache.find(S);
6088   if (I != MinTrailingZerosCache.end())
6089     return I->second;
6090 
6091   uint32_t Result = GetMinTrailingZerosImpl(S);
6092   auto InsertPair = MinTrailingZerosCache.insert({S, Result});
6093   assert(InsertPair.second && "Should insert a new key");
6094   return InsertPair.first->second;
6095 }
6096 
6097 /// Helper method to assign a range to V from metadata present in the IR.
6098 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
6099   if (Instruction *I = dyn_cast<Instruction>(V))
6100     if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
6101       return getConstantRangeFromMetadata(*MD);
6102 
6103   return None;
6104 }
6105 
6106 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
6107                                      SCEV::NoWrapFlags Flags) {
6108   if (AddRec->getNoWrapFlags(Flags) != Flags) {
6109     AddRec->setNoWrapFlags(Flags);
6110     UnsignedRanges.erase(AddRec);
6111     SignedRanges.erase(AddRec);
6112   }
6113 }
6114 
6115 ConstantRange ScalarEvolution::
6116 getRangeForUnknownRecurrence(const SCEVUnknown *U) {
6117   const DataLayout &DL = getDataLayout();
6118 
6119   unsigned BitWidth = getTypeSizeInBits(U->getType());
6120   const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);
6121 
6122   // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
6123   // use information about the trip count to improve our available range.  Note
6124   // that the trip count independent cases are already handled by known bits.
6125   // WARNING: The definition of recurrence used here is subtly different from
6126   // the one used by AddRec (and thus most of this file).  Step is allowed to
6127   // be arbitrarily loop varying here, where AddRec allows only loop invariant
6128   // and other addrecs in the same loop (for non-affine addrecs).  The code
6129   // below intentionally handles the case where step is not loop invariant.
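  // For example (illustrative IR), the loop phi
  //   %p    = phi i8 [ %start, %entry ], [ %next, %backedge ]
  //   %next = lshr i8 %p, %amt
  // is matched here as <%start, lshr, %amt> even if %amt varies between
  // iterations.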
6130   auto *P = dyn_cast<PHINode>(U->getValue());
6131   if (!P)
6132     return FullSet;
6133 
6134   // Make sure that no Phi input comes from an unreachable block. Otherwise,
6135   // even the values that are not available in these blocks may come from them,
6136   // and this leads to a false-positive recurrence test.
6137   for (auto *Pred : predecessors(P->getParent()))
6138     if (!DT.isReachableFromEntry(Pred))
6139       return FullSet;
6140 
6141   BinaryOperator *BO;
6142   Value *Start, *Step;
6143   if (!matchSimpleRecurrence(P, BO, Start, Step))
6144     return FullSet;
6145 
6146   // If we found a recurrence in reachable code, we must be in a loop. Note
6147   // that BO might be in some subloop of L, and that's completely okay.
6148   auto *L = LI.getLoopFor(P->getParent());
6149   assert(L && L->getHeader() == P->getParent());
6150   if (!L->contains(BO->getParent()))
6151     // NOTE: This bailout should be an assert instead.  However, asserting
6152     // the condition here exposes a case where LoopFusion is querying SCEV
6153     // with malformed loop information in the midst of the transform.
6154     // There doesn't appear to be an obvious fix, so for the moment bailout
6155     // until the caller issue can be fixed.  PR49566 tracks the bug.
6156     return FullSet;
6157 
6158   // TODO: Extend to other opcodes such as mul and div.
6159   switch (BO->getOpcode()) {
6160   default:
6161     return FullSet;
6162   case Instruction::AShr:
6163   case Instruction::LShr:
6164   case Instruction::Shl:
6165     break;
6166   }
6167 
6168   if (BO->getOperand(0) != P)
6169     // TODO: Handle the power function forms some day.
6170     return FullSet;
6171 
6172   unsigned TC = getSmallConstantMaxTripCount(L);
6173   if (!TC || TC >= BitWidth)
6174     return FullSet;
6175 
6176   auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT);
6177   auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT);
6178   assert(KnownStart.getBitWidth() == BitWidth &&
6179          KnownStep.getBitWidth() == BitWidth);
6180 
6181   // Compute total shift amount, being careful of overflow and bitwidths.
6182   auto MaxShiftAmt = KnownStep.getMaxValue();
6183   APInt TCAP(BitWidth, TC-1);
6184   bool Overflow = false;
6185   auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6186   if (Overflow)
6187     return FullSet;
6188 
6189   switch (BO->getOpcode()) {
6190   default:
6191     llvm_unreachable("filtered out above");
6192   case Instruction::AShr: {
6193     // For each ashr, three cases:
6194     //   shift = 0 => unchanged value
6195     //   saturation => 0 or -1
6196     //   other => a value closer to zero (of the same sign)
6197     // Thus, the end value is closer to zero than the start.
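    // For example, with i8 start = -16: ashr by 1 gives -8 and ashr by 2
    // gives -4; each result stays negative and moves toward zero.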
6198     auto KnownEnd = KnownBits::ashr(KnownStart,
6199                                     KnownBits::makeConstant(TotalShift));
6200     if (KnownStart.isNonNegative())
6201       // Analogous to lshr (simply not yet canonicalized)
6202       return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
6203                                         KnownStart.getMaxValue() + 1);
6204     if (KnownStart.isNegative())
6205       // End >=u Start && End <=s Start
6206       return ConstantRange::getNonEmpty(KnownStart.getMinValue(),
6207                                         KnownEnd.getMaxValue() + 1);
6208     break;
6209   }
6210   case Instruction::LShr: {
6211     // For each lshr, three cases:
6212     //   shift = 0 => unchanged value
6213     //   saturation => 0
6214     //   other => a smaller positive number
6215     // Thus, the low end of the unsigned range is the last value produced.
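    // For example, with i8 start = 100: lshr by 1 gives 50 and lshr by 2
    // gives 25; the values only move down toward zero.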
6216     auto KnownEnd = KnownBits::lshr(KnownStart,
6217                                     KnownBits::makeConstant(TotalShift));
6218     return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
6219                                       KnownStart.getMaxValue() + 1);
6220   }
6221   case Instruction::Shl: {
6222     // Iff no bits are shifted out, the value increases on every shift.
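    // For example, i8 start = 3 has six leading zeros, so shifting left by up
    // to five positions drops no bits and the value grows monotonically from 3
    // to at most 96.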
6223     auto KnownEnd = KnownBits::shl(KnownStart,
6224                                    KnownBits::makeConstant(TotalShift));
6225     if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6226       return ConstantRange(KnownStart.getMinValue(),
6227                            KnownEnd.getMaxValue() + 1);
6228     break;
6229   }
6230   }
6231   return FullSet;
6232 }
6233 
6234 /// Determine the range for a particular SCEV.  If SignHint is
6235 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
6236 /// with a "cleaner" unsigned (resp. signed) representation.
6237 const ConstantRange &
6238 ScalarEvolution::getRangeRef(const SCEV *S,
6239                              ScalarEvolution::RangeSignHint SignHint) {
6240   DenseMap<const SCEV *, ConstantRange> &Cache =
6241       SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6242                                                        : SignedRanges;
6243   ConstantRange::PreferredRangeType RangeType =
6244       SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
6245           ? ConstantRange::Unsigned : ConstantRange::Signed;
6246 
6247   // See if we've computed this range already.
6248   DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
6249   if (I != Cache.end())
6250     return I->second;
6251 
6252   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
6253     return setRange(C, SignHint, ConstantRange(C->getAPInt()));
6254 
6255   unsigned BitWidth = getTypeSizeInBits(S->getType());
6256   ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
6257   using OBO = OverflowingBinaryOperator;
6258 
6259   // If the value has known trailing zeros, the maximum value will have those
6260   // trailing zeros as well.
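  // For example, at bit width 8 with TZ = 2 (a multiple of 4), the unsigned
  // maximum tightens from 255 to 252 and the signed maximum from 127 to 124.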
6261   uint32_t TZ = GetMinTrailingZeros(S);
6262   if (TZ != 0) {
6263     if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
6264       ConservativeResult =
6265           ConstantRange(APInt::getMinValue(BitWidth),
6266                         APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
6267     else
6268       ConservativeResult = ConstantRange(
6269           APInt::getSignedMinValue(BitWidth),
6270           APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
6271   }
6272 
6273   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
6274     ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
6275     unsigned WrapType = OBO::AnyWrap;
6276     if (Add->hasNoSignedWrap())
6277       WrapType |= OBO::NoSignedWrap;
6278     if (Add->hasNoUnsignedWrap())
6279       WrapType |= OBO::NoUnsignedWrap;
6280     for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
6281       X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint),
6282                           WrapType, RangeType);
6283     return setRange(Add, SignHint,
6284                     ConservativeResult.intersectWith(X, RangeType));
6285   }
6286 
6287   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
6288     ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
6289     for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
6290       X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
6291     return setRange(Mul, SignHint,
6292                     ConservativeResult.intersectWith(X, RangeType));
6293   }
6294 
6295   if (isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) {
6296     Intrinsic::ID ID;
6297     switch (S->getSCEVType()) {
6298     case scUMaxExpr:
6299       ID = Intrinsic::umax;
6300       break;
6301     case scSMaxExpr:
6302       ID = Intrinsic::smax;
6303       break;
6304     case scUMinExpr:
6305     case scSequentialUMinExpr:
6306       ID = Intrinsic::umin;
6307       break;
6308     case scSMinExpr:
6309       ID = Intrinsic::smin;
6310       break;
6311     default:
6312       llvm_unreachable("Unknown SCEVMinMaxExpr/SCEVSequentialMinMaxExpr.");
6313     }
6314 
6315     const auto *NAry = cast<SCEVNAryExpr>(S);
6316     ConstantRange X = getRangeRef(NAry->getOperand(0), SignHint);
6317     for (unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
6318       X = X.intrinsic(ID, {X, getRangeRef(NAry->getOperand(i), SignHint)});
6319     return setRange(S, SignHint,
6320                     ConservativeResult.intersectWith(X, RangeType));
6321   }
6322 
6323   if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
6324     ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
6325     ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
6326     return setRange(UDiv, SignHint,
6327                     ConservativeResult.intersectWith(X.udiv(Y), RangeType));
6328   }
6329 
6330   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
6331     ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
6332     return setRange(ZExt, SignHint,
6333                     ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
6334                                                      RangeType));
6335   }
6336 
6337   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
6338     ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
6339     return setRange(SExt, SignHint,
6340                     ConservativeResult.intersectWith(X.signExtend(BitWidth),
6341                                                      RangeType));
6342   }
6343 
6344   if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) {
6345     ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
6346     return setRange(PtrToInt, SignHint, X);
6347   }
6348 
6349   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
6350     ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
6351     return setRange(Trunc, SignHint,
6352                     ConservativeResult.intersectWith(X.truncate(BitWidth),
6353                                                      RangeType));
6354   }
6355 
6356   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
6357     // If there's no unsigned wrap, the value will never be less than its
6358     // initial value.
6359     if (AddRec->hasNoUnsignedWrap()) {
6360       APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
6361       if (!UnsignedMinValue.isZero())
6362         ConservativeResult = ConservativeResult.intersectWith(
6363             ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
6364     }
6365 
6366     // If there's no signed wrap, and all the operands except the initial
6367     // value have the same sign or are zero, the value won't ever be:
6368     // 1: smaller than the initial value if the operands are non-negative,
6369     // 2: bigger than the initial value if the operands are non-positive.
6370     // In both cases, the value cannot cross the signed min/max boundary.
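    // For example, {10,+,3}<nsw> can never drop below its start 10 and never
    // wraps past the signed maximum, so [10, SINT_MAX] is a sound bound.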
6371     if (AddRec->hasNoSignedWrap()) {
6372       bool AllNonNeg = true;
6373       bool AllNonPos = true;
6374       for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
6375         if (!isKnownNonNegative(AddRec->getOperand(i)))
6376           AllNonNeg = false;
6377         if (!isKnownNonPositive(AddRec->getOperand(i)))
6378           AllNonPos = false;
6379       }
6380       if (AllNonNeg)
6381         ConservativeResult = ConservativeResult.intersectWith(
6382             ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
6383                                        APInt::getSignedMinValue(BitWidth)),
6384             RangeType);
6385       else if (AllNonPos)
6386         ConservativeResult = ConservativeResult.intersectWith(
6387             ConstantRange::getNonEmpty(
6388                 APInt::getSignedMinValue(BitWidth),
6389                 getSignedRangeMax(AddRec->getStart()) + 1),
6390             RangeType);
6391     }
6392 
6393     // TODO: non-affine addrec
6394     if (AddRec->isAffine()) {
6395       const SCEV *MaxBECount =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
6396       if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
6397           getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
6398         auto RangeFromAffine = getRangeForAffineAR(
6399             AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
6400             BitWidth);
6401         ConservativeResult =
6402             ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6403 
6404         auto RangeFromFactoring = getRangeViaFactoring(
6405             AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
6406             BitWidth);
6407         ConservativeResult =
6408             ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6409       }
6410 
6411       // Now try symbolic BE count and more powerful methods.
6412       if (UseExpensiveRangeSharpening) {
6413         const SCEV *SymbolicMaxBECount =
6414             getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
6415         if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
6416             getTypeSizeInBits(SymbolicMaxBECount->getType()) <= BitWidth &&
6417             AddRec->hasNoSelfWrap()) {
6418           auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6419               AddRec, SymbolicMaxBECount, BitWidth, SignHint);
6420           ConservativeResult =
6421               ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6422         }
6423       }
6424     }
6425 
6426     return setRange(AddRec, SignHint, std::move(ConservativeResult));
6427   }
6428 
6429   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
6430 
6431     // Check if the IR explicitly contains !range metadata.
6432     Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
6433     if (MDRange.hasValue())
6434       ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
6435                                                             RangeType);
6436 
6437     // Use facts about recurrences in the underlying IR.  Note that add
6438     // recurrences are AddRecExprs and thus don't hit this path.  This
6439     // primarily handles shift recurrences.
6440     auto CR = getRangeForUnknownRecurrence(U);
6441     ConservativeResult = ConservativeResult.intersectWith(CR);
6442 
6443     // See if ValueTracking can give us a useful range.
6444     const DataLayout &DL = getDataLayout();
6445     KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
6446     if (Known.getBitWidth() != BitWidth)
6447       Known = Known.zextOrTrunc(BitWidth);
6448 
6449     // ValueTracking may be able to compute a tighter result for the number of
6450     // sign bits than for the value of those sign bits.
6451     unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
6452     if (U->getType()->isPointerTy()) {
6453       // If the pointer size is larger than the index type size, this can cause
6454       // NS to be larger than BitWidth. Compensate for this.
6455       unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
6456       int ptrIdxDiff = ptrSize - BitWidth;
6457       if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
6458         NS -= ptrIdxDiff;
6459     }
6460 
6461     if (NS > 1) {
6462       // If we know any of the sign bits, we know all of the sign bits.
6463       if (!Known.Zero.getHiBits(NS).isZero())
6464         Known.Zero.setHighBits(NS);
6465       if (!Known.One.getHiBits(NS).isZero())
6466         Known.One.setHighBits(NS);
6467     }
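         // For illustration (i8, hypothetical values): with NS = 3, the top three
         // bits are all copies of the sign bit, so the value lies in
         // [ashr(SINT_MIN, 2), ashr(SINT_MAX, 2) + 1) = [-32, 32).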
6468 
6469     if (Known.getMinValue() != Known.getMaxValue() + 1)
6470       ConservativeResult = ConservativeResult.intersectWith(
6471           ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
6472           RangeType);
6473     if (NS > 1)
6474       ConservativeResult = ConservativeResult.intersectWith(
6475           ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
6476                         APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
6477           RangeType);
6478 
6479     // A range of Phi is a subset of union of all ranges of its input.
6480     if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
6481       // Make sure that we do not run over cycled Phis.
6482       if (PendingPhiRanges.insert(Phi).second) {
6483         ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
6484         for (auto &Op : Phi->operands()) {
6485           auto OpRange = getRangeRef(getSCEV(Op), SignHint);
6486           RangeFromOps = RangeFromOps.unionWith(OpRange);
6487           // No point to continue if we already have a full set.
6488           if (RangeFromOps.isFullSet())
6489             break;
6490         }
6491         ConservativeResult =
6492             ConservativeResult.intersectWith(RangeFromOps, RangeType);
6493         bool Erased = PendingPhiRanges.erase(Phi);
6494         assert(Erased && "Failed to erase Phi properly?");
6495         (void) Erased;
6496       }
6497     }
6498 
6499     return setRange(U, SignHint, std::move(ConservativeResult));
6500   }
6501 
6502   return setRange(S, SignHint, std::move(ConservativeResult));
6503 }
6504 
6505 // Given a StartRange, Step and MaxBECount for an expression, compute the range
6506 // of values that the expression can take. Initially, the expression has a value
6507 // from StartRange and is then changed by Step up to MaxBECount times. The
6508 // Signed argument defines whether we treat Step as signed or unsigned.
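     // For illustration (i8, hypothetical values): StartRange = [0, 5), Step = 2
     // and MaxBECount = 10 let the value grow by at most 20, yielding [0, 25).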
6509 static ConstantRange getRangeForAffineARHelper(APInt Step,
6510                                                const ConstantRange &StartRange,
6511                                                const APInt &MaxBECount,
6512                                                unsigned BitWidth, bool Signed) {
6513   // If either Step or MaxBECount is 0, then the expression won't change, and we
6514   // just need to return the initial range.
6515   if (Step == 0 || MaxBECount == 0)
6516     return StartRange;
6517 
6518   // If we don't know anything about the initial value (i.e. StartRange is
6519   // FullRange), then we don't know anything about the final range either.
6520   // Return FullRange.
6521   if (StartRange.isFullSet())
6522     return ConstantRange::getFull(BitWidth);
6523 
6524   // If Step is signed and negative, then we use its absolute value, but we also
6525   // note that we're moving in the opposite direction.
6526   bool Descending = Signed && Step.isNegative();
6527 
6528   if (Signed)
6529     // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
6530     // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
6531     // These equations hold due to the well-defined wrap-around behavior of
6532     // APInt.
6533     Step = Step.abs();
6534 
6535   // Check if the total change, Step * MaxBECount, exceeds the full span of
6536   // BitWidth. If it does, the expression is guaranteed to overflow.
6537   if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
6538     return ConstantRange::getFull(BitWidth);
6539 
6540   // Offset is by how much the expression can change. Checks above guarantee no
6541   // overflow here.
6542   APInt Offset = Step * MaxBECount;
6543 
6544   // The minimum value of the final range matches the minimum of StartRange if
6545   // the expression is increasing, and is that minimum decreased by Offset
6546   // otherwise. The maximum value matches the maximum of StartRange if the
6547   // expression is decreasing, and is that maximum increased by Offset otherwise.
6548   APInt StartLower = StartRange.getLower();
6549   APInt StartUpper = StartRange.getUpper() - 1;
6550   APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
6551                                    : (StartUpper + std::move(Offset));
6552 
6553   // It's possible that the new minimum/maximum value will fall into the initial
6554   // range (due to wrap around). This means that the expression can take any
6555   // value in this bitwidth, and we have to return full range.
6556   if (StartRange.contains(MovedBoundary))
6557     return ConstantRange::getFull(BitWidth);
6558 
6559   APInt NewLower =
6560       Descending ? std::move(MovedBoundary) : std::move(StartLower);
6561   APInt NewUpper =
6562       Descending ? std::move(StartUpper) : std::move(MovedBoundary);
6563   NewUpper += 1;
6564 
6565   // No overflow detected, return the [NewLower, NewUpper) range.
6566   return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
6567 }
6568 
6569 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
6570                                                    const SCEV *Step,
6571                                                    const SCEV *MaxBECount,
6572                                                    unsigned BitWidth) {
6573   assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
6574          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
6575          "Precondition!");
6576 
6577   MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
6578   APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
6579 
6580   // First, consider step signed.
6581   ConstantRange StartSRange = getSignedRange(Start);
6582   ConstantRange StepSRange = getSignedRange(Step);
6583 
6584   // If Step can be both positive and negative, we need to find ranges for the
6585   // maximum absolute step values in both directions and union them.
6586   ConstantRange SR =
6587       getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
6588                                 MaxBECountValue, BitWidth, /* Signed = */ true);
6589   SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
6590                                               StartSRange, MaxBECountValue,
6591                                               BitWidth, /* Signed = */ true));
6592 
6593   // Next, consider step unsigned.
6594   ConstantRange UR = getRangeForAffineARHelper(
6595       getUnsignedRangeMax(Step), getUnsignedRange(Start),
6596       MaxBECountValue, BitWidth, /* Signed = */ false);
6597 
6598   // Finally, intersect signed and unsigned ranges.
6599   return SR.intersectWith(UR, ConstantRange::Smallest);
6600 }
6601 
6602 ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
6603     const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
6604     ScalarEvolution::RangeSignHint SignHint) {
6605   assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
6606   assert(AddRec->hasNoSelfWrap() &&
6607          "This only works for non-self-wrapping AddRecs!");
6608   const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
6609   const SCEV *Step = AddRec->getStepRecurrence(*this);
6610   // Only deal with constant step to save compile time.
6611   if (!isa<SCEVConstant>(Step))
6612     return ConstantRange::getFull(BitWidth);
6613   // Let's make sure that we can prove that we do not self-wrap during
6614   // MaxBECount iterations. We need this because MaxBECount is only a maximum
6615   // iteration count estimate, and we might have inferred nw from some exit
6616   // whose max exit count we do not know (or from some other indirect reasoning).
6617   // TODO: Turn into assert at some point.
6618   if (getTypeSizeInBits(MaxBECount->getType()) >
6619       getTypeSizeInBits(AddRec->getType()))
6620     return ConstantRange::getFull(BitWidth);
6621   MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
6622   const SCEV *RangeWidth = getMinusOne(AddRec->getType());
6623   const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
6624   const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
6625   if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
6626                                          MaxItersWithoutWrap))
6627     return ConstantRange::getFull(BitWidth);
6628 
6629   ICmpInst::Predicate LEPred =
6630       IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
6631   ICmpInst::Predicate GEPred =
6632       IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
6633   const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
6634 
6635   // We know that there is no self-wrap. Let's take Start and End values and
6636   // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
6637   // the iteration. They either lie inside the range [Min(Start, End),
6638   // Max(Start, End)] or outside it:
6639   //
6640   // Case 1:   RangeMin    ...    Start V1 ... VN End ...           RangeMax;
6641   // Case 2:   RangeMin Vk ... V1 Start    ...    End Vn ... Vk + 1 RangeMax;
6642   //
6643   // The no-self-wrap flag guarantees that the intermediate values cannot be BOTH
6644   // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that
6645   // knowledge, let's try to prove that we are dealing with Case 1. That is so if
6646   // Start <= End and the step is positive, or Start >= End and the step is negative.
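       // For illustration (i8, hypothetical values): Start = 10, Step = 2 and
       // End = 20 give Case 1, so every intermediate value lies in [10, 21).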
6647   const SCEV *Start = AddRec->getStart();
6648   ConstantRange StartRange = getRangeRef(Start, SignHint);
6649   ConstantRange EndRange = getRangeRef(End, SignHint);
6650   ConstantRange RangeBetween = StartRange.unionWith(EndRange);
6651   // If they already cover full iteration space, we will know nothing useful
6652   // even if we prove what we want to prove.
6653   if (RangeBetween.isFullSet())
6654     return RangeBetween;
6655   // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
6656   bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
6657                                : RangeBetween.isWrappedSet();
6658   if (IsWrappedSet)
6659     return ConstantRange::getFull(BitWidth);
6660 
6661   if (isKnownPositive(Step) &&
6662       isKnownPredicateViaConstantRanges(LEPred, Start, End))
6663     return RangeBetween;
6664   else if (isKnownNegative(Step) &&
6665            isKnownPredicateViaConstantRanges(GEPred, Start, End))
6666     return RangeBetween;
6667   return ConstantRange::getFull(BitWidth);
6668 }
6669 
6670 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
6671                                                     const SCEV *Step,
6672                                                     const SCEV *MaxBECount,
6673                                                     unsigned BitWidth) {
6674   //    RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
6675   // == RangeOf({A,+,P}) union RangeOf({B,+,Q})
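       // For illustration (hypothetical values): {C ? 0 : 10,+,C ? 1 : 2} factors
       // into RangeOf({0,+,1}) union RangeOf({10,+,2}).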
6676 
6677   struct SelectPattern {
6678     Value *Condition = nullptr;
6679     APInt TrueValue;
6680     APInt FalseValue;
6681 
6682     explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
6683                            const SCEV *S) {
6684       Optional<unsigned> CastOp;
6685       APInt Offset(BitWidth, 0);
6686 
6687       assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
6688              "Should be!");
6689 
6690       // Peel off a constant offset:
6691       if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
6692         // In the future we could consider being smarter here and handle
6693         // {Start+Step,+,Step} too.
6694         if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
6695           return;
6696 
6697         Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
6698         S = SA->getOperand(1);
6699       }
6700 
6701       // Peel off a cast operation
6702       if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
6703         CastOp = SCast->getSCEVType();
6704         S = SCast->getOperand();
6705       }
6706 
6707       using namespace llvm::PatternMatch;
6708 
6709       auto *SU = dyn_cast<SCEVUnknown>(S);
6710       const APInt *TrueVal, *FalseVal;
6711       if (!SU ||
6712           !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
6713                                           m_APInt(FalseVal)))) {
6714         Condition = nullptr;
6715         return;
6716       }
6717 
6718       TrueValue = *TrueVal;
6719       FalseValue = *FalseVal;
6720 
6721       // Re-apply the cast we peeled off earlier
6722       if (CastOp.hasValue())
6723         switch (*CastOp) {
6724         default:
6725           llvm_unreachable("Unknown SCEV cast type!");
6726 
6727         case scTruncate:
6728           TrueValue = TrueValue.trunc(BitWidth);
6729           FalseValue = FalseValue.trunc(BitWidth);
6730           break;
6731         case scZeroExtend:
6732           TrueValue = TrueValue.zext(BitWidth);
6733           FalseValue = FalseValue.zext(BitWidth);
6734           break;
6735         case scSignExtend:
6736           TrueValue = TrueValue.sext(BitWidth);
6737           FalseValue = FalseValue.sext(BitWidth);
6738           break;
6739         }
6740 
6741       // Re-apply the constant offset we peeled off earlier
6742       TrueValue += Offset;
6743       FalseValue += Offset;
6744     }
6745 
6746     bool isRecognized() { return Condition != nullptr; }
6747   };
6748 
6749   SelectPattern StartPattern(*this, BitWidth, Start);
6750   if (!StartPattern.isRecognized())
6751     return ConstantRange::getFull(BitWidth);
6752 
6753   SelectPattern StepPattern(*this, BitWidth, Step);
6754   if (!StepPattern.isRecognized())
6755     return ConstantRange::getFull(BitWidth);
6756 
6757   if (StartPattern.Condition != StepPattern.Condition) {
6758     // We don't handle this case today, but we could, by considering four
6759     // possibilities below instead of two. I'm not sure if there are cases where
6760     // that will help over what getRange already does, though.
6761     return ConstantRange::getFull(BitWidth);
6762   }
6763 
6764   // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
6765   // construct arbitrary general SCEV expressions here.  This function is called
6766   // from deep in the call stack, and calling getSCEV (on a sext instruction,
6767   // say) can end up caching a suboptimal value.
6768 
6769   // FIXME: without the explicit `this` receiver below, MSVC errors out with
6770   // C2352 and C2512 (otherwise it isn't needed).
6771 
6772   const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
6773   const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
6774   const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
6775   const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);
6776 
6777   ConstantRange TrueRange =
6778       this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
6779   ConstantRange FalseRange =
6780       this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);
6781 
6782   return TrueRange.unionWith(FalseRange);
6783 }
6784 
6785 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
6786   if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
6787   const BinaryOperator *BinOp = cast<BinaryOperator>(V);
6788 
6789   // Return early if there are no flags to propagate to the SCEV.
6790   SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
6791   if (BinOp->hasNoUnsignedWrap())
6792     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
6793   if (BinOp->hasNoSignedWrap())
6794     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
6795   if (Flags == SCEV::FlagAnyWrap)
6796     return SCEV::FlagAnyWrap;
6797 
6798   return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
6799 }
6800 
6801 const Instruction *
6802 ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) {
6803   if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
6804     return &*AddRec->getLoop()->getHeader()->begin();
6805   if (auto *U = dyn_cast<SCEVUnknown>(S))
6806     if (auto *I = dyn_cast<Instruction>(U->getValue()))
6807       return I;
6808   return nullptr;
6809 }
6810 
6811 /// Fills \p Ops with unique operands of \p S, if it has operands. If not,
6812 /// \p Ops remains unmodified.
6813 static void collectUniqueOps(const SCEV *S,
6814                              SmallVectorImpl<const SCEV *> &Ops) {
6815   SmallPtrSet<const SCEV *, 4> Unique;
6816   auto InsertUnique = [&](const SCEV *S) {
6817     if (Unique.insert(S).second)
6818       Ops.push_back(S);
6819   };
6820   if (auto *S2 = dyn_cast<SCEVCastExpr>(S))
6821     for (auto *Op : S2->operands())
6822       InsertUnique(Op);
6823   else if (auto *S2 = dyn_cast<SCEVNAryExpr>(S))
6824     for (auto *Op : S2->operands())
6825       InsertUnique(Op);
6826   else if (auto *S2 = dyn_cast<SCEVUDivExpr>(S))
6827     for (auto *Op : S2->operands())
6828       InsertUnique(Op);
6829 }
6830 
6831 const Instruction *
6832 ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
6833                                        bool &Precise) {
6834   Precise = true;
6835   // Do a bounded search of the def relation of the requested SCEVs.
6836   SmallSet<const SCEV *, 16> Visited;
6837   SmallVector<const SCEV *> Worklist;
6838   auto pushOp = [&](const SCEV *S) {
6839     if (!Visited.insert(S).second)
6840       return;
6841     // Threshold of 30 here is arbitrary.
6842     if (Visited.size() > 30) {
6843       Precise = false;
6844       return;
6845     }
6846     Worklist.push_back(S);
6847   };
6848 
6849   for (auto *S : Ops)
6850     pushOp(S);
6851 
6852   const Instruction *Bound = nullptr;
6853   while (!Worklist.empty()) {
6854     auto *S = Worklist.pop_back_val();
6855     if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
6856       if (!Bound || DT.dominates(Bound, DefI))
6857         Bound = DefI;
6858     } else {
6859       SmallVector<const SCEV *, 4> Ops;
6860       collectUniqueOps(S, Ops);
6861       for (auto *Op : Ops)
6862         pushOp(Op);
6863     }
6864   }
6865   return Bound ? Bound : &*F.getEntryBlock().begin();
6866 }
6867 
6868 const Instruction *
6869 ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
6870   bool Discard;
6871   return getDefiningScopeBound(Ops, Discard);
6872 }
6873 
6874 bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A,
6875                                                         const Instruction *B) {
6876   if (A->getParent() == B->getParent() &&
6877       isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
6878                                                  B->getIterator()))
6879     return true;
6880 
6881   auto *BLoop = LI.getLoopFor(B->getParent());
6882   if (BLoop && BLoop->getHeader() == B->getParent() &&
6883       BLoop->getLoopPreheader() == A->getParent() &&
6884       isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
6885                                                  A->getParent()->end()) &&
6886       isGuaranteedToTransferExecutionToSuccessor(B->getParent()->begin(),
6887                                                  B->getIterator()))
6888     return true;
6889   return false;
6890 }
6891 
6892 
6893 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
6894   // Only proceed if we can prove that I does not yield poison.
6895   if (!programUndefinedIfPoison(I))
6896     return false;
6897 
6898   // At this point we know that if I is executed, then it does not wrap
6899   // according to at least one of NSW or NUW. If I is not executed, then we do
6900   // not know if the calculation that I represents would wrap. Multiple
6901   // instructions can map to the same SCEV. If we apply NSW or NUW from I to
6902   // the SCEV, we must guarantee no wrapping for that SCEV also when it is
6903   // derived from other instructions that map to the same SCEV. We cannot make
6904   // that guarantee for cases where I is not executed. So we need to find a
6905   // upper bound on the defining scope for the SCEV, and prove that I is
6906   // executed every time we enter that scope.  When the bounding scope is a
6907   // loop (the common case), this is equivalent to proving I executes on every
6908   // iteration of that loop.
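       // For illustration (hypothetical IR): if '%a = add nsw %x, %y' sits in a
       // conditionally executed block, a second 'add %x, %y' elsewhere maps to
       // the same SCEV, so tagging that SCEV <nsw> is only safe if %a executes
       // every time the SCEV's defining scope is entered.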
6909   SmallVector<const SCEV *> SCEVOps;
6910   for (const Use &Op : I->operands()) {
6911     // I could be an extractvalue from a call to an overflow intrinsic.
6912     // TODO: We can do better here in some cases.
6913     if (isSCEVable(Op->getType()))
6914       SCEVOps.push_back(getSCEV(Op));
6915   }
6916   auto *DefI = getDefiningScopeBound(SCEVOps);
6917   return isGuaranteedToTransferExecutionTo(DefI, I);
6918 }
6919 
6920 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
6921   // If we know that \c I can never be poison, period, then that's enough.
6922   if (isSCEVExprNeverPoison(I))
6923     return true;
6924 
6925   // For an add recurrence specifically, we assume that infinite loops without
6926   // side effects are undefined behavior, and then reason as follows:
6927   //
6928   // If the add recurrence is poison in any iteration, it is poison on all
6929   // future iterations (since incrementing poison yields poison). If the result
6930   // of the add recurrence is fed into the loop latch condition and the loop
6931   // does not contain any throws or exiting blocks other than the latch, we now
6932   // have the ability to "choose" whether the backedge is taken or not (by
6933   // choosing a sufficiently evil value for the poison feeding into the branch)
6934   // for every iteration including and after the one in which \p I first became
6935   // poison.  There are two possibilities (let's call K the iteration in
6936   // which \p I first became poison):
6937   //
6938   //  1. In the set of iterations including and after K, the loop body executes
6939   //     no side effects.  In this case executing the backedge an infinite number
6940   //     of times will yield undefined behavior.
6941   //
6942   //  2. In the set of iterations including and after K, the loop body executes
6943   //     at least one side effect.  In this case, that specific instance of side
6944   //     effect is control dependent on poison, which also yields undefined
6945   //     behavior.
6946 
6947   auto *ExitingBB = L->getExitingBlock();
6948   auto *LatchBB = L->getLoopLatch();
6949   if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
6950     return false;
6951 
6952   SmallPtrSet<const Instruction *, 16> Pushed;
6953   SmallVector<const Instruction *, 8> PoisonStack;
6954 
6955   // We start by assuming \c I, the post-inc add recurrence, is poison.  Only
6956   // things that are known to be poison under that assumption go on the
6957   // PoisonStack.
6958   Pushed.insert(I);
6959   PoisonStack.push_back(I);
6960 
6961   bool LatchControlDependentOnPoison = false;
6962   while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
6963     const Instruction *Poison = PoisonStack.pop_back_val();
6964 
6965     for (auto *PoisonUser : Poison->users()) {
6966       if (propagatesPoison(cast<Operator>(PoisonUser))) {
6967         if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
6968           PoisonStack.push_back(cast<Instruction>(PoisonUser));
6969       } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
6970         assert(BI->isConditional() && "Only possibility!");
6971         if (BI->getParent() == LatchBB) {
6972           LatchControlDependentOnPoison = true;
6973           break;
6974         }
6975       }
6976     }
6977   }
6978 
6979   return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
6980 }
6981 
6982 ScalarEvolution::LoopProperties
6983 ScalarEvolution::getLoopProperties(const Loop *L) {
6984   using LoopProperties = ScalarEvolution::LoopProperties;
6985 
6986   auto Itr = LoopPropertiesCache.find(L);
6987   if (Itr == LoopPropertiesCache.end()) {
6988     auto HasSideEffects = [](Instruction *I) {
6989       if (auto *SI = dyn_cast<StoreInst>(I))
6990         return !SI->isSimple();
6991 
6992       return I->mayThrow() || I->mayWriteToMemory();
6993     };
6994 
6995     LoopProperties LP = {/* HasNoAbnormalExits */ true,
6996                          /*HasNoSideEffects*/ true};
6997 
6998     for (auto *BB : L->getBlocks())
6999       for (auto &I : *BB) {
7000         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7001           LP.HasNoAbnormalExits = false;
7002         if (HasSideEffects(&I))
7003           LP.HasNoSideEffects = false;
7004         if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7005           break; // We're already as pessimistic as we can get.
7006       }
7007 
7008     auto InsertPair = LoopPropertiesCache.insert({L, LP});
7009     assert(InsertPair.second && "We just checked!");
7010     Itr = InsertPair.first;
7011   }
7012 
7013   return Itr->second;
7014 }
7015 
7016 bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
7017   // A mustprogress loop without side effects must be finite.
7018   // TODO: The check used here is very conservative.  It's only *specific*
7019   // side effects which are well defined in infinite loops.
7020   return isFinite(L) || (isMustProgress(L) && loopHasNoSideEffects(L));
7021 }
7022 
7023 const SCEV *ScalarEvolution::createSCEV(Value *V) {
7024   if (!isSCEVable(V->getType()))
7025     return getUnknown(V);
7026 
7027   if (Instruction *I = dyn_cast<Instruction>(V)) {
7028     // Don't attempt to analyze instructions in blocks that aren't
7029     // reachable. Such instructions don't matter, and they aren't required
7030     // to obey basic rules for definitions dominating uses which this
7031     // analysis depends on.
7032     if (!DT.isReachableFromEntry(I->getParent()))
7033       return getUnknown(UndefValue::get(V->getType()));
7034   } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
7035     return getConstant(CI);
7036   else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
7037     return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
7038   else if (!isa<ConstantExpr>(V))
7039     return getUnknown(V);
7040 
7041   Operator *U = cast<Operator>(V);
7042   if (auto BO = MatchBinaryOp(U, DT)) {
7043     switch (BO->Opcode) {
7044     case Instruction::Add: {
7045       // The simple thing to do would be to just call getSCEV on both operands
7046       // and call getAddExpr with the result. However if we're looking at a
7047       // bunch of things all added together, this can be quite inefficient,
7048       // because it leads to N-1 getAddExpr calls for N ultimate operands.
7049       // Instead, gather up all the operands and make a single getAddExpr call.
7050       // LLVM IR canonical form means we need only traverse the left operands.
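           // For illustration (hypothetical IR): for ((a + b) + c) + d we gather
           // {d, c, b, a} and issue a single getAddExpr call instead of three.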
7051       SmallVector<const SCEV *, 4> AddOps;
7052       do {
7053         if (BO->Op) {
7054           if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
7055             AddOps.push_back(OpSCEV);
7056             break;
7057           }
7058 
7059           // If a NUW or NSW flag can be applied to the SCEV for this
7060           // addition, then compute the SCEV for this addition by itself
7061           // with a separate call to getAddExpr. We need to do that
7062           // instead of pushing the operands of the addition onto AddOps,
7063           // since the flags are only known to apply to this particular
7064           // addition - they may not apply to other additions that can be
7065           // formed with operands from AddOps.
7066           const SCEV *RHS = getSCEV(BO->RHS);
7067           SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
7068           if (Flags != SCEV::FlagAnyWrap) {
7069             const SCEV *LHS = getSCEV(BO->LHS);
7070             if (BO->Opcode == Instruction::Sub)
7071               AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
7072             else
7073               AddOps.push_back(getAddExpr(LHS, RHS, Flags));
7074             break;
7075           }
7076         }
7077 
7078         if (BO->Opcode == Instruction::Sub)
7079           AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
7080         else
7081           AddOps.push_back(getSCEV(BO->RHS));
7082 
7083         auto NewBO = MatchBinaryOp(BO->LHS, DT);
7084         if (!NewBO || (NewBO->Opcode != Instruction::Add &&
7085                        NewBO->Opcode != Instruction::Sub)) {
7086           AddOps.push_back(getSCEV(BO->LHS));
7087           break;
7088         }
7089         BO = NewBO;
7090       } while (true);
7091 
7092       return getAddExpr(AddOps);
7093     }
7094 
7095     case Instruction::Mul: {
7096       SmallVector<const SCEV *, 4> MulOps;
7097       do {
7098         if (BO->Op) {
7099           if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
7100             MulOps.push_back(OpSCEV);
7101             break;
7102           }
7103 
7104           SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
7105           if (Flags != SCEV::FlagAnyWrap) {
7106             MulOps.push_back(
7107                 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags));
7108             break;
7109           }
7110         }
7111 
7112         MulOps.push_back(getSCEV(BO->RHS));
7113         auto NewBO = MatchBinaryOp(BO->LHS, DT);
7114         if (!NewBO || NewBO->Opcode != Instruction::Mul) {
7115           MulOps.push_back(getSCEV(BO->LHS));
7116           break;
7117         }
7118         BO = NewBO;
7119       } while (true);
7120 
7121       return getMulExpr(MulOps);
7122     }
7123     case Instruction::UDiv:
7124       return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
7125     case Instruction::URem:
7126       return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
7127     case Instruction::Sub: {
7128       SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
7129       if (BO->Op)
7130         Flags = getNoWrapFlagsFromUB(BO->Op);
7131       return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
7132     }
7133     case Instruction::And:
7134       // For an expression like x&255 that merely masks off the high bits,
7135       // use zext(trunc(x)) as the SCEV expression.
7136       if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
7137         if (CI->isZero())
7138           return getSCEV(BO->RHS);
7139         if (CI->isMinusOne())
7140           return getSCEV(BO->LHS);
7141         const APInt &A = CI->getValue();
7142 
7143         // Instcombine's ShrinkDemandedConstant may strip bits out of
7144         // constants, obscuring what would otherwise be a low-bits mask.
7145         // Use computeKnownBits to compute what ShrinkDemandedConstant
7146         // knew about to reconstruct a low-bits mask value.
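             // For illustration (i8, hypothetical values): for x & 0x78 we get
             // LZ = 1 and TZ = 3, and when the mask check passes the SCEV
             // becomes (zext i4 (trunc (x /u 8) to i4) to i8) * 8.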
7147         unsigned LZ = A.countLeadingZeros();
7148         unsigned TZ = A.countTrailingZeros();
7149         unsigned BitWidth = A.getBitWidth();
7150         KnownBits Known(BitWidth);
7151         computeKnownBits(BO->LHS, Known, getDataLayout(),
7152                          0, &AC, nullptr, &DT);
7153 
7154         APInt EffectiveMask =
7155             APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
7156         if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
7157           const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
7158           const SCEV *LHS = getSCEV(BO->LHS);
7159           const SCEV *ShiftedLHS = nullptr;
7160           if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
7161             if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
7162               // For an expression like (x * 8) & 8, simplify the multiply.
7163               unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
7164               unsigned GCD = std::min(MulZeros, TZ);
7165               APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
7166               SmallVector<const SCEV*, 4> MulOps;
7167               MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
7168               MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end());
7169               auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
7170               ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
7171             }
7172           }
7173           if (!ShiftedLHS)
7174             ShiftedLHS = getUDivExpr(LHS, MulCount);
7175           return getMulExpr(
7176               getZeroExtendExpr(
7177                   getTruncateExpr(ShiftedLHS,
7178                       IntegerType::get(getContext(), BitWidth - LZ - TZ)),
7179                   BO->LHS->getType()),
7180               MulCount);
7181         }
7182       }
7183       break;
7184 
7185     case Instruction::Or:
7186       // If the RHS of the Or is a constant, we may have something like:
7187       // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
7188       // optimizations will transparently handle this case.
7189       //
7190       // In order for this transformation to be safe, the LHS must be of the
7191       // form X*(2^n) and the Or constant must be less than 2^n.
7192       if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
7193         const SCEV *LHS = getSCEV(BO->LHS);
7194         const APInt &CIVal = CI->getValue();
7195         if (GetMinTrailingZeros(LHS) >=
7196             (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
7197           // Build a plain add SCEV.
7198           return getAddExpr(LHS, getSCEV(CI),
7199                             (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
7200         }
7201       }
7202       break;
7203 
7204     case Instruction::Xor:
7205       if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
7206         // If the RHS of xor is -1, then this is a not operation.
7207         if (CI->isMinusOne())
7208           return getNotSCEV(getSCEV(BO->LHS));
7209 
7210         // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
7211         // This is a variant of the check for xor with -1, and it handles
7212         // the case where instcombine has trimmed non-demanded bits out
7213         // of an xor with -1.
7214         if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
7215           if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
7216             if (LBO->getOpcode() == Instruction::And &&
7217                 LCI->getValue() == CI->getValue())
7218               if (const SCEVZeroExtendExpr *Z =
7219                       dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
7220                 Type *UTy = BO->LHS->getType();
7221                 const SCEV *Z0 = Z->getOperand();
7222                 Type *Z0Ty = Z0->getType();
7223                 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
7224 
7225                 // If C is a low-bits mask, the zero extend is serving to
7226                 // mask off the high bits. Complement the operand and
7227                 // re-apply the zext.
7228                 if (CI->getValue().isMask(Z0TySize))
7229                   return getZeroExtendExpr(getNotSCEV(Z0), UTy);
7230 
7231                 // If C is a single bit, it may be in the sign-bit position
7232                 // before the zero-extend. In this case, represent the xor
7233                 // using an add, which is equivalent, and re-apply the zext.
7234                 APInt Trunc = CI->getValue().trunc(Z0TySize);
7235                 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
7236                     Trunc.isSignMask())
7237                   return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
7238                                            UTy);
7239               }
7240       }
7241       break;
7242 
7243     case Instruction::Shl:
7244       // Turn shift left of a constant amount into a multiply.
7245       if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
7246         uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
7247 
7248         // If the shift count is not less than the bitwidth, the result of
7249         // the shift is undefined. Don't try to analyze it, because the
7250         // resolution chosen here may differ from the resolution chosen in
7251         // other parts of the compiler.
7252         if (SA->getValue().uge(BitWidth))
7253           break;
7254 
7255         // We can safely preserve the nuw flag in all cases. It's also safe to
7256         // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
7257         // requires special handling. It can be preserved as long as we're not
7258         // left shifting by bitwidth - 1.
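             // For illustration (i8, hypothetical values): shl nsw %x, 7 would
             // become a multiply by 1 << 7 == 0x80, which as an i8 constant is
             // -128, so 'mul nsw' would assert no wrap for a different product.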
7259         auto Flags = SCEV::FlagAnyWrap;
7260         if (BO->Op) {
7261           auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
7262           if ((MulFlags & SCEV::FlagNSW) &&
7263               ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
7264             Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
7265           if (MulFlags & SCEV::FlagNUW)
7266             Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
7267         }
7268 
7269         Constant *X = ConstantInt::get(
7270             getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
7271         return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
7272       }
7273       break;
7274 
7275     case Instruction::AShr: {
7276       // AShr X, C, where C is a constant.
7277       ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
7278       if (!CI)
7279         break;
7280 
7281       Type *OuterTy = BO->LHS->getType();
7282       uint64_t BitWidth = getTypeSizeInBits(OuterTy);
7283       // If the shift count is not less than the bitwidth, the result of
7284       // the shift is undefined. Don't try to analyze it, because the
7285       // resolution chosen here may differ from the resolution chosen in
7286       // other parts of the compiler.
7287       if (CI->getValue().uge(BitWidth))
7288         break;
7289 
7290       if (CI->isZero())
7291         return getSCEV(BO->LHS); // shift by zero --> noop
7292 
7293       uint64_t AShrAmt = CI->getZExtValue();
7294       Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
7295 
7296       Operator *L = dyn_cast<Operator>(BO->LHS);
7297       if (L && L->getOpcode() == Instruction::Shl) {
7298         // X = Shl A, n
7299         // Y = AShr X, m
7300         // Both n and m are constant.
7301 
7302         const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
7303         if (L->getOperand(1) == BO->RHS)
7304           // For a two-shift sext-inreg, i.e. n = m,
7305           // use sext(trunc(x)) as the SCEV expression.
7306           return getSignExtendExpr(
7307               getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
7308 
7309         ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
7310         if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
7311           uint64_t ShlAmt = ShlAmtCI->getZExtValue();
7312           if (ShlAmt > AShrAmt) {
7313             // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
7314             // expression. We already checked that ShlAmt < BitWidth, so
7315             // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
7316             // ShlAmt - AShrAmt < BitWidth - AShrAmt.
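                 // For illustration (i8, hypothetical values): (x << 4) ashr 2
                 // becomes sext(mul(trunc(x) to i6, 4)) to i8.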
7317             APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
7318                                             ShlAmt - AShrAmt);
7319             return getSignExtendExpr(
7320                 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
7321                 getConstant(Mul)), OuterTy);
7322           }
7323         }
7324       }
7325       break;
7326     }
7327     }
7328   }
7329 
7330   switch (U->getOpcode()) {
7331   case Instruction::Trunc:
7332     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
7333 
7334   case Instruction::ZExt:
7335     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
7336 
7337   case Instruction::SExt:
7338     if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
7339       // The NSW flag of a subtract does not always survive the conversion to
7340       // A + (-1)*B.  By pushing sign extension onto its operands we are much
7341       // more likely to preserve NSW and allow later AddRec optimisations.
7342       //
7343       // NOTE: This is effectively duplicating this logic from getSignExtend:
7344       //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
7345       // but by that point the NSW information has potentially been lost.
7346       if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
7347         Type *Ty = U->getType();
7348         auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
7349         auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
7350         return getMinusSCEV(V1, V2, SCEV::FlagNSW);
7351       }
7352     }
7353     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
7354 
7355   case Instruction::BitCast:
7356     // BitCasts are no-op casts so we just eliminate the cast.
7357     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
7358       return getSCEV(U->getOperand(0));
7359     break;
7360 
7361   case Instruction::PtrToInt: {
7362     // Pointer-to-integer casts are straightforward, so do model them.
7363     const SCEV *Op = getSCEV(U->getOperand(0));
7364     Type *DstIntTy = U->getType();
7365     // But only if effective SCEV (integer) type is wide enough to represent
7366     // all possible pointer values.
7367     const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
7368     if (isa<SCEVCouldNotCompute>(IntOp))
7369       return getUnknown(V);
7370     return IntOp;
7371   }
7372   case Instruction::IntToPtr:
7373     // Just don't deal with inttoptr casts.
7374     return getUnknown(V);
7375 
7376   case Instruction::SDiv:
7377     // If both operands are non-negative, this is just an udiv.
7378     if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7379         isKnownNonNegative(getSCEV(U->getOperand(1))))
7380       return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7381     break;
7382 
7383   case Instruction::SRem:
7384     // If both operands are non-negative, this is just an urem.
7385     if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7386         isKnownNonNegative(getSCEV(U->getOperand(1))))
7387       return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7388     break;
7389 
7390   case Instruction::GetElementPtr:
7391     return createNodeForGEP(cast<GEPOperator>(U));
7392 
7393   case Instruction::PHI:
7394     return createNodeForPHI(cast<PHINode>(U));
7395 
7396   case Instruction::Select:
7397     // U can also be a select constant expr, which we let fall through.  Since
7398     // createNodeForSelect only works for a condition that is an `ICmpInst`, and
7399     // constant expressions cannot have instructions as operands, we would have
7400     // returned getUnknown for a select constant expression anyway.
7401     if (isa<Instruction>(U))
7402       return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
7403                                       U->getOperand(1), U->getOperand(2));
7404     break;
7405 
7406   case Instruction::Call:
7407   case Instruction::Invoke:
7408     if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
7409       return getSCEV(RV);
7410 
7411     if (auto *II = dyn_cast<IntrinsicInst>(U)) {
7412       switch (II->getIntrinsicID()) {
7413       case Intrinsic::abs:
7414         return getAbsExpr(
7415             getSCEV(II->getArgOperand(0)),
7416             /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
7417       case Intrinsic::umax:
7418         return getUMaxExpr(getSCEV(II->getArgOperand(0)),
7419                            getSCEV(II->getArgOperand(1)));
7420       case Intrinsic::umin:
7421         return getUMinExpr(getSCEV(II->getArgOperand(0)),
7422                            getSCEV(II->getArgOperand(1)));
7423       case Intrinsic::smax:
7424         return getSMaxExpr(getSCEV(II->getArgOperand(0)),
7425                            getSCEV(II->getArgOperand(1)));
7426       case Intrinsic::smin:
7427         return getSMinExpr(getSCEV(II->getArgOperand(0)),
7428                            getSCEV(II->getArgOperand(1)));
7429       case Intrinsic::usub_sat: {
7430         const SCEV *X = getSCEV(II->getArgOperand(0));
7431         const SCEV *Y = getSCEV(II->getArgOperand(1));
7432         const SCEV *ClampedY = getUMinExpr(X, Y);
7433         return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
7434       }
7435       case Intrinsic::uadd_sat: {
7436         const SCEV *X = getSCEV(II->getArgOperand(0));
7437         const SCEV *Y = getSCEV(II->getArgOperand(1));
7438         const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
7439         return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
7440       }
7441       case Intrinsic::start_loop_iterations:
7442         // A start_loop_iterations is just equivalent to the first operand for
7443         // SCEV purposes.
7444         return getSCEV(II->getArgOperand(0));
7445       default:
7446         break;
7447       }
7448     }
7449     break;
7450   }
7451 
7452   return getUnknown(V);
7453 }
7454 
7455 //===----------------------------------------------------------------------===//
7456 //                   Iteration Count Computation Code
7457 //
7458 
7459 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount,
7460                                                        bool Extend) {
7461   if (isa<SCEVCouldNotCompute>(ExitCount))
7462     return getCouldNotCompute();
7463 
7464   auto *ExitCountType = ExitCount->getType();
7465   assert(ExitCountType->isIntegerTy());
7466 
7467   if (!Extend)
7468     return getAddExpr(ExitCount, getOne(ExitCountType));
7469 
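       // For illustration (i8, hypothetical values): an exit count of 255 would
       // wrap to 0 when incremented in i8, so with Extend we compute 256 in i9.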
7470   auto *WiderType = Type::getIntNTy(ExitCountType->getContext(),
7471                                     1 + ExitCountType->getScalarSizeInBits());
7472   return getAddExpr(getNoopOrZeroExtend(ExitCount, WiderType),
7473                     getOne(WiderType));
7474 }
7475 
7476 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
7477   if (!ExitCount)
7478     return 0;
7479 
7480   ConstantInt *ExitConst = ExitCount->getValue();
7481 
7482   // Guard against huge trip counts.
7483   if (ExitConst->getValue().getActiveBits() > 32)
7484     return 0;
7485 
7486   // In case of integer overflow, this returns 0, which is correct.
7487   return ((unsigned)ExitConst->getZExtValue()) + 1;
7488 }
7489 
7490 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
7491   auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
7492   return getConstantTripCount(ExitCount);
7493 }
7494 
7495 unsigned
7496 ScalarEvolution::getSmallConstantTripCount(const Loop *L,
7497                                            const BasicBlock *ExitingBlock) {
7498   assert(ExitingBlock && "Must pass a non-null exiting block!");
7499   assert(L->isLoopExiting(ExitingBlock) &&
7500          "Exiting block must actually branch out of the loop!");
7501   const SCEVConstant *ExitCount =
7502       dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
7503   return getConstantTripCount(ExitCount);
7504 }
7505 
7506 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
7507   const auto *MaxExitCount =
7508       dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
7509   return getConstantTripCount(MaxExitCount);
7510 }
7511 
7512 const SCEV *ScalarEvolution::getConstantMaxTripCountFromArray(const Loop *L) {
7513   // We can't infer a trip count from an array access in an irregular loop.
7514   // FIXME: It's hard to infer a loop bound from an array accessed in a nested loop.
7515   if (!L->isLoopSimplifyForm() || !L->isInnermost())
7516     return getCouldNotCompute();
7517 
7518   // FIXME: To keep the common case simple, we only analyze loops that have
7519   // one exiting block, and that block must be the latch. This makes it
7520   // easier to capture loops whose memory accesses are executed on each
7521   // iteration.
7522   const BasicBlock *LoopLatch = L->getLoopLatch();
7523   assert(LoopLatch && "See definition of simplified form loop.");
7524   if (L->getExitingBlock() != LoopLatch)
7525     return getCouldNotCompute();
7526 
7527   const DataLayout &DL = getDataLayout();
7528   SmallVector<const SCEV *> InferCountColl;
7529   for (auto *BB : L->getBlocks()) {
7530     // At this point we know that the loop is in simplified form and has a
7531     // single exiting block. Only infer from memory operations in blocks that
7532     // must execute on each iteration. As a first step, ensure that the max
7533     // execution count of MemAccessBB in the loop bounds the latch's max
7534     // execution count. If MemAccessBB does not dominate the latch, skip.
7535     //            Entry
7536     //              │
7537     //        ┌─────▼─────┐
7538     //        │Loop Header◄─────┐
7539     //        └──┬──────┬─┘     │
7540     //           │      │       │
7541     //  ┌────────▼──┐ ┌─▼─────┐ │
7542     //  │MemAccessBB│ │OtherBB│ │
7543     //  └────────┬──┘ └─┬─────┘ │
7544     //           │      │       │
7545     //         ┌─▼──────▼─┐     │
7546     //         │Loop Latch├─────┘
7547     //         └────┬─────┘
7548     //              ▼
7549     //             Exit
7550     if (!DT.dominates(BB, LoopLatch))
7551       continue;
7552 
7553     for (Instruction &Inst : *BB) {
7554       // Find Memory Operation Instruction.
7555       auto *GEP = getLoadStorePointerOperand(&Inst);
7556       if (!GEP)
7557         continue;
7558 
7559       auto *ElemSize = dyn_cast<SCEVConstant>(getElementSize(&Inst));
7560       // Do not infer from a scalar type, e.g. "ElemSize = sizeof()".
7561       if (!ElemSize)
7562         continue;
7563 
7564       // Use an existing polynomial recurrence on the trip count.
7565       auto *AddRec = dyn_cast<SCEVAddRecExpr>(getSCEV(GEP));
7566       if (!AddRec)
7567         continue;
7568       auto *ArrBase = dyn_cast<SCEVUnknown>(getPointerBase(AddRec));
7569       auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*this));
7570       if (!ArrBase || !Step)
7571         continue;
7572       assert(isLoopInvariant(ArrBase, L) && "See addrec definition");
7573 
7574       // Only handle { %array + step },
7575       // FIXME: {(SCEVAddRecExpr) + step } cannot be analyzed here.
7576       if (AddRec->getStart() != ArrBase)
7577         continue;
7578 
7579       // Skip memory access patterns that have gaps,
7580       // that repeat a memory operation,
7581       // or whose GEP index wraps around.
7582       if (Step->getAPInt().getActiveBits() > 32 ||
7583           Step->getAPInt().getZExtValue() !=
7584               ElemSize->getAPInt().getZExtValue() ||
7585           Step->isZero() || Step->getAPInt().isNegative())
7586         continue;
7587 
7588       // Only infer from a stack array that has a known size.
7589       // Make sure the alloca instruction is not executed in the loop.
7590       AllocaInst *AllocateInst = dyn_cast<AllocaInst>(ArrBase->getValue());
7591       if (!AllocateInst || L->contains(AllocateInst->getParent()))
7592         continue;
7593 
7594       // Make sure we only handle a normal array.
7595       auto *Ty = dyn_cast<ArrayType>(AllocateInst->getAllocatedType());
7596       auto *ArrSize = dyn_cast<ConstantInt>(AllocateInst->getArraySize());
7597       if (!Ty || !ArrSize || !ArrSize->isOne())
7598         continue;
7599 
7600       // FIXME: Since GEP indices are silently zext'd to the indexing type,
7601       // a narrow GEP index may wrap around rather than increase strictly;
7602       // we should ensure that the index increases strictly with each
7603       // loop iteration.
7604       // Now we can infer a max execution time by MemLength/StepLength.
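           // For illustration (hypothetical values): for 'int a[16]' accessed
           // with a 4-byte step, MemSize = 64, MaxExeCount = 16, and the
           // inferred trip count bound is 17.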
7605       const SCEV *MemSize =
7606           getConstant(Step->getType(), DL.getTypeAllocSize(Ty));
7607       auto *MaxExeCount =
7608           dyn_cast<SCEVConstant>(getUDivCeilSCEV(MemSize, Step));
7609       if (!MaxExeCount || MaxExeCount->getAPInt().getActiveBits() > 32)
7610         continue;
7611 
7612       // If the loop reaches the maximum number of executions, we cannot
7613       // access bytes starting outside the statically allocated size without
7614       // immediate UB. But it is allowed to enter the loop header one more
7615       // time.
7616       auto *InferCount = dyn_cast<SCEVConstant>(
7617           getAddExpr(MaxExeCount, getOne(MaxExeCount->getType())));
7618       // Discard inferred counts that do not fit into 32 bits.
7619       if (!InferCount || InferCount->getAPInt().getActiveBits() > 32)
7620         continue;
7621 
7622       InferCountColl.push_back(InferCount);
7623     }
7624   }
7625 
7626   if (InferCountColl.empty())
7627     return getCouldNotCompute();
7628 
7629   return getUMinFromMismatchedTypes(InferCountColl);
7630 }
7631 
7632 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
7633   SmallVector<BasicBlock *, 8> ExitingBlocks;
7634   L->getExitingBlocks(ExitingBlocks);
7635 
7636   Optional<unsigned> Res = None;
7637   for (auto *ExitingBB : ExitingBlocks) {
7638     unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
7639     if (!Res)
7640       Res = Multiple;
7641     Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple);
7642   }
7643   return Res.getValueOr(1);
7644 }
7645 
7646 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7647                                                        const SCEV *ExitCount) {
7648   if (ExitCount == getCouldNotCompute())
7649     return 1;
7650 
7651   // Get the trip count
7652   const SCEV *TCExpr = getTripCountFromExitCount(ExitCount);
7653 
7654   const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
7655   if (!TC)
7656     // Attempt to factor more general cases. Return the greatest power-of-two
7657     // divisor. If overflow happens, the trip count expression is still
7658     // divisible by the greatest power-of-two divisor returned.
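         // For illustration (hypothetical values): if TCExpr = 8 * %n, then
         // GetMinTrailingZeros gives at least 3, so 8 divides the trip count.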
7659     return 1U << std::min((uint32_t)31,
7660                           GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));
7661 
7662   ConstantInt *Result = TC->getValue();
7663 
7664   // Guard against huge trip counts (this requires checking
7665   // for zero to handle the case where the trip count == -1 and the
7666   // addition wraps).
7667   if (!Result || Result->getValue().getActiveBits() > 32 ||
7668       Result->getValue().getActiveBits() == 0)
7669     return 1;
7670 
7671   return (unsigned)Result->getZExtValue();
7672 }
7673 
7674 /// Returns the largest constant divisor of the trip count of this loop as a
7675 /// normal unsigned value, if possible. This means that the actual trip count is
7676 /// always a multiple of the returned value (don't forget the trip count could
7677 /// very well be zero as well!).
7678 ///
7679 /// Returns 1 if the trip count is unknown or not guaranteed to be the
7680 /// multiple of a constant (which is also the case if the trip count is simply
7681 /// constant; use getSmallConstantTripCount for that case). It will also
7682 /// return 1 if the trip count is very large (>= 2^32).
7683 ///
7684 /// As explained in the comments for getSmallConstantTripCount, this assumes
7685 /// that control exits the loop via ExitingBlock.
7686 unsigned
7687 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7688                                               const BasicBlock *ExitingBlock) {
7689   assert(ExitingBlock && "Must pass a non-null exiting block!");
7690   assert(L->isLoopExiting(ExitingBlock) &&
7691          "Exiting block must actually branch out of the loop!");
7692   const SCEV *ExitCount = getExitCount(L, ExitingBlock);
7693   return getSmallConstantTripMultiple(L, ExitCount);
7694 }
7695 
7696 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
7697                                           const BasicBlock *ExitingBlock,
7698                                           ExitCountKind Kind) {
7699   switch (Kind) {
7700   case Exact:
7701   case SymbolicMaximum:
7702     return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
7703   case ConstantMaximum:
7704     return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
7705   }
7706   llvm_unreachable("Invalid ExitCountKind!");
7707 }
7708 
7709 const SCEV *
7710 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
7711                                                  SmallVector<const SCEVPredicate *, 4> &Preds) {
7712   return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
7713 }
7714 
7715 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
7716                                                    ExitCountKind Kind) {
7717   switch (Kind) {
7718   case Exact:
7719     return getBackedgeTakenInfo(L).getExact(L, this);
7720   case ConstantMaximum:
7721     return getBackedgeTakenInfo(L).getConstantMax(this);
7722   case SymbolicMaximum:
7723     return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
7724   }
7725   llvm_unreachable("Invalid ExitCountKind!");
7726 }
7727 
7728 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
7729   return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
7730 }
7731 
7732 /// Push PHI nodes in the header of the given loop onto the given Worklist.
7733 static void PushLoopPHIs(const Loop *L,
7734                          SmallVectorImpl<Instruction *> &Worklist,
7735                          SmallPtrSetImpl<Instruction *> &Visited) {
7736   BasicBlock *Header = L->getHeader();
7737 
7738   // Push all Loop-header PHIs onto the Worklist stack.
7739   for (PHINode &PN : Header->phis())
7740     if (Visited.insert(&PN).second)
7741       Worklist.push_back(&PN);
7742 }
7743 
7744 const ScalarEvolution::BackedgeTakenInfo &
7745 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
7746   auto &BTI = getBackedgeTakenInfo(L);
7747   if (BTI.hasFullInfo())
7748     return BTI;
7749 
7750   auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7751 
7752   if (!Pair.second)
7753     return Pair.first->second;
7754 
7755   BackedgeTakenInfo Result =
7756       computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
7757 
7758   return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
7759 }
7760 
7761 ScalarEvolution::BackedgeTakenInfo &
7762 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
7763   // Initially insert an invalid entry for this loop. If the insertion
7764   // succeeds, proceed to actually compute a backedge-taken count and
7765   // update the value. The temporary CouldNotCompute value tells SCEV
7766   // code elsewhere that it shouldn't attempt to request a new
7767   // backedge-taken count, which could result in infinite recursion.
7768   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
7769       BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7770   if (!Pair.second)
7771     return Pair.first->second;
7772 
7773   // computeBackedgeTakenCount may allocate memory for its result. Inserting it
7774   // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
7775   // must be cleared in this scope.
7776   BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
7777 
7778   // In a release build, the statistics are unused.
7779   (void)NumTripCountsComputed;
7780   (void)NumTripCountsNotComputed;
7781 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
7782   const SCEV *BEExact = Result.getExact(L, this);
7783   if (BEExact != getCouldNotCompute()) {
7784     assert(isLoopInvariant(BEExact, L) &&
7785            isLoopInvariant(Result.getConstantMax(this), L) &&
7786            "Computed backedge-taken count isn't loop invariant for loop!");
7787     ++NumTripCountsComputed;
7788   } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
7789              isa<PHINode>(L->getHeader()->begin())) {
7790     // Only count loops that have phi nodes as not being computable.
7791     ++NumTripCountsNotComputed;
7792   }
7793 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
7794 
7795   // Now that we know more about the trip count for this loop, forget any
7796   // existing SCEV values for PHI nodes in this loop since they are only
7797   // conservative estimates made without the benefit of trip count
7798   // information. This invalidation is not necessary for correctness, and is
7799   // only done to produce more precise results.
7800   if (Result.hasAnyInfo()) {
7801     // Invalidate any expression using an addrec in this loop.
7802     SmallVector<const SCEV *, 8> ToForget;
7803     auto LoopUsersIt = LoopUsers.find(L);
7804     if (LoopUsersIt != LoopUsers.end())
7805       append_range(ToForget, LoopUsersIt->second);
7806     forgetMemoizedResults(ToForget);
7807 
7808     // Invalidate constant-evolved loop header phis.
7809     for (PHINode &PN : L->getHeader()->phis())
7810       ConstantEvolutionLoopExitValue.erase(&PN);
7811   }
7812 
7813   // Re-lookup the insert position, since the call to
7814   // computeBackedgeTakenCount above could result in a
7815   // recursive call to getBackedgeTakenInfo (on a different
7816   // loop), which would invalidate the iterator computed
7817   // earlier.
7818   return BackedgeTakenCounts.find(L)->second = std::move(Result);
7819 }
7820 
7821 void ScalarEvolution::forgetAllLoops() {
7822   // This method is intended to forget all info about loops. It should
7823   // invalidate caches as if the following happened:
7824   // - The trip counts of all loops have changed arbitrarily
7825   // - Every llvm::Value has been updated in place to produce a different
7826   // result.
7827   BackedgeTakenCounts.clear();
7828   PredicatedBackedgeTakenCounts.clear();
7829   BECountUsers.clear();
7830   LoopPropertiesCache.clear();
7831   ConstantEvolutionLoopExitValue.clear();
7832   ValueExprMap.clear();
7833   ValuesAtScopes.clear();
7834   ValuesAtScopesUsers.clear();
7835   LoopDispositions.clear();
7836   BlockDispositions.clear();
7837   UnsignedRanges.clear();
7838   SignedRanges.clear();
7839   ExprValueMap.clear();
7840   HasRecMap.clear();
7841   MinTrailingZerosCache.clear();
7842   PredicatedSCEVRewrites.clear();
7843 }
7844 
7845 void ScalarEvolution::forgetLoop(const Loop *L) {
7846   SmallVector<const Loop *, 16> LoopWorklist(1, L);
7847   SmallVector<Instruction *, 32> Worklist;
7848   SmallPtrSet<Instruction *, 16> Visited;
7849   SmallVector<const SCEV *, 16> ToForget;
7850 
7851   // Iterate over all the loops and sub-loops to drop SCEV information.
7852   while (!LoopWorklist.empty()) {
7853     auto *CurrL = LoopWorklist.pop_back_val();
7854 
7855     // Drop any stored trip count value.
7856     forgetBackedgeTakenCounts(CurrL, /* Predicated */ false);
7857     forgetBackedgeTakenCounts(CurrL, /* Predicated */ true);
7858 
7859     // Drop information about predicated SCEV rewrites for this loop.
7860     for (auto I = PredicatedSCEVRewrites.begin();
7861          I != PredicatedSCEVRewrites.end();) {
7862       std::pair<const SCEV *, const Loop *> Entry = I->first;
7863       if (Entry.second == CurrL)
7864         PredicatedSCEVRewrites.erase(I++);
7865       else
7866         ++I;
7867     }
7868 
7869     auto LoopUsersItr = LoopUsers.find(CurrL);
7870     if (LoopUsersItr != LoopUsers.end()) {
7871       ToForget.insert(ToForget.end(), LoopUsersItr->second.begin(),
7872                       LoopUsersItr->second.end());
7873       LoopUsers.erase(LoopUsersItr);
7874     }
7875 
7876     // Drop information about expressions based on loop-header PHIs.
7877     PushLoopPHIs(CurrL, Worklist, Visited);
7878 
7879     while (!Worklist.empty()) {
7880       Instruction *I = Worklist.pop_back_val();
7881 
7882       ValueExprMapType::iterator It =
7883           ValueExprMap.find_as(static_cast<Value *>(I));
7884       if (It != ValueExprMap.end()) {
7885         eraseValueFromMap(It->first);
7886         ToForget.push_back(It->second);
7887         if (PHINode *PN = dyn_cast<PHINode>(I))
7888           ConstantEvolutionLoopExitValue.erase(PN);
7889       }
7890 
7891       PushDefUseChildren(I, Worklist, Visited);
7892     }
7893 
7894     LoopPropertiesCache.erase(CurrL);
7895     // Forget all contained loops too, to avoid dangling entries in the
7896     // ValuesAtScopes map.
7897     LoopWorklist.append(CurrL->begin(), CurrL->end());
7898   }
7899   forgetMemoizedResults(ToForget);
7900 }
7901 
7902 void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
7903   while (Loop *Parent = L->getParentLoop())
7904     L = Parent;
7905   forgetLoop(L);
7906 }
7907 
7908 void ScalarEvolution::forgetValue(Value *V) {
7909   Instruction *I = dyn_cast<Instruction>(V);
7910   if (!I) return;
7911 
7912   // Drop information about expressions based on loop-header PHIs.
7913   SmallVector<Instruction *, 16> Worklist;
7914   SmallPtrSet<Instruction *, 8> Visited;
7915   SmallVector<const SCEV *, 8> ToForget;
7916   Worklist.push_back(I);
7917   Visited.insert(I);
7918 
7919   while (!Worklist.empty()) {
7920     I = Worklist.pop_back_val();
7921     ValueExprMapType::iterator It =
7922       ValueExprMap.find_as(static_cast<Value *>(I));
7923     if (It != ValueExprMap.end()) {
7924       eraseValueFromMap(It->first);
7925       ToForget.push_back(It->second);
7926       if (PHINode *PN = dyn_cast<PHINode>(I))
7927         ConstantEvolutionLoopExitValue.erase(PN);
7928     }
7929 
7930     PushDefUseChildren(I, Worklist, Visited);
7931   }
7932   forgetMemoizedResults(ToForget);
7933 }
7934 
7935 void ScalarEvolution::forgetLoopDispositions(const Loop *L) {
7936   LoopDispositions.clear();
7937 }
7938 
7939 /// Get the exact loop backedge taken count considering all loop exits. A
7940 /// computable result can only be returned for loops with all exiting blocks
7941 /// dominating the latch. howFarToZero assumes that the limit of each loop test
7942 /// is never skipped. This is a valid assumption as long as the loop exits via
7943 /// that test. For precise results, it is the caller's responsibility to specify
7944 /// the relevant loop exiting block using getExact(ExitingBlock, SE).
7945 const SCEV *
7946 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
7947                                              SmallVector<const SCEVPredicate *, 4> *Preds) const {
7948   // If any exits were not computable, the loop is not computable.
7949   if (!isComplete() || ExitNotTaken.empty())
7950     return SE->getCouldNotCompute();
7951 
7952   const BasicBlock *Latch = L->getLoopLatch();
7953   // All exiting blocks we have collected must dominate the only backedge.
7954   if (!Latch)
7955     return SE->getCouldNotCompute();
7956 
7957   // All exiting blocks we have gathered dominate the loop's latch, so the
7958   // exact trip count is simply the minimum of all the calculated exit counts.
7959   SmallVector<const SCEV *, 2> Ops;
7960   for (auto &ENT : ExitNotTaken) {
7961     const SCEV *BECount = ENT.ExactNotTaken;
7962     assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
7963     assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
7964            "We should only have known counts for exiting blocks that dominate "
7965            "latch!");
7966 
7967     Ops.push_back(BECount);
7968 
7969     if (Preds)
7970       for (auto *P : ENT.Predicates)
7971         Preds->push_back(P);
7972 
7973     assert((Preds || ENT.hasAlwaysTruePredicate()) &&
7974            "Predicate should be always true!");
7975   }
7976 
7977   return SE->getUMinFromMismatchedTypes(Ops);
7978 }
7979 
7980 /// Get the exact not taken count for this loop exit.
7981 const SCEV *
7982 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
7983                                              ScalarEvolution *SE) const {
7984   for (auto &ENT : ExitNotTaken)
7985     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7986       return ENT.ExactNotTaken;
7987 
7988   return SE->getCouldNotCompute();
7989 }
7990 
7991 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
7992     const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
7993   for (auto &ENT : ExitNotTaken)
7994     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7995       return ENT.MaxNotTaken;
7996 
7997   return SE->getCouldNotCompute();
7998 }
7999 
8000 /// getConstantMax - Get the constant max backedge taken count for the loop.
8001 const SCEV *
8002 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
8003   auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
8004     return !ENT.hasAlwaysTruePredicate();
8005   };
8006 
8007   if (!getConstantMax() || any_of(ExitNotTaken, PredicateNotAlwaysTrue))
8008     return SE->getCouldNotCompute();
8009 
8010   assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
8011           isa<SCEVConstant>(getConstantMax())) &&
8012          "No point in having a non-constant max backedge taken count!");
8013   return getConstantMax();
8014 }
8015 
8016 const SCEV *
8017 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L,
8018                                                    ScalarEvolution *SE) {
8019   if (!SymbolicMax)
8020     SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L);
8021   return SymbolicMax;
8022 }
8023 
8024 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
8025     ScalarEvolution *SE) const {
8026   auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
8027     return !ENT.hasAlwaysTruePredicate();
8028   };
8029   return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
8030 }
8031 
8032 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
8033     : ExitLimit(E, E, false, None) {
8034 }
8035 
8036 ScalarEvolution::ExitLimit::ExitLimit(
8037     const SCEV *E, const SCEV *M, bool MaxOrZero,
8038     ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
8039     : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
8040   // If we prove the max count is zero, so is the symbolic bound.  This happens
8041   // in practice due to differences in a) how context-sensitive we've chosen
8042   // to be and b) how we reason about bounds implied by UB.
8043   if (MaxNotTaken->isZero())
8044     ExactNotTaken = MaxNotTaken;
8045 
8046   assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
8047           !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
8048          "Exact is not allowed to be less precise than Max");
8049   assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
8050           isa<SCEVConstant>(MaxNotTaken)) &&
8051          "No point in having a non-constant max backedge taken count!");
8052   for (auto *PredSet : PredSetList)
8053     for (auto *P : *PredSet)
8054       addPredicate(P);
8055   assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) &&
8056          "Backedge count should be int");
8057   assert((isa<SCEVCouldNotCompute>(M) || !M->getType()->isPointerTy()) &&
8058          "Max backedge count should be int");
8059 }
8060 
8061 ScalarEvolution::ExitLimit::ExitLimit(
8062     const SCEV *E, const SCEV *M, bool MaxOrZero,
8063     const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
8064     : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
8065 }
8066 
8067 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
8068                                       bool MaxOrZero)
8069     : ExitLimit(E, M, MaxOrZero, None) {
8070 }
8071 
8072 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
8073 /// computable exit into a persistent ExitNotTakenInfo array.
8074 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
8075     ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
8076     bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
8077     : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
8078   using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8079 
8080   ExitNotTaken.reserve(ExitCounts.size());
8081   std::transform(
8082       ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
8083       [&](const EdgeExitInfo &EEI) {
8084         BasicBlock *ExitBB = EEI.first;
8085         const ExitLimit &EL = EEI.second;
8086         return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
8087                                 EL.Predicates);
8088       });
8089   assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
8090           isa<SCEVConstant>(ConstantMax)) &&
8091          "No point in having a non-constant max backedge taken count!");
8092 }
8093 
8094 /// Compute the number of times the backedge of the specified loop will execute.
8095 ScalarEvolution::BackedgeTakenInfo
8096 ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
8097                                            bool AllowPredicates) {
8098   SmallVector<BasicBlock *, 8> ExitingBlocks;
8099   L->getExitingBlocks(ExitingBlocks);
8100 
8101   using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8102 
8103   SmallVector<EdgeExitInfo, 4> ExitCounts;
8104   bool CouldComputeBECount = true;
8105   BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
8106   const SCEV *MustExitMaxBECount = nullptr;
8107   const SCEV *MayExitMaxBECount = nullptr;
8108   bool MustExitMaxOrZero = false;
8109 
8110   // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
8111   // and compute maxBECount.
8112   // Do a union of all the predicates here.
8113   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
8114     BasicBlock *ExitBB = ExitingBlocks[i];
8115 
8116     // We canonicalize untaken exits to br (constant); ignore them so that
8117     // proving an exit untaken doesn't negatively impact our ability to reason
8118     // about the loop as a whole.
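         // For example, "br i1 false, label %exit, label %body" never takes
         // the exit edge, so it is skipped here.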
8119     if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
8120       if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
8121         bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
8122         if (ExitIfTrue == CI->isZero())
8123           continue;
8124       }
8125 
8126     ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
8127 
8128     assert((AllowPredicates || EL.Predicates.empty()) &&
8129            "Predicated exit limit when predicates are not allowed!");
8130 
8131     // 1. For each exit that can be computed, add an entry to ExitCounts.
8132     // CouldComputeBECount is true only if all exits can be computed.
8133     if (EL.ExactNotTaken == getCouldNotCompute())
8134       // We couldn't compute an exact value for this exit, so
8135       // we won't be able to compute an exact value for the loop.
8136       CouldComputeBECount = false;
8137     else
8138       ExitCounts.emplace_back(ExitBB, EL);
8139 
8140     // 2. Derive the loop's MaxBECount from each exit's max number of
8141     // non-exiting iterations. Partition the loop exits into two kinds:
8142     // LoopMustExits and LoopMayExits.
8143     //
8144     // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it
8145     // is a LoopMayExit.  If any computable LoopMustExit is found, then
8146     // MaxBECount is the minimum EL.MaxNotTaken of computable
8147     // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
8148     // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
8149     // computable EL.MaxNotTaken.
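         //
         // For illustration: two computable must-exits with bounds 10 and 20
         // give MaxBECount = 10, while only may-exits with bounds 10 and 20
         // (and no computable must-exit) give MaxBECount = 20.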
8150     if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
8151         DT.dominates(ExitBB, Latch)) {
8152       if (!MustExitMaxBECount) {
8153         MustExitMaxBECount = EL.MaxNotTaken;
8154         MustExitMaxOrZero = EL.MaxOrZero;
8155       } else {
8156         MustExitMaxBECount =
8157             getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
8158       }
8159     } else if (MayExitMaxBECount != getCouldNotCompute()) {
8160       if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
8161         MayExitMaxBECount = EL.MaxNotTaken;
8162       else {
8163         MayExitMaxBECount =
8164             getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
8165       }
8166     }
8167   }
8168   const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
8169     (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
8170   // The loop backedge will be taken the maximum or zero times if there's
8171   // a single exit that must be taken the maximum or zero times.
8172   bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
8173 
8174   // Remember which SCEVs are used in exit limits for invalidation purposes.
8175   // We only care about non-constant SCEVs here, so we can ignore EL.MaxNotTaken
8176   // and MaxBECount, which must be SCEVConstant.
8177   for (const auto &Pair : ExitCounts)
8178     if (!isa<SCEVConstant>(Pair.second.ExactNotTaken))
8179       BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates});
8180   return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
8181                            MaxBECount, MaxOrZero);
8182 }
8183 
8184 ScalarEvolution::ExitLimit
8185 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
8186                                       bool AllowPredicates) {
8187   assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
8188   // If our exiting block does not dominate the latch, then its connection with
8189   // loop's exit limit may be far from trivial.
8190   const BasicBlock *Latch = L->getLoopLatch();
8191   if (!Latch || !DT.dominates(ExitingBlock, Latch))
8192     return getCouldNotCompute();
8193 
8194   bool IsOnlyExit = (L->getExitingBlock() != nullptr);
8195   Instruction *Term = ExitingBlock->getTerminator();
8196   if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
8197     assert(BI->isConditional() && "If unconditional, it can't be in loop!");
8198     bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
8199     assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
8200            "It should have one successor in loop and one exit block!");
8201     // Proceed to the next level to examine the exit condition expression.
8202     return computeExitLimitFromCond(
8203         L, BI->getCondition(), ExitIfTrue,
8204         /*ControlsExit=*/IsOnlyExit, AllowPredicates);
8205   }
8206 
8207   if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
8208     // For switch, make sure that there is a single exit from the loop.
8209     BasicBlock *Exit = nullptr;
8210     for (auto *SBB : successors(ExitingBlock))
8211       if (!L->contains(SBB)) {
8212         if (Exit) // Multiple exit successors.
8213           return getCouldNotCompute();
8214         Exit = SBB;
8215       }
8216     assert(Exit && "Exiting block must have at least one exit");
8217     return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
8218                                                 /*ControlsExit=*/IsOnlyExit);
8219   }
8220 
8221   return getCouldNotCompute();
8222 }
8223 
8224 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
8225     const Loop *L, Value *ExitCond, bool ExitIfTrue,
8226     bool ControlsExit, bool AllowPredicates) {
8227   ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
8228   return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
8229                                         ControlsExit, AllowPredicates);
8230 }
8231 
8232 Optional<ScalarEvolution::ExitLimit>
8233 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
8234                                       bool ExitIfTrue, bool ControlsExit,
8235                                       bool AllowPredicates) {
8236   (void)this->L;
8237   (void)this->ExitIfTrue;
8238   (void)this->AllowPredicates;
8239 
8240   assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
8241          this->AllowPredicates == AllowPredicates &&
8242          "Variance in assumed invariant key components!");
8243   auto Itr = TripCountMap.find({ExitCond, ControlsExit});
8244   if (Itr == TripCountMap.end())
8245     return None;
8246   return Itr->second;
8247 }
8248 
8249 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
8250                                              bool ExitIfTrue,
8251                                              bool ControlsExit,
8252                                              bool AllowPredicates,
8253                                              const ExitLimit &EL) {
8254   assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
8255          this->AllowPredicates == AllowPredicates &&
8256          "Variance in assumed invariant key components!");
8257 
8258   auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
8259   assert(InsertResult.second && "Expected successful insertion!");
8260   (void)InsertResult;
8261   (void)ExitIfTrue;
8262 }
8263 
8264 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
8265     ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
8266     bool ControlsExit, bool AllowPredicates) {
8267 
8268   if (auto MaybeEL =
8269           Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
8270     return *MaybeEL;
8271 
8272   ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
8273                                               ControlsExit, AllowPredicates);
8274   Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
8275   return EL;
8276 }
8277 
8278 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
8279     ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
8280     bool ControlsExit, bool AllowPredicates) {
8281   // Handle BinOp conditions (And, Or).
8282   if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
8283           Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
8284     return *LimitFromBinOp;
8285 
8286   // With an icmp, it may be feasible to compute an exact backedge-taken count.
8287   // Proceed to the next level to examine the icmp.
8288   if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
8289     ExitLimit EL =
8290         computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
8291     if (EL.hasFullInfo() || !AllowPredicates)
8292       return EL;
8293 
8294     // Try again, but use SCEV predicates this time.
8295     return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
8296                                     /*AllowPredicates=*/true);
8297   }
8298 
8299   // Check for a constant condition. These are normally stripped out by
8300   // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
8301   // preserve the CFG and is temporarily leaving constant conditions
8302   // in place.
8303   if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
8304     if (ExitIfTrue == !CI->getZExtValue())
8305       // The backedge is always taken.
8306       return getCouldNotCompute();
8307     else
8308       // The backedge is never taken.
8309       return getZero(CI->getType());
8310   }
8311 
8312   // If we're exiting based on the overflow flag of an x.with.overflow intrinsic
8313   // with a constant step, we can form an equivalent icmp predicate and figure
8314   // out how many iterations will be taken before we exit.
8315   const WithOverflowInst *WO;
8316   const APInt *C;
8317   if (match(ExitCond, m_ExtractValue<1>(m_WithOverflowInst(WO))) &&
8318       match(WO->getRHS(), m_APInt(C))) {
8319     ConstantRange NWR =
8320       ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C,
8321                                            WO->getNoWrapKind());
8322     CmpInst::Predicate Pred;
8323     APInt NewRHSC, Offset;
8324     NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
8325     if (!ExitIfTrue)
8326       Pred = ICmpInst::getInversePredicate(Pred);
8327     auto *LHS = getSCEV(WO->getLHS());
8328     if (Offset != 0)
8329       LHS = getAddExpr(LHS, getConstant(Offset));
8330     auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC),
8331                                        ControlsExit, AllowPredicates);
8332     if (EL.hasAnyInfo()) return EL;
8333   }
8334 
8335   // If it's not an integer or pointer comparison then compute it the hard way.
8336   return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
8337 }
8338 
8339 Optional<ScalarEvolution::ExitLimit>
8340 ScalarEvolution::computeExitLimitFromCondFromBinOp(
8341     ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
8342     bool ControlsExit, bool AllowPredicates) {
8343   // Check if the controlling expression for this loop is an And or Or.
8344   Value *Op0, *Op1;
8345   bool IsAnd = false;
8346   if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
8347     IsAnd = true;
8348   else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
8349     IsAnd = false;
8350   else
8351     return None;
8352 
8353   // EitherMayExit is true in these two cases:
8354   //   br (and Op0 Op1), loop, exit
8355   //   br (or  Op0 Op1), exit, loop
8356   bool EitherMayExit = IsAnd ^ ExitIfTrue;
8357   ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
8358                                                  ControlsExit && !EitherMayExit,
8359                                                  AllowPredicates);
8360   ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
8361                                                  ControlsExit && !EitherMayExit,
8362                                                  AllowPredicates);
8363 
8364   // Be robust against unsimplified IR for the form "op i1 X, NeutralElement"
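       // (e.g., "and i1 %X, true" or "or i1 %X, false"): the exit limit is
       // that of %X alone.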
8365   const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
8366   if (isa<ConstantInt>(Op1))
8367     return Op1 == NeutralElement ? EL0 : EL1;
8368   if (isa<ConstantInt>(Op0))
8369     return Op0 == NeutralElement ? EL1 : EL0;
8370 
8371   const SCEV *BECount = getCouldNotCompute();
8372   const SCEV *MaxBECount = getCouldNotCompute();
8373   if (EitherMayExit) {
8374     // Either condition may cause an exit, so the loop continues only while
8375     // both conditions allow it. Choose the less conservative count.
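         // e.g., if Op0 permits at most 10 non-exiting iterations and Op1 at
         // most 20, the combined condition permits at most min(10, 20) = 10.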
8376     if (EL0.ExactNotTaken != getCouldNotCompute() &&
8377         EL1.ExactNotTaken != getCouldNotCompute()) {
8378       BECount = getUMinFromMismatchedTypes(
8379           EL0.ExactNotTaken, EL1.ExactNotTaken,
8380           /*Sequential=*/!isa<BinaryOperator>(ExitCond));
8381 
8382       // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
8383       // it should have been simplified to zero (see the condition (3) above)
8384       assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() ||
8385              BECount->isZero());
8386     }
8387     if (EL0.MaxNotTaken == getCouldNotCompute())
8388       MaxBECount = EL1.MaxNotTaken;
8389     else if (EL1.MaxNotTaken == getCouldNotCompute())
8390       MaxBECount = EL0.MaxNotTaken;
8391     else
8392       MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
8393   } else {
8394     // Both conditions must be true at the same time for the loop to exit.
8395     // For now, be conservative.
8396     if (EL0.ExactNotTaken == EL1.ExactNotTaken)
8397       BECount = EL0.ExactNotTaken;
8398   }
8399 
8400   // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
8401   // to be more aggressive when computing BECount than when computing
8402   // MaxBECount.  In these cases it is possible for EL0.ExactNotTaken and
8403   // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
8404   // to not.
8405   if (isa<SCEVCouldNotCompute>(MaxBECount) &&
8406       !isa<SCEVCouldNotCompute>(BECount))
8407     MaxBECount = getConstant(getUnsignedRangeMax(BECount));
8408 
8409   return ExitLimit(BECount, MaxBECount, false,
8410                    { &EL0.Predicates, &EL1.Predicates });
8411 }
8412 
8413 ScalarEvolution::ExitLimit
8414 ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
8415                                           ICmpInst *ExitCond,
8416                                           bool ExitIfTrue,
8417                                           bool ControlsExit,
8418                                           bool AllowPredicates) {
8419   // If the condition was exit on true, convert the condition to exit on false
8420   ICmpInst::Predicate Pred;
8421   if (!ExitIfTrue)
8422     Pred = ExitCond->getPredicate();
8423   else
8424     Pred = ExitCond->getInversePredicate();
8425   const ICmpInst::Predicate OriginalPred = Pred;
8426 
8427   const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
8428   const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
8429 
8430   ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsExit,
8431                                           AllowPredicates);
8432   if (EL.hasAnyInfo()) return EL;
8433 
8434   auto *ExhaustiveCount =
8435       computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
8436 
8437   if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
8438     return ExhaustiveCount;
8439 
8440   return computeShiftCompareExitLimit(ExitCond->getOperand(0),
8441                                       ExitCond->getOperand(1), L, OriginalPred);
8442 }
8443 ScalarEvolution::ExitLimit
8444 ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
8445                                           ICmpInst::Predicate Pred,
8446                                           const SCEV *LHS, const SCEV *RHS,
8447                                           bool ControlsExit,
8448                                           bool AllowPredicates) {
8449 
8450   // Try to evaluate any dependencies out of the loop.
8451   LHS = getSCEVAtScope(LHS, L);
8452   RHS = getSCEVAtScope(RHS, L);
8453 
8454   // At this point, we would like to compute how many iterations of the
8455   // loop the predicate will return true for these inputs.
8456   if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
8457     // If there is a loop-invariant, force it into the RHS.
8458     std::swap(LHS, RHS);
8459     Pred = ICmpInst::getSwappedPredicate(Pred);
8460   }
8461 
8462   bool ControllingFiniteLoop =
8463       ControlsExit && loopHasNoAbnormalExits(L) && loopIsFiniteByAssumption(L);
8464   // Simplify the operands before analyzing them.
8465   (void)SimplifyICmpOperands(Pred, LHS, RHS, /*Depth=*/0,
8466                              ControllingFiniteLoop);
8467 
8468   // If we have a comparison of a chrec against a constant, try to use value
8469   // ranges to answer this query.
8470   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
8471     if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
8472       if (AddRec->getLoop() == L) {
8473         // Form the constant range.
8474         ConstantRange CompRange =
8475             ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());
8476 
8477         const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
8478         if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
8479       }
8480 
8481   // If this loop must exit based on this condition (or execute undefined
8482   // behaviour), and we can prove the test sequence produced must repeat
8483   // the same values on self-wrap of the IV, then we can infer that IV
8484   // doesn't self wrap because if it did, we'd have an infinite (undefined)
8485   // loop.
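       // For example, an IV {0,+,4} over i8 cycles through the same 64 values
       // if it self-wraps; were "%iv != %n" the controlling exit test of this
       // finite loop, self-wrap without exiting would make the loop infinite,
       // so the IV must not self-wrap.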
8486   if (ControllingFiniteLoop && isLoopInvariant(RHS, L)) {
8487     // TODO: We can peel off any functions which are invertible *in L*.  Loop
8488     // invariant terms are effectively constants for our purposes here.
8489     auto *InnerLHS = LHS;
8490     if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS))
8491       InnerLHS = ZExt->getOperand();
8492     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS)) {
8493       auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this));
8494       if (!AR->hasNoSelfWrap() && AR->getLoop() == L && AR->isAffine() &&
8495           StrideC && StrideC->getAPInt().isPowerOf2()) {
8496         auto Flags = AR->getNoWrapFlags();
8497         Flags = setFlags(Flags, SCEV::FlagNW);
8498         SmallVector<const SCEV*> Operands{AR->operands()};
8499         Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
8500         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
8501       }
8502     }
8503   }
8504 
8505   switch (Pred) {
8506   case ICmpInst::ICMP_NE: {                     // while (X != Y)
8507     // Convert to: while (X-Y != 0)
8508     if (LHS->getType()->isPointerTy()) {
8509       LHS = getLosslessPtrToIntExpr(LHS);
8510       if (isa<SCEVCouldNotCompute>(LHS))
8511         return LHS;
8512     }
8513     if (RHS->getType()->isPointerTy()) {
8514       RHS = getLosslessPtrToIntExpr(RHS);
8515       if (isa<SCEVCouldNotCompute>(RHS))
8516         return RHS;
8517     }
8518     ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
8519                                 AllowPredicates);
8520     if (EL.hasAnyInfo()) return EL;
8521     break;
8522   }
8523   case ICmpInst::ICMP_EQ: {                     // while (X == Y)
8524     // Convert to: while (X-Y == 0)
8525     if (LHS->getType()->isPointerTy()) {
8526       LHS = getLosslessPtrToIntExpr(LHS);
8527       if (isa<SCEVCouldNotCompute>(LHS))
8528         return LHS;
8529     }
8530     if (RHS->getType()->isPointerTy()) {
8531       RHS = getLosslessPtrToIntExpr(RHS);
8532       if (isa<SCEVCouldNotCompute>(RHS))
8533         return RHS;
8534     }
8535     ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
8536     if (EL.hasAnyInfo()) return EL;
8537     break;
8538   }
8539   case ICmpInst::ICMP_SLT:
8540   case ICmpInst::ICMP_ULT: {                    // while (X < Y)
8541     bool IsSigned = Pred == ICmpInst::ICMP_SLT;
8542     ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
8543                                     AllowPredicates);
8544     if (EL.hasAnyInfo()) return EL;
8545     break;
8546   }
8547   case ICmpInst::ICMP_SGT:
8548   case ICmpInst::ICMP_UGT: {                    // while (X > Y)
8549     bool IsSigned = Pred == ICmpInst::ICMP_SGT;
8550     ExitLimit EL =
8551         howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
8552                             AllowPredicates);
8553     if (EL.hasAnyInfo()) return EL;
8554     break;
8555   }
8556   default:
8557     break;
8558   }
8559 
8560   return getCouldNotCompute();
8561 }
8562 
8563 ScalarEvolution::ExitLimit
8564 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
8565                                                       SwitchInst *Switch,
8566                                                       BasicBlock *ExitingBlock,
8567                                                       bool ControlsExit) {
8568   assert(!L->contains(ExitingBlock) && "Not an exiting block!");
8569 
8570   // Give up if the exit is the default dest of a switch.
8571   if (Switch->getDefaultDest() == ExitingBlock)
8572     return getCouldNotCompute();
8573 
8574   assert(L->contains(Switch->getDefaultDest()) &&
8575          "Default case must not exit the loop!");
8576   const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
8577   const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
8578 
8579   // while (X != Y) --> while (X-Y != 0)
8580   ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
8581   if (EL.hasAnyInfo())
8582     return EL;
8583 
8584   return getCouldNotCompute();
8585 }
8586 
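     /// Fold the given add recurrence at a constant iteration number; e.g.,
     /// {0,+,2} evaluated at iteration 3 folds to the constant 6.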
8587 static ConstantInt *
8588 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
8589                                 ScalarEvolution &SE) {
8590   const SCEV *InVal = SE.getConstant(C);
8591   const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
8592   assert(isa<SCEVConstant>(Val) &&
8593          "Evaluation of SCEV at constant didn't fold correctly?");
8594   return cast<SCEVConstant>(Val)->getValue();
8595 }
8596 
8597 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
8598     Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
8599   ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
8600   if (!RHS)
8601     return getCouldNotCompute();
8602 
8603   const BasicBlock *Latch = L->getLoopLatch();
8604   if (!Latch)
8605     return getCouldNotCompute();
8606 
8607   const BasicBlock *Predecessor = L->getLoopPredecessor();
8608   if (!Predecessor)
8609     return getCouldNotCompute();
8610 
8611   // Return true if V is of the form "LHS `shift_op` <positive constant>".
8612   // Return LHS in OutLHS and shift_op in OutOpCode.
8613   auto MatchPositiveShift =
8614       [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
8615 
8616     using namespace PatternMatch;
8617 
8618     ConstantInt *ShiftAmt;
8619     if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8620       OutOpCode = Instruction::LShr;
8621     else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8622       OutOpCode = Instruction::AShr;
8623     else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8624       OutOpCode = Instruction::Shl;
8625     else
8626       return false;
8627 
8628     return ShiftAmt->getValue().isStrictlyPositive();
8629   };
8630 
8631   // Recognize a "shift recurrence", either of the form %iv or %iv.shifted, in
8632   //
8633   // loop:
8634   //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
8635   //   %iv.shifted = lshr i32 %iv, <positive constant>
8636   //
8637   // Return true on a successful match.  Return the corresponding PHI node (%iv
8638   // above) in PNOut and the opcode of the shift operation in OpCodeOut.
8639   auto MatchShiftRecurrence =
8640       [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
8641     Optional<Instruction::BinaryOps> PostShiftOpCode;
8642 
8643     {
8644       Instruction::BinaryOps OpC;
8645       Value *V;
8646 
8647       // If we encounter a shift instruction, "peel off" the shift operation,
8648       // and remember that we did so.  Later when we inspect %iv's backedge
8649       // value, we will make sure that the backedge value uses the same
8650       // operation.
8651       //
8652       // Note: the peeled shift operation does not have to be the same
8653       // instruction as the one feeding into the PHI's backedge value.  We only
8654       // really care about it being the same *kind* of shift instruction --
8655       // that's all that is required for our later inferences to hold.
8656       if (MatchPositiveShift(LHS, V, OpC)) {
8657         PostShiftOpCode = OpC;
8658         LHS = V;
8659       }
8660     }
8661 
8662     PNOut = dyn_cast<PHINode>(LHS);
8663     if (!PNOut || PNOut->getParent() != L->getHeader())
8664       return false;
8665 
8666     Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
8667     Value *OpLHS;
8668 
8669     return
8670         // The backedge value for the PHI node must be a shift by a positive
8671         // amount
8672         MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
8673 
8674         // of the PHI node itself
8675         OpLHS == PNOut &&
8676 
8677         // and the kind of shift should match the kind of shift we peeled
8678         // off, if any.
8679         (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
8680   };
8681 
8682   PHINode *PN;
8683   Instruction::BinaryOps OpCode;
8684   if (!MatchShiftRecurrence(LHS, PN, OpCode))
8685     return getCouldNotCompute();
8686 
8687   const DataLayout &DL = getDataLayout();
8688 
8689   // The key rationale for this optimization is that for some kinds of shift
8690   // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
8691   // within a finite number of iterations.  If the condition guarding the
8692   // backedge (in the sense that the backedge is taken if the condition is true)
8693   // is false for the value the shift recurrence stabilizes to, then we know
8694   // that the backedge is taken only a finite number of times.
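       //
       // For example, {8,lshr,1} takes the values 8, 4, 2, 1, 0, 0, ... and
       // stabilizes to 0 within bitwidth(K) iterations; if the backedge
       // condition is false once the value stabilizes, the loop must exit.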
8695 
8696   ConstantInt *StableValue = nullptr;
8697   switch (OpCode) {
8698   default:
8699     llvm_unreachable("Impossible case!");
8700 
8701   case Instruction::AShr: {
8702     // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
8703     // bitwidth(K) iterations.
8704     Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
8705     KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC,
8706                                        Predecessor->getTerminator(), &DT);
8707     auto *Ty = cast<IntegerType>(RHS->getType());
8708     if (Known.isNonNegative())
8709       StableValue = ConstantInt::get(Ty, 0);
8710     else if (Known.isNegative())
8711       StableValue = ConstantInt::get(Ty, -1, true);
8712     else
8713       return getCouldNotCompute();
8714 
8715     break;
8716   }
8717   case Instruction::LShr:
8718   case Instruction::Shl:
8719     // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
8720     // stabilize to 0 in at most bitwidth(K) iterations.
8721     StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
8722     break;
8723   }
8724 
8725   auto *Result =
8726       ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
8727   assert(Result->getType()->isIntegerTy(1) &&
8728          "Otherwise cannot be an operand to a branch instruction");
8729 
8730   if (Result->isZeroValue()) {
8731     unsigned BitWidth = getTypeSizeInBits(RHS->getType());
8732     const SCEV *UpperBound =
8733         getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
8734     return ExitLimit(getCouldNotCompute(), UpperBound, false);
8735   }
8736 
8737   return getCouldNotCompute();
8738 }
8739 
8740 /// Return true if we can constant fold an instruction of the specified type,
8741 /// assuming that all operands were constants.
8742 static bool CanConstantFold(const Instruction *I) {
8743   if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
8744       isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
8745       isa<LoadInst>(I) || isa<ExtractValueInst>(I))
8746     return true;
8747 
8748   if (const CallInst *CI = dyn_cast<CallInst>(I))
8749     if (const Function *F = CI->getCalledFunction())
8750       return canConstantFoldCallTo(CI, F);
8751   return false;
8752 }
8753 
8754 /// Determine whether this instruction can constant evolve within this loop
8755 /// assuming its operands can all constant evolve.
8756 static bool canConstantEvolve(Instruction *I, const Loop *L) {
8757   // An instruction outside of the loop can't be derived from a loop PHI.
8758   if (!L->contains(I)) return false;
8759 
8760   if (isa<PHINode>(I)) {
8761     // We don't currently keep track of the control flow needed to evaluate
8762     // PHIs, so we cannot handle PHIs inside of loops.
8763     return L->getHeader() == I->getParent();
8764   }
8765 
8766   // If we won't be able to constant fold this expression even if the operands
8767   // are constants, bail early.
8768   return CanConstantFold(I);
8769 }
8770 
8771 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
8772 /// recursing through each instruction operand until reaching a loop header phi.
8773 static PHINode *
8774 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
8775                                DenseMap<Instruction *, PHINode *> &PHIMap,
8776                                unsigned Depth) {
8777   if (Depth > MaxConstantEvolvingDepth)
8778     return nullptr;
8779 
8780   // Otherwise, we can evaluate this instruction if all of its operands are
8781   // constant or derived from a PHI node themselves.
8782   PHINode *PHI = nullptr;
8783   for (Value *Op : UseInst->operands()) {
8784     if (isa<Constant>(Op)) continue;
8785 
8786     Instruction *OpInst = dyn_cast<Instruction>(Op);
8787     if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
8788 
8789     PHINode *P = dyn_cast<PHINode>(OpInst);
8790     if (!P)
8791       // If this operand is already visited, reuse the prior result.
8792       // We may have P != PHI if this is the deepest point at which the
8793       // inconsistent paths meet.
8794       P = PHIMap.lookup(OpInst);
8795     if (!P) {
8796       // Recurse and memoize the results, whether a phi is found or not.
8797       // This recursive call invalidates pointers into PHIMap.
8798       P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
8799       PHIMap[OpInst] = P;
8800     }
8801     if (!P)
8802       return nullptr;  // Not evolving from PHI
8803     if (PHI && PHI != P)
8804       return nullptr;  // Evolving from multiple different PHIs.
8805     PHI = P;
8806   }
8807   // This is an expression evolving from a constant PHI!
8808   return PHI;
8809 }
8810 
8811 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
8812 /// in the loop that V is derived from.  We allow arbitrary operations along the
8813 /// way, but the operands of an operation must either be constants or a value
8814 /// derived from a constant PHI.  If this expression does not fit with these
8815 /// constraints, return null.
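     ///
     /// For illustration, given
     ///   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %loop ]
     ///   %iv.next = add i32 %iv, 1
     /// this returns %iv for V = %iv.next, since %iv.next is derived from %iv
     /// using only constant operands.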
8816 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
8817   Instruction *I = dyn_cast<Instruction>(V);
8818   if (!I || !canConstantEvolve(I, L)) return nullptr;
8819 
8820   if (PHINode *PN = dyn_cast<PHINode>(I))
8821     return PN;
8822 
8823   // Record non-constant instructions contained by the loop.
8824   DenseMap<Instruction *, PHINode *> PHIMap;
8825   return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
8826 }
8827 
8828 /// EvaluateExpression - Given an expression that passes the
8829 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
8830 /// in the loop has the value PHIVal.  If we can't fold this expression for some
8831 /// reason, return null.
8832 static Constant *EvaluateExpression(Value *V, const Loop *L,
8833                                     DenseMap<Instruction *, Constant *> &Vals,
8834                                     const DataLayout &DL,
8835                                     const TargetLibraryInfo *TLI) {
8836   // Convenient constant check, but redundant for recursive calls.
8837   if (Constant *C = dyn_cast<Constant>(V)) return C;
8838   Instruction *I = dyn_cast<Instruction>(V);
8839   if (!I) return nullptr;
8840 
8841   if (Constant *C = Vals.lookup(I)) return C;
8842 
8843   // An instruction inside the loop depends on a value outside the loop that we
8844   // weren't given a mapping for, or a value such as a call inside the loop.
8845   if (!canConstantEvolve(I, L)) return nullptr;
8846 
8847   // An unmapped PHI can be due to a branch or another loop inside this loop,
8848   // or due to this not being the initial iteration through a loop where we
8849   // couldn't compute the evolution of this particular PHI last time.
8850   if (isa<PHINode>(I)) return nullptr;
8851 
8852   std::vector<Constant*> Operands(I->getNumOperands());
8853 
8854   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
8855     Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
8856     if (!Operand) {
8857       Operands[i] = dyn_cast<Constant>(I->getOperand(i));
8858       if (!Operands[i]) return nullptr;
8859       continue;
8860     }
8861     Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
8862     Vals[Operand] = C;
8863     if (!C) return nullptr;
8864     Operands[i] = C;
8865   }
8866 
8867   if (CmpInst *CI = dyn_cast<CmpInst>(I))
8868     return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8869                                            Operands[1], DL, TLI);
8870   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8871     if (!LI->isVolatile())
8872       return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
8873   }
8874   return ConstantFoldInstOperands(I, Operands, DL, TLI);
8875 }
8876 
8877 
8878 // If every incoming value to PN except the one for BB is a specific Constant,
8879 // return that, else return nullptr.
8880 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
8881   Constant *IncomingVal = nullptr;
8882 
8883   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
8884     if (PN->getIncomingBlock(i) == BB)
8885       continue;
8886 
8887     auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
8888     if (!CurrentVal)
8889       return nullptr;
8890 
8891     if (IncomingVal != CurrentVal) {
8892       if (IncomingVal)
8893         return nullptr;
8894       IncomingVal = CurrentVal;
8895     }
8896   }
8897 
8898   return IncomingVal;
8899 }
8900 
8901 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
8902 /// in the header of its containing loop, we know the loop executes a
8903 /// constant number of times, and the PHI node is just a recurrence
8904 /// involving constants, fold it.
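     ///
     /// For example, a PHI that starts at 0 and adds 2 on each backedge has
     /// the exit value 6 for a backedge-taken count of 3.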
8905 Constant *
8906 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
8907                                                    const APInt &BEs,
8908                                                    const Loop *L) {
8909   auto I = ConstantEvolutionLoopExitValue.find(PN);
8910   if (I != ConstantEvolutionLoopExitValue.end())
8911     return I->second;
8912 
8913   if (BEs.ugt(MaxBruteForceIterations)) // Not going to evaluate it.
8914     return ConstantEvolutionLoopExitValue[PN] = nullptr;
8915 
8916   Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
8917 
8918   DenseMap<Instruction *, Constant *> CurrentIterVals;
8919   BasicBlock *Header = L->getHeader();
8920   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8921 
8922   BasicBlock *Latch = L->getLoopLatch();
8923   if (!Latch)
8924     return nullptr;
8925 
8926   for (PHINode &PHI : Header->phis()) {
8927     if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8928       CurrentIterVals[&PHI] = StartCST;
8929   }
8930   if (!CurrentIterVals.count(PN))
8931     return RetVal = nullptr;
8932 
8933   Value *BEValue = PN->getIncomingValueForBlock(Latch);
8934 
8935   // Execute the loop symbolically to determine the exit value.
8936   assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
8937          "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
8938 
8939   unsigned NumIterations = BEs.getZExtValue(); // must be in range
8940   unsigned IterationNum = 0;
8941   const DataLayout &DL = getDataLayout();
8942   for (; ; ++IterationNum) {
8943     if (IterationNum == NumIterations)
8944       return RetVal = CurrentIterVals[PN];  // Got exit value!
8945 
8946     // Compute the value of the PHIs for the next iteration.
8947     // EvaluateExpression adds non-phi values to the CurrentIterVals map.
8948     DenseMap<Instruction *, Constant *> NextIterVals;
8949     Constant *NextPHI =
8950         EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8951     if (!NextPHI)
8952       return nullptr;        // Couldn't evaluate!
8953     NextIterVals[PN] = NextPHI;
8954 
8955     bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
8956 
8957     // Also evaluate the other PHI nodes.  However, we don't get to stop if we
8958     // cease to be able to evaluate one of them or if they stop evolving,
8959     // because that doesn't necessarily prevent us from computing PN.
8960     SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
8961     for (const auto &I : CurrentIterVals) {
8962       PHINode *PHI = dyn_cast<PHINode>(I.first);
8963       if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
8964       PHIsToCompute.emplace_back(PHI, I.second);
8965     }
8966     // We use two distinct loops because EvaluateExpression may invalidate any
8967     // iterators into CurrentIterVals.
8968     for (const auto &I : PHIsToCompute) {
8969       PHINode *PHI = I.first;
8970       Constant *&NextPHI = NextIterVals[PHI];
8971       if (!NextPHI) {   // Not already computed.
8972         Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8973         NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8974       }
8975       if (NextPHI != I.second)
8976         StoppedEvolving = false;
8977     }
8978 
8979     // If all entries in CurrentIterVals == NextIterVals then we can stop
8980     // iterating, the loop can't continue to change.
8981     if (StoppedEvolving)
8982       return RetVal = CurrentIterVals[PN];
8983 
8984     CurrentIterVals.swap(NextIterVals);
8985   }
8986 }
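
// A minimal standalone sketch of the brute-force strategy above (illustrative
// only; nothing here is part of the SCEV API): starting from the PHI's
// constant start value, apply the backedge update BEs times and return the
// final constant, giving up beyond a fixed iteration budget.  For example,
// with Start == 1 and a doubling Step, BEs == 10 yields 1024.
#include <cstdint>
#include <optional>

static std::optional<uint64_t>
bruteForceExitValue(uint64_t Start, uint64_t BEs, uint64_t MaxIters,
                    uint64_t (*Step)(uint64_t)) {
  if (BEs > MaxIters)
    return std::nullopt; // Not going to evaluate it (mirrors the cutoff above).
  uint64_t Val = Start;
  for (uint64_t It = 0; It != BEs; ++It)
    Val = Step(Val); // One symbolic execution of the loop body.
  return Val;        // The PHI's value after BEs backedges.
}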
8987 
8988 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
8989                                                           Value *Cond,
8990                                                           bool ExitWhen) {
8991   PHINode *PN = getConstantEvolvingPHI(Cond, L);
8992   if (!PN) return getCouldNotCompute();
8993 
8994   // If the loop is canonicalized, the PHI will have exactly two entries.
8995   // That's the only form we support here.
8996   if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
8997 
8998   DenseMap<Instruction *, Constant *> CurrentIterVals;
8999   BasicBlock *Header = L->getHeader();
9000   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
9001 
9002   BasicBlock *Latch = L->getLoopLatch();
9003   assert(Latch && "Should follow from NumIncomingValues == 2!");
9004 
9005   for (PHINode &PHI : Header->phis()) {
9006     if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
9007       CurrentIterVals[&PHI] = StartCST;
9008   }
9009   if (!CurrentIterVals.count(PN))
9010     return getCouldNotCompute();
9011 
9012   // Okay, we found a PHI node that defines the trip count of this loop.  Execute
9013   // the loop symbolically to determine when the condition gets a value of
9014   // "ExitWhen".
9015   unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
9016   const DataLayout &DL = getDataLayout();
9017   for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
9018     auto *CondVal = dyn_cast_or_null<ConstantInt>(
9019         EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
9020 
9021     // Couldn't symbolically evaluate.
9022     if (!CondVal) return getCouldNotCompute();
9023 
9024     if (CondVal->getValue() == uint64_t(ExitWhen)) {
9025       ++NumBruteForceTripCountsComputed;
9026       return getConstant(Type::getInt32Ty(getContext()), IterationNum);
9027     }
9028 
9029     // Update all the PHI nodes for the next iteration.
9030     DenseMap<Instruction *, Constant *> NextIterVals;
9031 
9032     // Create a list of which PHIs we need to compute. We want to do this before
9033     // calling EvaluateExpression on them because that may invalidate iterators
9034     // into CurrentIterVals.
9035     SmallVector<PHINode *, 8> PHIsToCompute;
9036     for (const auto &I : CurrentIterVals) {
9037       PHINode *PHI = dyn_cast<PHINode>(I.first);
9038       if (!PHI || PHI->getParent() != Header) continue;
9039       PHIsToCompute.push_back(PHI);
9040     }
9041     for (PHINode *PHI : PHIsToCompute) {
9042       Constant *&NextPHI = NextIterVals[PHI];
9043       if (NextPHI) continue;    // Already computed!
9044 
9045       Value *BEValue = PHI->getIncomingValueForBlock(Latch);
9046       NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
9047     }
9048     CurrentIterVals.swap(NextIterVals);
9049   }
9050 
9051   // Too many iterations were needed to evaluate.
9052   return getCouldNotCompute();
9053 }
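
// For example (illustrative only): for a canonical loop "for (i = 0; i != 5;
// ++i)" with exit condition "i == 5" and ExitWhen == true, the symbolic
// execution above evaluates the condition for i = 0, 1, ... and returns the
// i32 constant 5, the first iteration number at which the condition
// evaluates to true.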
9054 
9055 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
9056   SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
9057       ValuesAtScopes[V];
9058   // Check to see if we've folded this expression at this loop before.
9059   for (auto &LS : Values)
9060     if (LS.first == L)
9061       return LS.second ? LS.second : V;
9062 
9063   Values.emplace_back(L, nullptr);
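  // Note: this nullptr placeholder also acts as a recursion sentinel: if the
  // computeSCEVAtScope call below re-queries the same (V, L) pair, the lookup
  // above returns V itself rather than recursing forever.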
9064 
9065   // Otherwise compute it.
9066   const SCEV *C = computeSCEVAtScope(V, L);
9067   for (auto &LS : reverse(ValuesAtScopes[V]))
9068     if (LS.first == L) {
9069       LS.second = C;
9070       if (!isa<SCEVConstant>(C))
9071         ValuesAtScopesUsers[C].push_back({L, V});
9072       break;
9073     }
9074   return C;
9075 }
9076 
9077 /// This builds up a Constant using the ConstantExpr interface.  That way, we
9078 /// will return Constants for objects which aren't represented by a
9079 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
9080 /// Returns NULL if the SCEV isn't representable as a Constant.
9081 static Constant *BuildConstantFromSCEV(const SCEV *V) {
9082   switch (V->getSCEVType()) {
9083   case scCouldNotCompute:
9084   case scAddRecExpr:
9085     return nullptr;
9086   case scConstant:
9087     return cast<SCEVConstant>(V)->getValue();
9088   case scUnknown:
9089     return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
9090   case scSignExtend: {
9091     const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
9092     if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
9093       return ConstantExpr::getSExt(CastOp, SS->getType());
9094     return nullptr;
9095   }
9096   case scZeroExtend: {
9097     const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
9098     if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
9099       return ConstantExpr::getZExt(CastOp, SZ->getType());
9100     return nullptr;
9101   }
9102   case scPtrToInt: {
9103     const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
9104     if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
9105       return ConstantExpr::getPtrToInt(CastOp, P2I->getType());
9106 
9107     return nullptr;
9108   }
9109   case scTruncate: {
9110     const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
9111     if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
9112       return ConstantExpr::getTrunc(CastOp, ST->getType());
9113     return nullptr;
9114   }
9115   case scAddExpr: {
9116     const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
9117     if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
9118       if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
9119         unsigned AS = PTy->getAddressSpace();
9120         Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
9121         C = ConstantExpr::getBitCast(C, DestPtrTy);
9122       }
9123       for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
9124         Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
9125         if (!C2)
9126           return nullptr;
9127 
9128         // First pointer!
9129         if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
9130           unsigned AS = C2->getType()->getPointerAddressSpace();
9131           std::swap(C, C2);
9132           Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
9133           // The offsets have been converted to bytes.  We can add bytes to an
9134           // i8* by GEP with the byte count in the first index.
9135           C = ConstantExpr::getBitCast(C, DestPtrTy);
9136         }
9137 
9138         // Don't bother trying to sum two pointers. We probably can't
9139         // statically compute a load that results from it anyway.
9140         if (C2->getType()->isPointerTy())
9141           return nullptr;
9142 
9143         if (C->getType()->isPointerTy()) {
9144           C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()),
9145                                              C, C2);
9146         } else {
9147           C = ConstantExpr::getAdd(C, C2);
9148         }
9149       }
9150       return C;
9151     }
9152     return nullptr;
9153   }
9154   case scMulExpr: {
9155     const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
9156     if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
9157       // Don't bother with pointers at all.
9158       if (C->getType()->isPointerTy())
9159         return nullptr;
9160       for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
9161         Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
9162         if (!C2 || C2->getType()->isPointerTy())
9163           return nullptr;
9164         C = ConstantExpr::getMul(C, C2);
9165       }
9166       return C;
9167     }
9168     return nullptr;
9169   }
9170   case scUDivExpr: {
9171     const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
9172     if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
9173       if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
9174         if (LHS->getType() == RHS->getType())
9175           return ConstantExpr::getUDiv(LHS, RHS);
9176     return nullptr;
9177   }
9178   case scSMaxExpr:
9179   case scUMaxExpr:
9180   case scSMinExpr:
9181   case scUMinExpr:
9182   case scSequentialUMinExpr:
9183     return nullptr; // TODO: smax, umax, smin, umin, umin_seq.
9184   }
9185   llvm_unreachable("Unknown SCEV kind!");
9186 }
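
// For example (an illustrative case, not exhaustive): for a SCEV of the form
// (8 + @g), where @g is a global, the scAddExpr case above swaps the operands
// when it sees the first pointer and produces roughly the constant expression
// "getelementptr i8, i8* bitcast(@g), i64 8", since byte offsets are added to
// pointers via a GEP over i8 rather than an integer add.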
9187 
9188 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
9189   if (isa<SCEVConstant>(V)) return V;
9190 
9191   // If this instruction is evolved from a constant-evolving PHI, compute the
9192   // exit value from the loop without using SCEVs.
9193   if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
9194     if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
9195       if (PHINode *PN = dyn_cast<PHINode>(I)) {
9196         const Loop *CurrLoop = this->LI[I->getParent()];
9197         // Looking for loop exit value.
9198         if (CurrLoop && CurrLoop->getParentLoop() == L &&
9199             PN->getParent() == CurrLoop->getHeader()) {
9200           // Okay, there is no closed form solution for the PHI node.  Check
9201           // to see if the loop that contains it has a known backedge-taken
9202           // count.  If so, we may be able to force computation of the exit
9203           // value.
9204           const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
9205           // This trivial case can show up in some degenerate cases where
9206           // the incoming IR has not yet been fully simplified.
9207           if (BackedgeTakenCount->isZero()) {
9208             Value *InitValue = nullptr;
9209             bool MultipleInitValues = false;
9210             for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
9211               if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
9212                 if (!InitValue)
9213                   InitValue = PN->getIncomingValue(i);
9214                 else if (InitValue != PN->getIncomingValue(i)) {
9215                   MultipleInitValues = true;
9216                   break;
9217                 }
9218               }
9219             }
9220             if (!MultipleInitValues && InitValue)
9221               return getSCEV(InitValue);
9222           }
9223           // Do we have a loop invariant value flowing around the backedge
9224           // for a loop which must execute the backedge?
9225           if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
9226               isKnownPositive(BackedgeTakenCount) &&
9227               PN->getNumIncomingValues() == 2) {
9228 
9229             unsigned InLoopPred =
9230                 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
9231             Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
9232             if (CurrLoop->isLoopInvariant(BackedgeVal))
9233               return getSCEV(BackedgeVal);
9234           }
9235           if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
9236             // Okay, we know how many times the containing loop executes.  If
9237             // this is a constant evolving PHI node, get the final value at
9238             // the specified iteration number.
9239             Constant *RV = getConstantEvolutionLoopExitValue(
9240                 PN, BTCC->getAPInt(), CurrLoop);
9241             if (RV) return getSCEV(RV);
9242           }
9243         }
9244 
9245         // If there is a single-input Phi, evaluate it at our scope. If we can
9246         // prove that this replacement does not break LCSSA, use the new value.
9247         if (PN->getNumOperands() == 1) {
9248           const SCEV *Input = getSCEV(PN->getOperand(0));
9249           const SCEV *InputAtScope = getSCEVAtScope(Input, L);
9250           // TODO: We can generalize it using LI.replacementPreservesLCSSAForm,
9251           // for the simplest case just support constants.
9252           if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
9253         }
9254       }
9255 
9256       // Okay, this is an expression that we cannot symbolically evaluate
9257       // into a SCEV.  Check to see if it's possible to symbolically evaluate
9258       // the arguments into constants, and if so, try to constant propagate the
9259       // result.  This is particularly useful for computing loop exit values.
9260       if (CanConstantFold(I)) {
9261         SmallVector<Constant *, 4> Operands;
9262         bool MadeImprovement = false;
9263         for (Value *Op : I->operands()) {
9264           if (Constant *C = dyn_cast<Constant>(Op)) {
9265             Operands.push_back(C);
9266             continue;
9267           }
9268 
9269           // If any of the operands is non-constant and of a non-integer,
9270           // non-pointer type, don't even try to analyze it with SCEV
9271           // techniques.
9272           if (!isSCEVable(Op->getType()))
9273             return V;
9274 
9275           const SCEV *OrigV = getSCEV(Op);
9276           const SCEV *OpV = getSCEVAtScope(OrigV, L);
9277           MadeImprovement |= OrigV != OpV;
9278 
9279           Constant *C = BuildConstantFromSCEV(OpV);
9280           if (!C) return V;
9281           if (C->getType() != Op->getType())
9282             C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
9283                                                               Op->getType(),
9284                                                               false),
9285                                       C, Op->getType());
9286           Operands.push_back(C);
9287         }
9288 
9289         // Check to see if getSCEVAtScope actually made an improvement.
9290         if (MadeImprovement) {
9291           Constant *C = nullptr;
9292           const DataLayout &DL = getDataLayout();
9293           if (const CmpInst *CI = dyn_cast<CmpInst>(I))
9294             C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
9295                                                 Operands[1], DL, &TLI);
9296           else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
9297             if (!Load->isVolatile())
9298               C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
9299                                                DL);
9300           } else
9301             C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
9302           if (!C) return V;
9303           return getSCEV(C);
9304         }
9305       }
9306     }
9307 
9308     // This is some other type of SCEVUnknown, just return it.
9309     return V;
9310   }
9311 
9312   if (isa<SCEVCommutativeExpr>(V) || isa<SCEVSequentialMinMaxExpr>(V)) {
9313     const auto *Comm = cast<SCEVNAryExpr>(V);
9314     // Avoid performing the look-up in the common case where the specified
9315     // expression has no loop-variant portions.
9316     for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
9317       const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
9318       if (OpAtScope != Comm->getOperand(i)) {
9319         // Okay, at least one of these operands is loop variant but might be
9320         // foldable.  Build a new instance of the folded commutative expression.
9321         SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
9322                                             Comm->op_begin()+i);
9323         NewOps.push_back(OpAtScope);
9324 
9325         for (++i; i != e; ++i) {
9326           OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
9327           NewOps.push_back(OpAtScope);
9328         }
9329         if (isa<SCEVAddExpr>(Comm))
9330           return getAddExpr(NewOps, Comm->getNoWrapFlags());
9331         if (isa<SCEVMulExpr>(Comm))
9332           return getMulExpr(NewOps, Comm->getNoWrapFlags());
9333         if (isa<SCEVMinMaxExpr>(Comm))
9334           return getMinMaxExpr(Comm->getSCEVType(), NewOps);
9335         if (isa<SCEVSequentialMinMaxExpr>(Comm))
9336           return getSequentialMinMaxExpr(Comm->getSCEVType(), NewOps);
9337         llvm_unreachable("Unknown commutative / sequential min/max SCEV type!");
9338       }
9339     }
9340     // If we got here, all operands are loop invariant.
9341     return Comm;
9342   }
9343 
9344   if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
9345     const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
9346     const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
9347     if (LHS == Div->getLHS() && RHS == Div->getRHS())
9348       return Div;   // must be loop invariant
9349     return getUDivExpr(LHS, RHS);
9350   }
9351 
9352   // If this is a loop recurrence for a loop that does not contain L, then we
9353   // are dealing with the final value computed by the loop.
9354   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
9355     // First, attempt to evaluate each operand.
9356     // Avoid performing the look-up in the common case where the specified
9357     // expression has no loop-variant portions.
9358     for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
9359       const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
9360       if (OpAtScope == AddRec->getOperand(i))
9361         continue;
9362 
9363       // Okay, at least one of these operands is loop variant but might be
9364       // foldable.  Build a new instance of the folded addrec expression.
9365       SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
9366                                           AddRec->op_begin()+i);
9367       NewOps.push_back(OpAtScope);
9368       for (++i; i != e; ++i)
9369         NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
9370 
9371       const SCEV *FoldedRec =
9372         getAddRecExpr(NewOps, AddRec->getLoop(),
9373                       AddRec->getNoWrapFlags(SCEV::FlagNW));
9374       AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
9375       // The addrec may be folded to a nonrecurrence, for example, if the
9376       // induction variable is multiplied by zero after constant folding. Go
9377       // ahead and return the folded value.
9378       if (!AddRec)
9379         return FoldedRec;
9380       break;
9381     }
9382 
9383     // If the scope is outside the addrec's loop, evaluate it by using the
9384     // loop exit value of the addrec.
9385     if (!AddRec->getLoop()->contains(L)) {
9386       // To evaluate this recurrence, we need to know how many times the AddRec
9387       // loop iterates.  Compute this now.
9388       const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
9389       if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
9390 
9391       // Then, evaluate the AddRec.
9392       return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
9393     }
9394 
9395     return AddRec;
9396   }
9397 
9398   if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
9399     const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9400     if (Op == Cast->getOperand())
9401       return Cast;  // must be loop invariant
9402     return getCastExpr(Cast->getSCEVType(), Op, Cast->getType());
9403   }
9404 
9405   llvm_unreachable("Unknown SCEV type!");
9406 }
9407 
9408 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
9409   return getSCEVAtScope(getSCEV(V), L);
9410 }
9411 
9412 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
9413   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
9414     return stripInjectiveFunctions(ZExt->getOperand());
9415   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
9416     return stripInjectiveFunctions(SExt->getOperand());
9417   return S;
9418 }
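
// Stripping zext/sext this way is sound for the zero-finding use below
// because both extensions are injective and map zero to zero: ext(X) == 0
// exactly when X == 0, so the stripped expression has the same roots as the
// original.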
9419 
9420 /// Finds the minimum unsigned root of the following equation:
9421 ///
9422 ///     A * X = B (mod N)
9423 ///
9424 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
9425 /// A and B isn't important.
9426 ///
9427 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
9428 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
9429                                                ScalarEvolution &SE) {
9430   uint32_t BW = A.getBitWidth();
9431   assert(BW == SE.getTypeSizeInBits(B->getType()));
9432   assert(A != 0 && "A must be non-zero.");
9433 
9434   // 1. D = gcd(A, N)
9435   //
9436   // The gcd of A and N may have only one prime factor: 2. The number of
9437   // trailing zeros in A is its multiplicity.
9438   uint32_t Mult2 = A.countTrailingZeros();
9439   // D = 2^Mult2
9440 
9441   // 2. Check if B is divisible by D.
9442   //
9443   // B is divisible by D if and only if the multiplicity of prime factor 2 for B
9444   // is not less than the multiplicity of this prime factor for D.
9445   if (SE.GetMinTrailingZeros(B) < Mult2)
9446     return SE.getCouldNotCompute();
9447 
9448   // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
9449   // modulo (N / D).
9450   //
9451   // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
9452   // (N / D) in general. The inverse itself always fits into BW bits, though,
9453   // so we immediately truncate it.
9454   APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
9455   APInt Mod(BW + 1, 0);
9456   Mod.setBit(BW - Mult2);  // Mod = N / D
9457   APInt I = AD.multiplicativeInverse(Mod).trunc(BW);
9458 
9459   // 4. Compute the minimum unsigned root of the equation:
9460   // I * (B / D) mod (N / D)
9461   // To simplify the computation, we factor out the divide by D:
9462   // (I * B mod N) / D
9463   const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
9464   return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
9465 }
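
// A worked example of the algorithm above, as a standalone sketch using plain
// 8-bit arithmetic instead of APInt (illustrative only).  For A = 24, B = 48:
// Mult2 = 3, so D = 8; B has 4 >= 3 trailing zeros, so a solution exists;
// AD = 3, Mod = 32, I = 3^-1 mod 32 = 11; and the minimum root is
// (11 * 48 mod 256) / 8 == 2, where indeed 24 * 2 == 48 (mod 256).
#include <cassert>

static unsigned solveLinEq8(unsigned A, unsigned B) {
  assert(A != 0 && A < 256 && B < 256);
  unsigned Mult2 = __builtin_ctz(A);                 // multiplicity of 2 in A
  assert((B == 0 || unsigned(__builtin_ctz(B)) >= Mult2) && "not solvable");
  unsigned Mod = 1u << (8 - Mult2);                  // Mod = N / D
  unsigned AD = A >> Mult2;                          // AD = A / D, always odd
  unsigned I = 1;                                    // inverse of AD mod Mod
  while ((AD * I) % Mod != 1)
    ++I;
  return ((I * B) % 256) >> Mult2;                   // (I * B mod N) / D
}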
9466 
9467 /// For a given quadratic addrec, generate coefficients of the corresponding
9468 /// quadratic equation, multiplied by a common value to ensure that they are
9469 /// integers.
9470 /// The returned value is a tuple { A, B, C, M, BitWidth }, where
9471 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
9472 /// were multiplied by, and BitWidth is the bit width of the original addrec
9473 /// coefficients.
9474 /// This function returns None if the addrec coefficients are not compile-
9475 /// time constants.
9476 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
9477 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
9478   assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
9479   const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
9480   const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
9481   const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
9482   LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
9483                     << *AddRec << '\n');
9484 
9485   // We currently can only solve this if the coefficients are constants.
9486   if (!LC || !MC || !NC) {
9487     LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
9488     return None;
9489   }
9490 
9491   APInt L = LC->getAPInt();
9492   APInt M = MC->getAPInt();
9493   APInt N = NC->getAPInt();
9494   assert(!N.isZero() && "This is not a quadratic addrec");
9495 
9496   unsigned BitWidth = LC->getAPInt().getBitWidth();
9497   unsigned NewWidth = BitWidth + 1;
9498   LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
9499                     << BitWidth << '\n');
9500   // The sign-extension (as opposed to a zero-extension) here matches the
9501   // extension used in SolveQuadraticEquationWrap (with the same motivation).
9502   N = N.sext(NewWidth);
9503   M = M.sext(NewWidth);
9504   L = L.sext(NewWidth);
9505 
9506   // The increments are M, M+N, M+2N, ..., so the accumulated values are
9507   //   L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
9508   //   L+M, L+2M+N, L+3M+3N, ...
9509   // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
9510   //
9511   // The equation Acc = 0 is then
9512   //   L + nM + n(n-1)/2 N = 0,  or  2L + 2M n + n(n-1) N = 0.
9513   // In a quadratic form it becomes:
9514   //   N n^2 + (2M-N) n + 2L = 0.
9515 
9516   APInt A = N;
9517   APInt B = 2 * M - A;
9518   APInt C = 2 * L;
9519   APInt T = APInt(NewWidth, 2);
9520   LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
9521                     << "x + " << C << ", coeff bw: " << NewWidth
9522                     << ", multiplied by " << T << '\n');
9523   return std::make_tuple(A, B, C, T, BitWidth);
9524 }
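
// A small self-checking sketch of the derivation above (illustrative only,
// using 64-bit integers instead of APInt): for the chrec {L,+,M,+,N}, the
// accumulated value after n iterations is L + n*M + n*(n-1)/2 * N, so after
// multiplying by 2 the equation Acc == 0 becomes N*n^2 + (2M - N)*n + 2L == 0.
#include <cassert>
#include <cstdint>

static void checkQuadraticForm(int64_t L, int64_t M, int64_t N) {
  int64_t Acc = L, Inc = M;
  for (int64_t n = 1; n <= 16; ++n) {
    Acc += Inc; // One chrec step: add the current increment...
    Inc += N;   // ...which itself grows by N each iteration.
    // The doubled closed form must match twice the accumulated value.
    assert(2 * Acc == N * n * n + (2 * M - N) * n + 2 * L);
  }
}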
9525 
9526 /// Helper function to compare optional APInts:
9527 /// (a) if X and Y both exist, return min(X, Y),
9528 /// (b) if neither X nor Y exist, return None,
9529 /// (c) if exactly one of X and Y exists, return that value.
9530 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
9531   if (X.hasValue() && Y.hasValue()) {
9532     unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
9533     APInt XW = X->sextOrSelf(W);
9534     APInt YW = Y->sextOrSelf(W);
9535     return XW.slt(YW) ? *X : *Y;
9536   }
9537   if (!X.hasValue() && !Y.hasValue())
9538     return None;
9539   return X.hasValue() ? *X : *Y;
9540 }
9541 
9542 /// Helper function to truncate an optional APInt to a given BitWidth.
9543 /// When solving addrec-related equations, it is preferable to return a value
9544 /// that has the same bit width as the original addrec's coefficients. If the
9545 /// solution fits in the original bit width, truncate it (except for i1).
9546 /// Returning a value of a different bit width may inhibit some optimizations.
9547 ///
9548 /// In general, a solution to a quadratic equation generated from an addrec
9549 /// may require BW+1 bits, where BW is the bit width of the addrec's
9550 /// coefficients. The reason is that the coefficients of the quadratic
9551 /// equation are BW+1 bits wide (to avoid truncation when converting from
9552 /// the addrec to the equation).
9553 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
9554   if (!X.hasValue())
9555     return None;
9556   unsigned W = X->getBitWidth();
9557   if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
9558     return X->trunc(BitWidth);
9559   return X;
9560 }
9561 
9562 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
9563 /// iterations. The values L, M, N are assumed to be signed, and they
9564 /// should all have the same bit widths.
9565 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
9566 /// where BW is the bit width of the addrec's coefficients.
9567 /// If the calculated value is a BW-bit integer (for BW > 1), it will be
9568 /// returned as such, otherwise the bit width of the returned value may
9569 /// be greater than BW.
9570 ///
9571 /// This function returns None if
9572 /// (a) the addrec coefficients are not constant, or
9573 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
9574 ///     like x^2 = 5, no integer solutions exist, in other cases an integer
9575 ///     solution may exist, but SolveQuadraticEquationWrap may fail to find it.
9576 static Optional<APInt>
9577 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
9578   APInt A, B, C, M;
9579   unsigned BitWidth;
9580   auto T = GetQuadraticEquation(AddRec);
9581   if (!T.hasValue())
9582     return None;
9583 
9584   std::tie(A, B, C, M, BitWidth) = *T;
9585   LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
9586   Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
9587   if (!X.hasValue())
9588     return None;
9589 
9590   ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
9591   ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
9592   if (!V->isZero())
9593     return None;
9594 
9595   return TruncIfPossible(X, BitWidth);
9596 }
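
// For example (illustrative): the chrec {-6,+,2,+,2} takes the values
// -6, -4, 0, ..., so the least n with c(n) == 0 is n == 2.  The equation
// produced by GetQuadraticEquation is 2n^2 + 2n - 12 == 0 (A = N = 2,
// B = 2M - N = 2, C = 2L = -12), and SolveQuadraticEquationWrap finds the
// root 2.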
9597 
9598 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
9599 /// iterations. The values M, N are assumed to be signed, and they
9600 /// should all have the same bit widths.
9601 /// Find the least n such that c(n) does not belong to the given range,
9602 /// while c(n-1) does.
9603 ///
9604 /// This function returns None if
9605 /// (a) the addrec coefficients are not constant, or
9606 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the
9607 ///     bounds of the range.
9608 static Optional<APInt>
9609 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
9610                           const ConstantRange &Range, ScalarEvolution &SE) {
9611   assert(AddRec->getOperand(0)->isZero() &&
9612          "Starting value of addrec should be 0");
9613   LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
9614                     << Range << ", addrec " << *AddRec << '\n');
9615   // This case is handled in getNumIterationsInRange. Here we can assume that
9616   // we start in the range.
9617   assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
9618          "Addrec's initial value should be in range");
9619 
9620   APInt A, B, C, M;
9621   unsigned BitWidth;
9622   auto T = GetQuadraticEquation(AddRec);
9623   if (!T.hasValue())
9624     return None;
9625 
9626   // Be careful about the return value: there can be two reasons for not
9627   // returning an actual number. First, if no solutions to the equations
9628   // were found, and second, if the solutions don't leave the given range.
9629   // The first case means that the actual solution is "unknown", the second
9630   // means that it's known, but not valid. If the solution is unknown, we
9631   // cannot make any conclusions.
9632   // Return a pair: the optional solution and a flag indicating if the
9633   // solution was found.
9634   auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
9635     // Solve for signed overflow and unsigned overflow, pick the lower
9636     // solution.
9637     LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
9638                       << Bound << " (before multiplying by " << M << ")\n");
9639     Bound *= M; // The quadratic equation multiplier.
9640 
9641     Optional<APInt> SO = None;
9642     if (BitWidth > 1) {
9643       LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
9644                            "signed overflow\n");
9645       SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
9646     }
9647     LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
9648                          "unsigned overflow\n");
9649     Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
9650                                                               BitWidth+1);
9651 
9652     auto LeavesRange = [&] (const APInt &X) {
9653       ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
9654       ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
9655       if (Range.contains(V0->getValue()))
9656         return false;
9657       // X should be at least 1, so X-1 is non-negative.
9658       ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
9659       ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
9660       if (Range.contains(V1->getValue()))
9661         return true;
9662       return false;
9663     };
9664 
9665     // If SolveQuadraticEquationWrap returns None, it means that there can
9666     // be a solution, but the function failed to find it. We cannot treat it
9667     // as "no solution".
9668     if (!SO.hasValue() || !UO.hasValue())
9669       return { None, false };
9670 
9671     // Check the smaller value first to see if it leaves the range.
9672     // At this point, both SO and UO must have values.
9673     Optional<APInt> Min = MinOptional(SO, UO);
9674     if (LeavesRange(*Min))
9675       return { Min, true };
9676     Optional<APInt> Max = Min == SO ? UO : SO;
9677     if (LeavesRange(*Max))
9678       return { Max, true };
9679 
9680     // Solutions were found, but were eliminated, hence the "true".
9681     return { None, true };
9682   };
9683 
9684   std::tie(A, B, C, M, BitWidth) = *T;
9685   // Lower bound is inclusive, subtract 1 to represent the exiting value.
9686   APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
9687   APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
9688   auto SL = SolveForBoundary(Lower);
9689   auto SU = SolveForBoundary(Upper);
9690   // If any of the solutions was unknown, no meaningful conclusions can
9691   // be made.
9692   if (!SL.second || !SU.second)
9693     return None;
9694 
9695   // Claim: The correct solution is not some value between Min and Max.
9696   //
9697   // Justification: Assuming that Min and Max are different values, one of
9698   // them is when the first signed overflow happens, the other is when the
9699   // first unsigned overflow happens. Crossing the range boundary is only
9700   // possible via an overflow (treating 0 as a special case of it, modeling
9701   // an overflow as crossing k*2^W for some k).
9702   //
9703   // The interesting case here is when Min was eliminated as an invalid
9704   // solution, but Max was not. The argument is that if there was another
9705   // overflow between Min and Max, it would also have been eliminated if
9706   // it was considered.
9707   //
9708   // For a given boundary, it is possible to have two overflows of the same
9709   // type (signed/unsigned) without having the other type in between: this
9710   // can happen when the vertex of the parabola is between the iterations
9711   // corresponding to the overflows. This is only possible when the two
9712   // overflows cross k*2^W for the same k. In such a case, if the second one
9713   // left the range (and was the first one to do so), the first overflow
9714   // would have to enter the range, which would mean that either we had left
9715   // the range before or that we started outside of it. Both of these cases
9716   // are contradictions.
9717   //
9718   // Claim: In the case where SolveForBoundary returns None, the correct
9719   // solution is not some value between the Max for this boundary and the
9720   // Min of the other boundary.
9721   //
9722   // Justification: Assume that we had such Max_A and Min_B corresponding
9723   // to range boundaries A and B and such that Max_A < Min_B. If there was
9724   // a solution between Max_A and Min_B, it would have to be caused by an
9725   // overflow corresponding to either A or B. It cannot correspond to B,
9726   // since Min_B is the first occurrence of such an overflow. If it
9727   // corresponded to A, it would have to be either a signed or an unsigned
9728   // overflow that is larger than both eliminated overflows for A. But
9729   // between the eliminated overflows and this overflow, the values would
9730   // cover the entire value space, thus crossing the other boundary, which
9731   // is a contradiction.
9732 
9733   return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
9734 }
9735 
9736 ScalarEvolution::ExitLimit
9737 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
9738                               bool AllowPredicates) {
9739 
9740   // This is only used for loops with an "x != y" exit test. The exit condition
9741   // is now expressed as a single expression, V = x-y. So the exit test is
9742   // effectively V != 0.  We know and take advantage of the fact that this
9743   // expression is only used in a comparison-with-zero context.
9744 
9745   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
9746   // If the value is a constant
9747   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9748     // If the value is already zero, the branch will execute zero times.
9749     if (C->getValue()->isZero()) return C;
9750     return getCouldNotCompute();  // Otherwise it will loop infinitely.
9751   }
9752 
9753   const SCEVAddRecExpr *AddRec =
9754       dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
9755 
9756   if (!AddRec && AllowPredicates)
9757     // Try to make this an AddRec using runtime tests, in the first X
9758     // iterations of this loop, where X is the SCEV expression found by the
9759     // algorithm below.
9760     AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
9761 
9762   if (!AddRec || AddRec->getLoop() != L)
9763     return getCouldNotCompute();
9764 
9765   // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
9766   // the quadratic equation to solve it.
9767   if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
9768     // We can only use this value if the chrec ends up with an exact zero
9769     // value at this index.  When solving for "X*X != 5", for example, we
9770     // should not accept a root of 2.
9771     if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
9772       const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
9773       return ExitLimit(R, R, false, Predicates);
9774     }
9775     return getCouldNotCompute();
9776   }
9777 
9778   // Otherwise we can only handle this if it is affine.
9779   if (!AddRec->isAffine())
9780     return getCouldNotCompute();
9781 
9782   // If this is an affine expression, the execution count of this branch is
9783   // the minimum unsigned root of the following equation:
9784   //
9785   //     Start + Step*N = 0 (mod 2^BW)
9786   //
9787   // equivalent to:
9788   //
9789   //             Step*N = -Start (mod 2^BW)
9790   //
9791   // where BW is the common bit width of Start and Step.
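  //
  // For example (illustrative numbers only): for {8,+,-2} over i8 the loop
  // exits after N = 4 backedges, since 8 + (-2)*4 == 0.  Below this becomes
  // SolveLinEquationWithOverflow with A = -2 (i.e. 254) and B = -Start == 248
  // (mod 256), whose minimum unsigned root is 4.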
9792 
9793   // Get the initial value for the loop.
9794   const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
9795   const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
9796 
9797   // For now we handle only constant steps.
9798   //
9799   // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
9800   // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
9801   // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
9802   // We have not yet seen any such cases.
9803   const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
9804   if (!StepC || StepC->getValue()->isZero())
9805     return getCouldNotCompute();
9806 
9807   // For positive steps (counting up until unsigned overflow):
9808   //   N = -Start/Step (as unsigned)
9809   // For negative steps (counting down to zero):
9810   //   N = Start/-Step
9811   // First compute the unsigned distance from zero in the direction of Step.
9812   bool CountDown = StepC->getAPInt().isNegative();
9813   const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
9814 
9815   // Handle unitary steps, which cannot wraparound.
9816   // 1*N = -Start; -1*N = Start (mod 2^BW), so:
9817   //   N = Distance (as unsigned)
9818   if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
9819     APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
9820     MaxBECount = APIntOps::umin(MaxBECount, getUnsignedRangeMax(Distance));
9821 
9822     // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
9823     // we end up with a loop whose backedge-taken count is n - 1.  Detect this
9824     // case, and see if we can improve the bound.
9825     //
9826     // Explicitly handling this here is necessary because getUnsignedRange
9827     // isn't context-sensitive; it doesn't know that we only care about the
9828     // range inside the loop.
9829     const SCEV *Zero = getZero(Distance->getType());
9830     const SCEV *One = getOne(Distance->getType());
9831     const SCEV *DistancePlusOne = getAddExpr(Distance, One);
9832     if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
9833       // If Distance + 1 doesn't overflow, we can compute the maximum distance
9834       // as "unsigned_max(Distance + 1) - 1".
9835       ConstantRange CR = getUnsignedRange(DistancePlusOne);
9836       MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
9837     }
9838     return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
9839   }
9840 
9841   // If the condition controls loop exit (the loop exits only if the expression
9842   // is true) and the addition is no-wrap we can use unsigned divide to
9843   // compute the backedge count.  In this case, the step may not divide the
9844   // distance, but we don't care because if the condition is "missed" the loop
9845   // will have undefined behavior due to wrapping.
9846   if (ControlsExit && AddRec->hasNoSelfWrap() &&
9847       loopHasNoAbnormalExits(AddRec->getLoop())) {
9848     const SCEV *Exact =
9849         getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
9850     const SCEV *Max = getCouldNotCompute();
9851     if (Exact != getCouldNotCompute()) {
9852       APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L));
9853       Max = getConstant(APIntOps::umin(MaxInt, getUnsignedRangeMax(Exact)));
9854     }
9855     return ExitLimit(Exact, Max, false, Predicates);
9856   }
9857 
9858   // Solve the general equation.
9859   const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
9860                                                getNegativeSCEV(Start), *this);
9861 
9862   const SCEV *M = E;
9863   if (E != getCouldNotCompute()) {
9864     APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, L));
9865     M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E)));
9866   }
9867   return ExitLimit(E, M, false, Predicates);
9868 }
9869 
9870 ScalarEvolution::ExitLimit
9871 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
9872   // Loops that look like: while (X == 0) are very strange indeed.  We don't
9873   // handle them yet except for the trivial case.  This could be expanded in the
9874   // future as needed.
9875 
9876   // If the value is a constant, check to see if it is known to be non-zero
9877   // already.  If so, the backedge will execute zero times.
9878   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9879     if (!C->getValue()->isZero())
9880       return getZero(C->getType());
9881     return getCouldNotCompute();  // Otherwise it will loop infinitely.
9882   }
9883 
9884   // We could implement others, but I really doubt anyone writes loops like
9885   // this, and if they did, they would already be constant folded.
9886   return getCouldNotCompute();
9887 }
9888 
9889 std::pair<const BasicBlock *, const BasicBlock *>
9890 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
9891     const {
9892   // If the block has a unique predecessor, then there is no path from the
9893   // predecessor to the block that does not go through the direct edge
9894   // from the predecessor to the block.
9895   if (const BasicBlock *Pred = BB->getSinglePredecessor())
9896     return {Pred, BB};
9897 
9898   // A loop's header is defined to be a block that dominates the loop.
9899   // If the header has a unique predecessor outside the loop, it must be
9900   // a block that has exactly one successor that can reach the loop.
9901   if (const Loop *L = LI.getLoopFor(BB))
9902     return {L->getLoopPredecessor(), L->getHeader()};
9903 
9904   return {nullptr, nullptr};
9905 }
9906 
9907 /// SCEV structural equivalence is usually sufficient for testing whether two
9908 /// expressions are equal, however for the purposes of looking for a condition
9909 /// guarding a loop, it can be useful to be a little more general, since a
9910 /// front-end may have replicated the controlling expression.
9911 static bool HasSameValue(const SCEV *A, const SCEV *B) {
9912   // Quick check to see if they are the same SCEV.
9913   if (A == B) return true;
9914 
9915   auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
9916     // Not all instructions that are "identical" compute the same value.  For
9917     // instance, two distinct alloca instructions allocating the same type are
9918     // identical and do not read memory; but compute distinct values.
9919     return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
9920   };
9921 
9922   // Otherwise, if they're both SCEVUnknown, it's possible that they hold
9923   // two different instructions with the same value. Check for this case.
9924   if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
9925     if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
9926       if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
9927         if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
9928           if (ComputesEqualValues(AI, BI))
9929             return true;
9930 
9931   // Otherwise assume they may have a different value.
9932   return false;
9933 }
9934 
9935 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
9936                                            const SCEV *&LHS, const SCEV *&RHS,
9937                                            unsigned Depth,
9938                                            bool ControllingFiniteLoop) {
9939   bool Changed = false;
9940   // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
9941   // '0 != 0'.
9942   auto TrivialCase = [&](bool TriviallyTrue) {
9943     LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
9944     Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
9945     return true;
9946   };
9947   // If we hit the max recursion limit bail out.
9948   if (Depth >= 3)
9949     return false;
9950 
9951   // Canonicalize a constant to the right side.
9952   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
9953     // Check for both operands constant.
9954     if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
9955       if (ConstantExpr::getICmp(Pred,
9956                                 LHSC->getValue(),
9957                                 RHSC->getValue())->isNullValue())
9958         return TrivialCase(false);
9959       else
9960         return TrivialCase(true);
9961     }
9962     // Otherwise swap the operands to put the constant on the right.
9963     std::swap(LHS, RHS);
9964     Pred = ICmpInst::getSwappedPredicate(Pred);
9965     Changed = true;
9966   }
9967 
9968   // If we're comparing an addrec with a value which is loop-invariant in the
9969   // addrec's loop, put the addrec on the left. Also make a dominance check,
9970   // as both operands could be addrecs loop-invariant in each other's loop.
9971   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
9972     const Loop *L = AR->getLoop();
9973     if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
9974       std::swap(LHS, RHS);
9975       Pred = ICmpInst::getSwappedPredicate(Pred);
9976       Changed = true;
9977     }
9978   }
9979 
9980   // If there's a constant operand, canonicalize comparisons with boundary
9981   // cases, and canonicalize *-or-equal comparisons to regular comparisons.
9982   if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
9983     const APInt &RA = RC->getAPInt();
9984 
9985     bool SimplifiedByConstantRange = false;
9986 
9987     if (!ICmpInst::isEquality(Pred)) {
9988       ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
9989       if (ExactCR.isFullSet())
9990         return TrivialCase(true);
9991       else if (ExactCR.isEmptySet())
9992         return TrivialCase(false);
9993 
9994       APInt NewRHS;
9995       CmpInst::Predicate NewPred;
9996       if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
9997           ICmpInst::isEquality(NewPred)) {
9998         // We were able to convert an inequality to an equality.
9999         Pred = NewPred;
10000         RHS = getConstant(NewRHS);
10001         Changed = SimplifiedByConstantRange = true;
10002       }
10003     }
10004 
10005     if (!SimplifiedByConstantRange) {
10006       switch (Pred) {
10007       default:
10008         break;
10009       case ICmpInst::ICMP_EQ:
10010       case ICmpInst::ICMP_NE:
10011         // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
10012         if (!RA)
10013           if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
10014             if (const SCEVMulExpr *ME =
10015                     dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
10016               if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
10017                   ME->getOperand(0)->isAllOnesValue()) {
10018                 RHS = AE->getOperand(1);
10019                 LHS = ME->getOperand(1);
10020                 Changed = true;
10021               }
10022         break;
10023 
10024 
10025         // The "Should have been caught earlier!" messages refer to the fact
10026         // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
10027         // should have fired on the corresponding cases, and canonicalized the
10028         // check to a trivial case.
10029 
10030       case ICmpInst::ICMP_UGE:
10031         assert(!RA.isMinValue() && "Should have been caught earlier!");
10032         Pred = ICmpInst::ICMP_UGT;
10033         RHS = getConstant(RA - 1);
10034         Changed = true;
10035         break;
10036       case ICmpInst::ICMP_ULE:
10037         assert(!RA.isMaxValue() && "Should have been caught earlier!");
10038         Pred = ICmpInst::ICMP_ULT;
10039         RHS = getConstant(RA + 1);
10040         Changed = true;
10041         break;
10042       case ICmpInst::ICMP_SGE:
10043         assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
10044         Pred = ICmpInst::ICMP_SGT;
10045         RHS = getConstant(RA - 1);
10046         Changed = true;
10047         break;
10048       case ICmpInst::ICMP_SLE:
10049         assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
10050         Pred = ICmpInst::ICMP_SLT;
10051         RHS = getConstant(RA + 1);
10052         Changed = true;
10053         break;
10054       }
10055     }
10056   }
10057 
10058   // Check for obvious equality.
10059   if (HasSameValue(LHS, RHS)) {
10060     if (ICmpInst::isTrueWhenEqual(Pred))
10061       return TrivialCase(true);
10062     if (ICmpInst::isFalseWhenEqual(Pred))
10063       return TrivialCase(false);
10064   }
10065 
10066   // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
10067   // adding or subtracting 1 from one of the operands. This can be done for
10068   // one of two reasons:
10069   // 1) The range of the RHS does not include the (signed/unsigned) boundaries
10070   // 2) The loop is finite, with this comparison controlling the exit. Since the
10071   // loop is finite, the bound cannot include the corresponding boundary
10072   // (otherwise it would loop forever).
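  //
  // For example, "x <=u y" becomes "x <u y + 1" when the unsigned range of y
  // is known to exclude UINT_MAX, so the increment cannot wrap (reason 1), or
  // when the comparison controls the exit of a finite loop (reason 2).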
10073   switch (Pred) {
10074   case ICmpInst::ICMP_SLE:
10075     if (ControllingFiniteLoop || !getSignedRangeMax(RHS).isMaxSignedValue()) {
10076       RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
10077                        SCEV::FlagNSW);
10078       Pred = ICmpInst::ICMP_SLT;
10079       Changed = true;
10080     } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
10081       LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
10082                        SCEV::FlagNSW);
10083       Pred = ICmpInst::ICMP_SLT;
10084       Changed = true;
10085     }
10086     break;
10087   case ICmpInst::ICMP_SGE:
10088     if (ControllingFiniteLoop || !getSignedRangeMin(RHS).isMinSignedValue()) {
10089       RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
10090                        SCEV::FlagNSW);
10091       Pred = ICmpInst::ICMP_SGT;
10092       Changed = true;
10093     } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
10094       LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
10095                        SCEV::FlagNSW);
10096       Pred = ICmpInst::ICMP_SGT;
10097       Changed = true;
10098     }
10099     break;
10100   case ICmpInst::ICMP_ULE:
10101     if (ControllingFiniteLoop || !getUnsignedRangeMax(RHS).isMaxValue()) {
10102       RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
10103                        SCEV::FlagNUW);
10104       Pred = ICmpInst::ICMP_ULT;
10105       Changed = true;
10106     } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
10107       LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
10108       Pred = ICmpInst::ICMP_ULT;
10109       Changed = true;
10110     }
10111     break;
10112   case ICmpInst::ICMP_UGE:
10113     if (ControllingFiniteLoop || !getUnsignedRangeMin(RHS).isMinValue()) {
10114       RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
10115       Pred = ICmpInst::ICMP_UGT;
10116       Changed = true;
10117     } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
10118       LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
10119                        SCEV::FlagNUW);
10120       Pred = ICmpInst::ICMP_UGT;
10121       Changed = true;
10122     }
10123     break;
10124   default:
10125     break;
10126   }
10127 
10128   // TODO: More simplifications are possible here.
10129 
10130   // Recursively simplify until we either hit a recursion limit or nothing
10131   // changes.
10132   if (Changed)
10133     return SimplifyICmpOperands(Pred, LHS, RHS, Depth + 1,
10134                                 ControllingFiniteLoop);
10135 
10136   return Changed;
10137 }
10138 
10139 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
10140   return getSignedRangeMax(S).isNegative();
10141 }
10142 
10143 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
10144   return getSignedRangeMin(S).isStrictlyPositive();
10145 }
10146 
10147 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
10148   return !getSignedRangeMin(S).isNegative();
10149 }
10150 
10151 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
10152   return !getSignedRangeMax(S).isStrictlyPositive();
10153 }
10154 
10155 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
10156   return getUnsignedRangeMin(S) != 0;
10157 }
10158 
10159 std::pair<const SCEV *, const SCEV *>
10160 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
10161   // Compute SCEV on entry of loop L.
10162   const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
10163   if (Start == getCouldNotCompute())
10164     return { Start, Start };
10165   // Compute post increment SCEV for loop L.
10166   const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
10167   assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
10168   return { Start, PostInc };
10169 }
10170 
10171 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
10172                                           const SCEV *LHS, const SCEV *RHS) {
10173   // First collect all loops.
10174   SmallPtrSet<const Loop *, 8> LoopsUsed;
10175   getUsedLoops(LHS, LoopsUsed);
10176   getUsedLoops(RHS, LoopsUsed);
10177 
10178   if (LoopsUsed.empty())
10179     return false;
10180 
10181   // Domination relationship must be a linear order on collected loops.
10182 #ifndef NDEBUG
10183   for (auto *L1 : LoopsUsed)
10184     for (auto *L2 : LoopsUsed)
10185       assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
10186               DT.dominates(L2->getHeader(), L1->getHeader())) &&
10187              "Domination relationship is not a linear order");
10188 #endif
10189 
10190   const Loop *MDL = *std::max_element(
10191       LoopsUsed.begin(), LoopsUsed.end(),
10192       [&](const Loop *L1, const Loop *L2) {
10193         return DT.properlyDominates(L1->getHeader(), L2->getHeader());
10194       });
10195 
10196   // Get init and post increment value for LHS.
10197   auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
10198   // If LHS contains an unknown non-invariant SCEV, bail out.
10199   if (SplitLHS.first == getCouldNotCompute())
10200     return false;
10201   assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
10202   // Get init and post increment value for RHS.
10203   auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
10204   // If RHS contains an unknown non-invariant SCEV, bail out.
10205   if (SplitRHS.first == getCouldNotCompute())
10206     return false;
10207   assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
10208   // The init SCEV may contain an invariant load that does not dominate MDL
10209   // and is therefore not available at MDL's loop entry, so we must check
10210   // that here.
10211   if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
10212       !isAvailableAtLoopEntry(SplitRHS.first, MDL))
10213     return false;
10214 
10215   // The backedge guard check appears to be faster than the entry guard check,
10216   // so querying it first can short-circuit the whole estimation in some cases.
10217   return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
10218                                      SplitRHS.second) &&
10219          isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
10220 }
10221 
10222 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
10223                                        const SCEV *LHS, const SCEV *RHS) {
10224   // Canonicalize the inputs first.
10225   (void)SimplifyICmpOperands(Pred, LHS, RHS);
10226 
10227   if (isKnownViaInduction(Pred, LHS, RHS))
10228     return true;
10229 
10230   if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
10231     return true;
10232 
10233   // Otherwise see what can be done with some simple reasoning.
10234   return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
10235 }
10236 
10237 Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
10238                                                   const SCEV *LHS,
10239                                                   const SCEV *RHS) {
10240   if (isKnownPredicate(Pred, LHS, RHS))
10241     return true;
10242   else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
10243     return false;
10244   return None;
10245 }
10246 
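// Illustrative sketch (editorial, not part of the implementation): unlike the
// boolean isKnownPredicate, evaluatePredicate is tri-state. Assuming X is a
// hypothetical SCEV with unsigned range [0, 10) and C20 a constant SCEV for 20:
//
//   Optional<bool> R = SE.evaluatePredicate(ICmpInst::ICMP_ULT, X, C20);
//   // R == true:  X u< 20 is provable (here, from the range of X).
//   // R == false: would mean the inverse predicate X u>= 20 is provable.
//   // R == None:  SCEV cannot decide either way.
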
10247 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
10248                                          const SCEV *LHS, const SCEV *RHS,
10249                                          const Instruction *CtxI) {
10250   // TODO: Analyze guards and assumes from Context's block.
10251   return isKnownPredicate(Pred, LHS, RHS) ||
10252          isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS);
10253 }
10254 
10255 Optional<bool> ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred,
10256                                                     const SCEV *LHS,
10257                                                     const SCEV *RHS,
10258                                                     const Instruction *CtxI) {
10259   Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
10260   if (KnownWithoutContext)
10261     return KnownWithoutContext;
10262 
10263   if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS))
10264     return true;
10265   else if (isBasicBlockEntryGuardedByCond(CtxI->getParent(),
10266                                           ICmpInst::getInversePredicate(Pred),
10267                                           LHS, RHS))
10268     return false;
10269   return None;
10270 }
10271 
10272 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
10273                                               const SCEVAddRecExpr *LHS,
10274                                               const SCEV *RHS) {
10275   const Loop *L = LHS->getLoop();
10276   return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
10277          isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
10278 }
10279 
10280 Optional<ScalarEvolution::MonotonicPredicateType>
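// Illustrative note (editorial, not part of the implementation): for
// LHS = {0,+,1}<L> and a loop-invariant N, isKnownOnEveryIteration asks two
// questions, both of which must hold:
//
//   // entry guard:    "0 u< N" holds when L is entered, and
//   // backedge guard: "{1,+,1}<L> u< N" holds whenever the backedge is
//   //                 taken, i.e. the post-increment value still satisfies
//   //                 the predicate.
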
10281 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
10282                                            ICmpInst::Predicate Pred) {
10283   auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
10284 
10285 #ifndef NDEBUG
10286   // Verify an invariant: inverting the predicate should turn a monotonically
10287   // increasing change to a monotonically decreasing one, and vice versa.
10288   if (Result) {
10289     auto ResultSwapped =
10290         getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
10291 
10292     assert(ResultSwapped.hasValue() && "should be able to analyze both!");
10293     assert(ResultSwapped.getValue() != Result.getValue() &&
10294            "monotonicity should flip as we flip the predicate");
10295   }
10296 #endif
10297 
10298   return Result;
10299 }
10300 
10301 Optional<ScalarEvolution::MonotonicPredicateType>
10302 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
10303                                                ICmpInst::Predicate Pred) {
10304   // A zero step value for LHS means the induction variable is essentially a
10305   // loop invariant value. We don't really depend on the predicate actually
10306   // flipping from false to true (for increasing predicates, and the other way
10307   // around for decreasing predicates), all we care about is that *if* the
10308   // predicate changes then it only changes from false to true.
10309   //
10310   // A zero step value in itself is not very useful, but there may be places
10311   // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
10312   // as general as possible.
10313 
10314   // Only handle LE/LT/GE/GT predicates.
10315   if (!ICmpInst::isRelational(Pred))
10316     return None;
10317 
10318   bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
10319   assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
10320          "Should be greater or less!");
10321 
10322   // Check that AR does not wrap.
10323   if (ICmpInst::isUnsigned(Pred)) {
10324     if (!LHS->hasNoUnsignedWrap())
10325       return None;
10326     return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10327   } else {
10328     assert(ICmpInst::isSigned(Pred) &&
10329            "Relational predicate is either signed or unsigned!");
10330     if (!LHS->hasNoSignedWrap())
10331       return None;
10332 
10333     const SCEV *Step = LHS->getStepRecurrence(*this);
10334 
10335     if (isKnownNonNegative(Step))
10336       return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10337 
10338     if (isKnownNonPositive(Step))
10339       return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10340 
10341     return None;
10342   }
10343 }
10344 
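// Illustrative sketch (editorial, not part of the implementation): for a
// hypothetical AR = {0,+,1}<nuw><L>, the truth value of "AR u< N" can only go
// from true to false as AR grows, so:
//
//   auto MT = SE.getMonotonicPredicateType(AR, ICmpInst::ICMP_ULT);
//   // MT == MonotonicallyDecreasing (unsigned pred, nuw, IsGreater == false);
//   // ICmpInst::ICMP_UGT on the same AR yields MonotonicallyIncreasing.
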
10345 Optional<ScalarEvolution::LoopInvariantPredicate>
10346 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
10347                                            const SCEV *LHS, const SCEV *RHS,
10348                                            const Loop *L) {
10349 
10350   // If there is a loop-invariant, force it into the RHS, otherwise bail out.
10351   if (!isLoopInvariant(RHS, L)) {
10352     if (!isLoopInvariant(LHS, L))
10353       return None;
10354 
10355     std::swap(LHS, RHS);
10356     Pred = ICmpInst::getSwappedPredicate(Pred);
10357   }
10358 
10359   const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
10360   if (!ArLHS || ArLHS->getLoop() != L)
10361     return None;
10362 
10363   auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
10364   if (!MonotonicType)
10365     return None;
10366   // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
10367   // true as the loop iterates, and the backedge is control dependent on
10368   // "ArLHS `Pred` RHS" == true then we can reason as follows:
10369   //
10370   //   * if the predicate was false in the first iteration then the predicate
10371   //     is never evaluated again, since the loop exits without taking the
10372   //     backedge.
10373   //   * if the predicate was true in the first iteration then it will
10374   //     continue to be true for all future iterations since it is
10375   //     monotonically increasing.
10376   //
10377   // For both the above possibilities, we can replace the loop varying
10378   // predicate with its value on the first iteration of the loop (which is
10379   // loop invariant).
10380   //
10381   // A similar reasoning applies for a monotonically decreasing predicate, by
10382   // replacing true with false and false with true in the above two bullets.
10383   bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
10384   auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);
10385 
10386   if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
10387     return None;
10388 
10389   return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS);
10390 }
10391 
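// Illustrative sketch (editorial, not part of the implementation): if the
// backedge of L is guarded by "AR s< N" for AR = {0,+,1}<nsw><L> and a
// loop-invariant N, the reasoning above replaces the varying check with its
// first-iteration value:
//
//   auto LIP = SE.getLoopInvariantPredicate(ICmpInst::ICMP_SLT, AR, N, L);
//   // On success, LIP holds (ICMP_SLT, AR->getStart(), N), i.e. the
//   // loop-invariant check "0 s< N".
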
10392 Optional<ScalarEvolution::LoopInvariantPredicate>
10393 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
10394     ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
10395     const Instruction *CtxI, const SCEV *MaxIter) {
10396   // Try to prove the following set of facts:
10397   // - The predicate is monotonic in the iteration space.
10398   // - If the check does not fail on the 1st iteration:
10399   //   - No overflow will happen during first MaxIter iterations;
10400   //   - It will not fail on the MaxIter'th iteration.
10401   // If the check does fail on the 1st iteration, we leave the loop and no
10402   // other checks matter.
10403 
10404   // If there is a loop-invariant, force it into the RHS, otherwise bail out.
10405   if (!isLoopInvariant(RHS, L)) {
10406     if (!isLoopInvariant(LHS, L))
10407       return None;
10408 
10409     std::swap(LHS, RHS);
10410     Pred = ICmpInst::getSwappedPredicate(Pred);
10411   }
10412 
10413   auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
10414   if (!AR || AR->getLoop() != L)
10415     return None;
10416 
10417   // The predicate must be relational (i.e. <, <=, >=, >).
10418   if (!ICmpInst::isRelational(Pred))
10419     return None;
10420 
10421   // TODO: Support steps other than +/- 1.
10422   const SCEV *Step = AR->getStepRecurrence(*this);
10423   auto *One = getOne(Step->getType());
10424   auto *MinusOne = getNegativeSCEV(One);
10425   if (Step != One && Step != MinusOne)
10426     return None;
10427 
10428   // A type mismatch here means that MaxIter is potentially larger than the
10429   // max unsigned value of the start type, which means we cannot prove that
10430   // the indvar does not wrap.
10431   if (AR->getType() != MaxIter->getType())
10432     return None;
10433 
10434   // Value of IV on suggested last iteration.
10435   const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
10436   // Does it still meet the requirement?
10437   if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
10438     return None;
10439   // Because step is +/- 1 and MaxIter has same type as Start (i.e. it does
10440   // not exceed max unsigned value of this type), this effectively proves
10441   // that there is no wrap during the iteration. To prove that there is no
10442   // signed/unsigned wrap, we need to check that
10443   // Start <= Last for step = 1 or Start >= Last for step = -1.
10444   ICmpInst::Predicate NoOverflowPred =
10445       CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
10446   if (Step == MinusOne)
10447     NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
10448   const SCEV *Start = AR->getStart();
10449   if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI))
10450     return None;
10451 
10452   // Everything is fine.
10453   return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
10454 }
10455 
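// Illustrative sketch (editorial, not part of the implementation): with a
// hypothetical AR = {Start,+,1}<L>, a context instruction CtxI inside L, and
// MaxIter an upper bound on the iterations of interest:
//
//   auto LIP = SE.getLoopInvariantExitCondDuringFirstIterations(
//       ICmpInst::ICMP_SLT, AR, N, L, CtxI, MaxIter);
//   // Succeeds only if "AR(MaxIter) s< N" is backedge-guarded and
//   // "Start s<= AR(MaxIter)" holds, ruling out overflow; the result is
//   // then the invariant check (ICMP_SLT, Start, N).
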
10456 bool ScalarEvolution::isKnownPredicateViaConstantRanges(
10457     ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
10458   if (HasSameValue(LHS, RHS))
10459     return ICmpInst::isTrueWhenEqual(Pred);
10460 
10461   // This code is split out from isKnownPredicate because it is called from
10462   // within isLoopEntryGuardedByCond.
10463 
10464   auto CheckRanges = [&](const ConstantRange &RangeLHS,
10465                          const ConstantRange &RangeRHS) {
10466     return RangeLHS.icmp(Pred, RangeRHS);
10467   };
10468 
10469   // The check at the top of the function catches the case where the values are
10470   // known to be equal.
10471   if (Pred == CmpInst::ICMP_EQ)
10472     return false;
10473 
10474   if (Pred == CmpInst::ICMP_NE) {
10475     if (CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
10476         CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)))
10477       return true;
10478     auto *Diff = getMinusSCEV(LHS, RHS);
10479     return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
10480   }
10481 
10482   if (CmpInst::isSigned(Pred))
10483     return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
10484 
10485   return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
10486 }
10487 
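// Illustrative sketch (editorial, not part of the implementation): the proof
// above is a pointwise comparison of two ConstantRanges. If LHS has a
// hypothetical unsigned range [0, 8) and RHS has unsigned range [8, 100):
//
//   // getUnsignedRange(LHS).icmp(ICmpInst::ICMP_ULT, getUnsignedRange(RHS))
//   // is true because every value in [0, 8) is u< every value in [8, 100),
//   // so ICMP_ULT between LHS and RHS is known to hold.
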
10488 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
10489                                                     const SCEV *LHS,
10490                                                     const SCEV *RHS) {
10491   // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where
10492   // C1 and C2 are constant integers. If either X or Y is not an add
10493   // expression, treat it as X + 0 or Y + 0 respectively. C1 and C2 are
10494   // returned via OutC1 and OutC2.
10495   auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y,
10496                                       APInt &OutC1, APInt &OutC2,
10497                                       SCEV::NoWrapFlags ExpectedFlags) {
10498     const SCEV *XNonConstOp, *XConstOp;
10499     const SCEV *YNonConstOp, *YConstOp;
10500     SCEV::NoWrapFlags XFlagsPresent;
10501     SCEV::NoWrapFlags YFlagsPresent;
10502 
10503     if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) {
10504       XConstOp = getZero(X->getType());
10505       XNonConstOp = X;
10506       XFlagsPresent = ExpectedFlags;
10507     }
10508     if (!isa<SCEVConstant>(XConstOp) ||
10509         (XFlagsPresent & ExpectedFlags) != ExpectedFlags)
10510       return false;
10511 
10512     if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) {
10513       YConstOp = getZero(Y->getType());
10514       YNonConstOp = Y;
10515       YFlagsPresent = ExpectedFlags;
10516     }
10517 
10518     if (!isa<SCEVConstant>(YConstOp) ||
10519         (YFlagsPresent & ExpectedFlags) != ExpectedFlags)
10520       return false;
10521 
10522     if (YNonConstOp != XNonConstOp)
10523       return false;
10524 
10525     OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt();
10526     OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt();
10527 
10528     return true;
10529   };
10530 
10531   APInt C1;
10532   APInt C2;
10533 
10534   switch (Pred) {
10535   default:
10536     break;
10537 
10538   case ICmpInst::ICMP_SGE:
10539     std::swap(LHS, RHS);
10540     LLVM_FALLTHROUGH;
10541   case ICmpInst::ICMP_SLE:
10542     // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2.
10543     if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2))
10544       return true;
10545 
10546     break;
10547 
10548   case ICmpInst::ICMP_SGT:
10549     std::swap(LHS, RHS);
10550     LLVM_FALLTHROUGH;
10551   case ICmpInst::ICMP_SLT:
10552     // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2.
10553     if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2))
10554       return true;
10555 
10556     break;
10557 
10558   case ICmpInst::ICMP_UGE:
10559     std::swap(LHS, RHS);
10560     LLVM_FALLTHROUGH;
10561   case ICmpInst::ICMP_ULE:
10562     // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2.
10563     if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2))
10564       return true;
10565 
10566     break;
10567 
10568   case ICmpInst::ICMP_UGT:
10569     std::swap(LHS, RHS);
10570     LLVM_FALLTHROUGH;
10571   case ICmpInst::ICMP_ULT:
10572     // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2.
10573     if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2))
10574       return true;
10575     break;
10576   }
10577 
10578   return false;
10579 }
10580 
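// Illustrative sketch (editorial, not part of the implementation): the
// matcher above recognizes "same base plus constant". For hypothetical
// LHS = (A + 1)<nsw> and RHS = (A + 2)<nsw>:
//
//   // MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) binds C1 = 1
//   // and C2 = 2 against the common base A; since 1 s< 2 and neither add
//   // wraps, both ICMP_SLT and ICMP_SLE between LHS and RHS are known.
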
10581 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
10582                                                    const SCEV *LHS,
10583                                                    const SCEV *RHS) {
10584   if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
10585     return false;
10586 
10587   // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting
10588   // on the stack can result in exponential time complexity.
10589   SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
10590 
10591   // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
10592   //
10593   // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
10594   // isKnownPredicate.  isKnownPredicate is more powerful, but also more
10595   // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
10596   // interesting cases seen in practice.  We can consider "upgrading" L >= 0 to
10597   // use isKnownPredicate later if needed.
10598   return isKnownNonNegative(RHS) &&
10599          isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
10600          isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
10601 }
10602 
10603 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
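// Illustrative sketch (editorial, not part of the implementation): the split
// above trades one unsigned fact for two signed ones. If I has a hypothetical
// signed range [0, 50) and L has signed range [50, 100):
//
//   // isKnownNonNegative(L)             -> true (L s>= 0)
//   // isKnownPredicate(ICMP_SGE, I, 0)  -> true (I s>= 0)
//   // isKnownPredicate(ICMP_SLT, I, L)  -> true (I s< L)
//   // together these prove I u< L.
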
10604                                         ICmpInst::Predicate Pred,
10605                                         const SCEV *LHS, const SCEV *RHS) {
10606   // No need to even try if we know the module has no guards.
10607   if (!HasGuards)
10608     return false;
10609 
10610   return any_of(*BB, [&](const Instruction &I) {
10611     using namespace llvm::PatternMatch;
10612 
10613     Value *Condition;
10614     return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
10615                          m_Value(Condition))) &&
10616            isImpliedCond(Pred, LHS, RHS, Condition, false);
10617   });
10618 }
10619 
10620 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
10621 /// protected by a conditional between LHS and RHS.  This is used to
10622 /// eliminate casts.
10623 bool
10624 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
10625                                              ICmpInst::Predicate Pred,
10626                                              const SCEV *LHS, const SCEV *RHS) {
10627   // Interpret a null as meaning no loop, where there is obviously no guard
10628   // (interprocedural conditions notwithstanding).
10629   if (!L) return true;
10630 
10631   if (VerifyIR)
10632     assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
10633            "This cannot be done on broken IR!");
10634 
10636   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10637     return true;
10638 
10639   BasicBlock *Latch = L->getLoopLatch();
10640   if (!Latch)
10641     return false;
10642 
10643   BranchInst *LoopContinuePredicate =
10644     dyn_cast<BranchInst>(Latch->getTerminator());
10645   if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
10646       isImpliedCond(Pred, LHS, RHS,
10647                     LoopContinuePredicate->getCondition(),
10648                     LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
10649     return true;
10650 
10651   // We don't want more than one activation of the following loops on the stack
10652   // -- that can lead to O(n!) time complexity.
10653   if (WalkingBEDominatingConds)
10654     return false;
10655 
10656   SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
10657 
10658   // See if we can exploit a trip count to prove the predicate.
10659   const auto &BETakenInfo = getBackedgeTakenInfo(L);
10660   const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
10661   if (LatchBECount != getCouldNotCompute()) {
10662     // We know that Latch branches back to the loop header exactly
10663     // LatchBECount times.  This means the backedge condition at Latch is
10664     // equivalent to  "{0,+,1} u< LatchBECount".
10665     Type *Ty = LatchBECount->getType();
10666     auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
10667     const SCEV *LoopCounter =
10668       getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
10669     if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
10670                       LatchBECount))
10671       return true;
10672   }
10673 
10674   // Check conditions due to any @llvm.assume intrinsics.
10675   for (auto &AssumeVH : AC.assumptions()) {
10676     if (!AssumeVH)
10677       continue;
10678     auto *CI = cast<CallInst>(AssumeVH);
10679     if (!DT.dominates(CI, Latch->getTerminator()))
10680       continue;
10681 
10682     if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
10683       return true;
10684   }
10685 
10686   // If the loop is not reachable from the entry block, we risk running into an
10687   // infinite loop as we walk up into the dom tree.  These loops do not matter
10688   // anyway, so we just return a conservative answer when we see them.
10689   if (!DT.isReachableFromEntry(L->getHeader()))
10690     return false;
10691 
10692   if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
10693     return true;
10694 
10695   for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
10696        DTN != HeaderDTN; DTN = DTN->getIDom()) {
10697     assert(DTN && "should reach the loop header before reaching the root!");
10698 
10699     BasicBlock *BB = DTN->getBlock();
10700     if (isImpliedViaGuard(BB, Pred, LHS, RHS))
10701       return true;
10702 
10703     BasicBlock *PBB = BB->getSinglePredecessor();
10704     if (!PBB)
10705       continue;
10706 
10707     BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
10708     if (!ContinuePredicate || !ContinuePredicate->isConditional())
10709       continue;
10710 
10711     Value *Condition = ContinuePredicate->getCondition();
10712 
10713     // If we have an edge `E` within the loop body that dominates the only
10714     // latch, the condition guarding `E` also guards the backedge.  This
10715     // reasoning works only for loops with a single latch.
10716 
10717     BasicBlockEdge DominatingEdge(PBB, BB);
10718     if (DominatingEdge.isSingleEdge()) {
10719       // We're constructively (and conservatively) enumerating edges within the
10720       // loop body that dominate the latch.  The dominator tree better agree
10721       // with us on this:
10722       assert(DT.dominates(DominatingEdge, Latch) && "should be!");
10723 
10724       if (isImpliedCond(Pred, LHS, RHS, Condition,
10725                         BB != ContinuePredicate->getSuccessor(0)))
10726         return true;
10727     }
10728   }
10729 
10730   return false;
10731 }
10732 
10733 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
10734                                                      ICmpInst::Predicate Pred,
10735                                                      const SCEV *LHS,
10736                                                      const SCEV *RHS) {
10737   if (VerifyIR)
10738     assert(!verifyFunction(*BB->getParent(), &dbgs()) &&
10739            "This cannot be done on broken IR!");
10740 
10741   // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
10742   // the facts (a >= b && a != b) separately. A typical situation is when the
10743   // non-strict comparison is known from ranges and non-equality is known from
10744   // dominating predicates. If we are proving strict comparison, we always try
10745   // to prove non-equality and non-strict comparison separately.
10746   auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
10747   const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
10748   bool ProvedNonStrictComparison = false;
10749   bool ProvedNonEquality = false;
10750 
10751   auto SplitAndProve =
10752     [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool {
10753     if (!ProvedNonStrictComparison)
10754       ProvedNonStrictComparison = Fn(NonStrictPredicate);
10755     if (!ProvedNonEquality)
10756       ProvedNonEquality = Fn(ICmpInst::ICMP_NE);
10757     if (ProvedNonStrictComparison && ProvedNonEquality)
10758       return true;
10759     return false;
10760   };
10761 
10762   if (ProvingStrictComparison) {
10763     auto ProofFn = [&](ICmpInst::Predicate P) {
10764       return isKnownViaNonRecursiveReasoning(P, LHS, RHS);
10765     };
10766     if (SplitAndProve(ProofFn))
10767       return true;
10768   }
10769 
10770   // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
10771   auto ProveViaGuard = [&](const BasicBlock *Block) {
10772     if (isImpliedViaGuard(Block, Pred, LHS, RHS))
10773       return true;
10774     if (ProvingStrictComparison) {
10775       auto ProofFn = [&](ICmpInst::Predicate P) {
10776         return isImpliedViaGuard(Block, P, LHS, RHS);
10777       };
10778       if (SplitAndProve(ProofFn))
10779         return true;
10780     }
10781     return false;
10782   };
10783 
10784   // Try to prove (Pred, LHS, RHS) using isImpliedCond.
10785   auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
10786     const Instruction *CtxI = &BB->front();
10787     if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, CtxI))
10788       return true;
10789     if (ProvingStrictComparison) {
10790       auto ProofFn = [&](ICmpInst::Predicate P) {
10791         return isImpliedCond(P, LHS, RHS, Condition, Inverse, CtxI);
10792       };
10793       if (SplitAndProve(ProofFn))
10794         return true;
10795     }
10796     return false;
10797   };
10798 
10799   // Starting at the block's predecessor, climb up the predecessor chain for
10800   // as long as we can find predecessors that have a unique successor leading
10801   // to the original block.
10802   const Loop *ContainingLoop = LI.getLoopFor(BB);
10803   const BasicBlock *PredBB;
10804   if (ContainingLoop && ContainingLoop->getHeader() == BB)
10805     PredBB = ContainingLoop->getLoopPredecessor();
10806   else
10807     PredBB = BB->getSinglePredecessor();
10808   for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
10809        Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
10810     if (ProveViaGuard(Pair.first))
10811       return true;
10812 
10813     const BranchInst *LoopEntryPredicate =
10814         dyn_cast<BranchInst>(Pair.first->getTerminator());
10815     if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
10817       continue;
10818 
10819     if (ProveViaCond(LoopEntryPredicate->getCondition(),
10820                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
10821       return true;
10822   }
10823 
10824   // Check conditions due to any @llvm.assume intrinsics.
10825   for (auto &AssumeVH : AC.assumptions()) {
10826     if (!AssumeVH)
10827       continue;
10828     auto *CI = cast<CallInst>(AssumeVH);
10829     if (!DT.dominates(CI, BB))
10830       continue;
10831 
10832     if (ProveViaCond(CI->getArgOperand(0), false))
10833       return true;
10834   }
10835 
10836   return false;
10837 }
10838 
10839 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
10840                                                ICmpInst::Predicate Pred,
10841                                                const SCEV *LHS,
10842                                                const SCEV *RHS) {
10843   // Interpret a null as meaning no loop, where there is obviously no guard
10844   // (interprocedural conditions notwithstanding).
10845   if (!L)
10846     return false;
10847 
10848   // Both LHS and RHS must be available at loop entry.
10849   assert(isAvailableAtLoopEntry(LHS, L) &&
10850          "LHS is not available at Loop Entry");
10851   assert(isAvailableAtLoopEntry(RHS, L) &&
10852          "RHS is not available at Loop Entry");
10853 
10854   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10855     return true;
10856 
10857   return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
10858 }
10859 
10860 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10861                                     const SCEV *RHS,
10862                                     const Value *FoundCondValue, bool Inverse,
10863                                     const Instruction *CtxI) {
10864   // A false condition implies anything. Do not bother analyzing it further.
10865   if (FoundCondValue ==
10866       ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
10867     return true;
10868 
10869   if (!PendingLoopPredicates.insert(FoundCondValue).second)
10870     return false;
10871 
10872   auto ClearOnExit =
10873       make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
10874 
10875   // Recursively handle And and Or conditions.
10876   const Value *Op0, *Op1;
10877   if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
10878     if (!Inverse)
10879       return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
10880              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
10881   } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
10882     if (Inverse)
10883       return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
10884              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
10885   }
10886 
10887   const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
10888   if (!ICI) return false;
10889 
10890   // We have found a conditional branch that dominates the loop or controls
10891   // the loop latch. Check to see if it is the comparison we are looking for.
10892   ICmpInst::Predicate FoundPred;
10893   if (Inverse)
10894     FoundPred = ICI->getInversePredicate();
10895   else
10896     FoundPred = ICI->getPredicate();
10897 
10898   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
10899   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
10900 
10901   return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
10902 }
10903 
10904 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10905                                     const SCEV *RHS,
10906                                     ICmpInst::Predicate FoundPred,
10907                                     const SCEV *FoundLHS, const SCEV *FoundRHS,
10908                                     const Instruction *CtxI) {
10909   // Balance the types.
10910   if (getTypeSizeInBits(LHS->getType()) <
10911       getTypeSizeInBits(FoundLHS->getType())) {
10912     // For unsigned and equality predicates, try to prove that both found
10913     // operands fit into a narrow unsigned range. If so, try to prove facts in
10914     // narrow types.
10915     if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy() &&
10916         !FoundRHS->getType()->isPointerTy()) {
10917       auto *NarrowType = LHS->getType();
10918       auto *WideType = FoundLHS->getType();
10919       auto BitWidth = getTypeSizeInBits(NarrowType);
10920       const SCEV *MaxValue = getZeroExtendExpr(
10921           getConstant(APInt::getMaxValue(BitWidth)), WideType);
10922       if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundLHS,
10923                                           MaxValue) &&
10924           isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundRHS,
10925                                           MaxValue)) {
10926         const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
10927         const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
10928         if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
10929                                        TruncFoundRHS, CtxI))
10930           return true;
10931       }
10932     }
10933 
10934     if (LHS->getType()->isPointerTy() || RHS->getType()->isPointerTy())
10935       return false;
10936     if (CmpInst::isSigned(Pred)) {
10937       LHS = getSignExtendExpr(LHS, FoundLHS->getType());
10938       RHS = getSignExtendExpr(RHS, FoundLHS->getType());
10939     } else {
10940       LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
10941       RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
10942     }
10943   } else if (getTypeSizeInBits(LHS->getType()) >
10944       getTypeSizeInBits(FoundLHS->getType())) {
10945     if (FoundLHS->getType()->isPointerTy() || FoundRHS->getType()->isPointerTy())
10946       return false;
10947     if (CmpInst::isSigned(FoundPred)) {
10948       FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
10949       FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
10950     } else {
10951       FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
10952       FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
10953     }
10954   }
10955   return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
10956                                     FoundRHS, CtxI);
10957 }
10958 
10959 bool ScalarEvolution::isImpliedCondBalancedTypes(
10960     ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10961     ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
10962     const Instruction *CtxI) {
10963   assert(getTypeSizeInBits(LHS->getType()) ==
10964              getTypeSizeInBits(FoundLHS->getType()) &&
10965          "Types should be balanced!");
10966   // Canonicalize the query to match the way instcombine will have
10967   // canonicalized the comparison.
10968   if (SimplifyICmpOperands(Pred, LHS, RHS))
10969     if (LHS == RHS)
10970       return CmpInst::isTrueWhenEqual(Pred);
10971   if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
10972     if (FoundLHS == FoundRHS)
10973       return CmpInst::isFalseWhenEqual(FoundPred);
10974 
10975   // Check to see if we can make the LHS or RHS match.
10976   if (LHS == FoundRHS || RHS == FoundLHS) {
10977     if (isa<SCEVConstant>(RHS)) {
10978       std::swap(FoundLHS, FoundRHS);
10979       FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
10980     } else {
10981       std::swap(LHS, RHS);
10982       Pred = ICmpInst::getSwappedPredicate(Pred);
10983     }
10984   }
10985 
10986   // Check whether the found predicate is the same as the desired predicate.
10987   if (FoundPred == Pred)
10988     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
10989 
10990   // Check whether swapping the found predicate makes it the same as the
10991   // desired predicate.
10992   if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
10993     // We can write the implication
10994     // 0.  LHS Pred      RHS  <-   FoundLHS SwapPred  FoundRHS
10995     // using one of the following ways:
10996     // 1.  LHS Pred      RHS  <-   FoundRHS Pred      FoundLHS
10997     // 2.  RHS SwapPred  LHS  <-   FoundLHS SwapPred  FoundRHS
10998     // 3.  LHS Pred      RHS  <-  ~FoundLHS Pred     ~FoundRHS
10999     // 4. ~LHS SwapPred ~RHS  <-   FoundLHS SwapPred  FoundRHS
11000     // Forms 1. and 2. require swapping the operands of one condition. Don't
11001     // do this if it would break canonical constant/addrec ordering.
11002     if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
11003       return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
11004                                    CtxI);
11005     if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
11006       return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI);
11007 
11008     // There's no clear preference between forms 3. and 4., try both.  Avoid
11009     // forming getNotSCEV of pointer values as the resulting subtract is
11010     // not legal.
11011     if (!LHS->getType()->isPointerTy() && !RHS->getType()->isPointerTy() &&
11012         isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS),
11013                               FoundLHS, FoundRHS, CtxI))
11014       return true;
11015 
11016     if (!FoundLHS->getType()->isPointerTy() &&
11017         !FoundRHS->getType()->isPointerTy() &&
11018         isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS),
11019                               getNotSCEV(FoundRHS), CtxI))
11020       return true;
11021 
11022     return false;
11023   }
11024 
11025   auto IsSignFlippedPredicate = [](CmpInst::Predicate P1,
11026                                    CmpInst::Predicate P2) {
11027     assert(P1 != P2 && "Handled earlier!");
11028     return CmpInst::isRelational(P2) &&
11029            P1 == CmpInst::getFlippedSignednessPredicate(P2);
11030   };
11031   if (IsSignFlippedPredicate(Pred, FoundPred)) {
11032     // Unsigned comparison is the same as signed comparison when both the
11033     // operands are non-negative or negative.
11034     if ((isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) ||
11035         (isKnownNegative(FoundLHS) && isKnownNegative(FoundRHS)))
11036       return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
11037     // Create local copies that we can freely swap and canonicalize our
11038     // conditions to "le/lt".
11039     ICmpInst::Predicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
11040     const SCEV *CanonicalLHS = LHS, *CanonicalRHS = RHS,
11041                *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
11042     if (ICmpInst::isGT(CanonicalPred) || ICmpInst::isGE(CanonicalPred)) {
11043       CanonicalPred = ICmpInst::getSwappedPredicate(CanonicalPred);
11044       CanonicalFoundPred = ICmpInst::getSwappedPredicate(CanonicalFoundPred);
11045       std::swap(CanonicalLHS, CanonicalRHS);
11046       std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
11047     }
11048     assert((ICmpInst::isLT(CanonicalPred) || ICmpInst::isLE(CanonicalPred)) &&
11049            "Must be!");
11050     assert((ICmpInst::isLT(CanonicalFoundPred) ||
11051             ICmpInst::isLE(CanonicalFoundPred)) &&
11052            "Must be!");
11053     if (ICmpInst::isSigned(CanonicalPred) && isKnownNonNegative(CanonicalRHS))
11054       // Use implication:
11055       // x <u y && y >=s 0 --> x <s y.
11056       // If we can prove the left part, the right part is also proven.
11057       return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
11058                                    CanonicalRHS, CanonicalFoundLHS,
11059                                    CanonicalFoundRHS);
11060     if (ICmpInst::isUnsigned(CanonicalPred) && isKnownNegative(CanonicalRHS))
11061       // Use implication:
11062       // x <s y && y <s 0 --> x <u y.
11063       // If we can prove the left part, the right part is also proven.
11064       return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
11065                                    CanonicalRHS, CanonicalFoundLHS,
11066                                    CanonicalFoundRHS);
11067   }
11068 
11069   // Check if we can make progress by sharpening ranges.
11070   if (FoundPred == ICmpInst::ICMP_NE &&
11071       (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
11072 
11073     const SCEVConstant *C = nullptr;
11074     const SCEV *V = nullptr;
11075 
11076     if (isa<SCEVConstant>(FoundLHS)) {
11077       C = cast<SCEVConstant>(FoundLHS);
11078       V = FoundRHS;
11079     } else {
11080       C = cast<SCEVConstant>(FoundRHS);
11081       V = FoundLHS;
11082     }
11083 
11084     // The guarding predicate tells us that C != V. If the known range
11085     // of V is [C, t), we can sharpen the range to [C + 1, t).  The
11086     // range we consider must have the same signedness as the
11087     // predicate we're interested in folding.
11088 
11089     APInt Min = ICmpInst::isSigned(Pred) ?
11090         getSignedRangeMin(V) : getUnsignedRangeMin(V);
11091 
11092     if (Min == C->getAPInt()) {
11093       // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
11094       // This is true even if (Min + 1) wraps around -- in case of
11095       // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
11096 
11097       APInt SharperMin = Min + 1;
11098 
11099       switch (Pred) {
11100         case ICmpInst::ICMP_SGE:
11101         case ICmpInst::ICMP_UGE:
11102           // We know V `Pred` SharperMin.  If this implies LHS `Pred`
11103           // RHS, we're done.
11104           if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
11105                                     CtxI))
11106             return true;
11107           LLVM_FALLTHROUGH;
11108 
11109         case ICmpInst::ICMP_SGT:
11110         case ICmpInst::ICMP_UGT:
11111           // We know from the range information that (V `Pred` Min ||
11112           // V == Min).  We know from the guarding condition that !(V
11113           // == Min).  This gives us
11114           //
11115           //       V `Pred` Min || V == Min && !(V == Min)
11116           //   =>  V `Pred` Min
11117           //
11118           // If V `Pred` Min implies LHS `Pred` RHS, we're done.
11119 
11120           if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
11121             return true;
11122           break;
11123 
11124         // `LHS < RHS` and `LHS <= RHS` are handled in the same way as
11125         // `RHS > LHS` and `RHS >= LHS` respectively.
11125         case ICmpInst::ICMP_SLE:
11126         case ICmpInst::ICMP_ULE:
11127           if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
11128                                     LHS, V, getConstant(SharperMin), CtxI))
11129             return true;
11130           LLVM_FALLTHROUGH;
11131 
11132         case ICmpInst::ICMP_SLT:
11133         case ICmpInst::ICMP_ULT:
11134           if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
11135                                     LHS, V, getConstant(Min), CtxI))
11136             return true;
11137           break;
11138 
11139         default:
11140           // No change
11141           break;
11142       }
11143     }
11144   }
11145 
11146   // Check whether the actual condition is beyond sufficient.
11147   if (FoundPred == ICmpInst::ICMP_EQ)
11148     if (ICmpInst::isTrueWhenEqual(Pred))
11149       if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
11150         return true;
11151   if (Pred == ICmpInst::ICMP_NE)
11152     if (!ICmpInst::isTrueWhenEqual(FoundPred))
11153       if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
11154         return true;
11155 
11156   // Otherwise assume the worst.
11157   return false;
11158 }
11159 
11160 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
11161                                      const SCEV *&L, const SCEV *&R,
11162                                      SCEV::NoWrapFlags &Flags) {
11163   const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
11164   if (!AE || AE->getNumOperands() != 2)
11165     return false;
11166 
11167   L = AE->getOperand(0);
11168   R = AE->getOperand(1);
11169   Flags = AE->getNoWrapFlags();
11170   return true;
11171 }
11172 
11173 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
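// Illustrative sketch (editorial, not part of the implementation): for a
// hypothetical Expr = (X + 42)<nuw>, splitBinaryAdd binds the two operands
// and the no-wrap flags:
//
//   const SCEV *L, *R;
//   SCEV::NoWrapFlags Flags;
//   if (splitBinaryAdd(Expr, L, R, Flags)) {
//     // Add expressions canonically place the constant operand first, so L
//     // is the constant 42, R is X, and Flags has FlagNUW set.
//   }
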
11174                                                            const SCEV *Less) {
11175   // We avoid subtracting expressions here because this function is usually
11176   // fairly deep in the call stack (i.e. is called many times).
11177 
11178   // X - X = 0.
11179   if (More == Less)
11180     return APInt(getTypeSizeInBits(More->getType()), 0);
11181 
11182   if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
11183     const auto *LAR = cast<SCEVAddRecExpr>(Less);
11184     const auto *MAR = cast<SCEVAddRecExpr>(More);
11185 
11186     if (LAR->getLoop() != MAR->getLoop())
11187       return None;
11188 
11189     // We look at affine expressions only, not for correctness but to keep
11190     // getStepRecurrence cheap.
11191     if (!LAR->isAffine() || !MAR->isAffine())
11192       return None;
11193 
11194     if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
11195       return None;
11196 
11197     Less = LAR->getStart();
11198     More = MAR->getStart();
11199 
11200     // fall through
11201   }
11202 
11203   if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
11204     const auto &M = cast<SCEVConstant>(More)->getAPInt();
11205     const auto &L = cast<SCEVConstant>(Less)->getAPInt();
11206     return M - L;
11207   }
11208 
11209   SCEV::NoWrapFlags Flags;
11210   const SCEV *LLess = nullptr, *RLess = nullptr;
11211   const SCEV *LMore = nullptr, *RMore = nullptr;
11212   const SCEVConstant *C1 = nullptr, *C2 = nullptr;
11213   // Compare (X + C1) vs X.
11214   if (splitBinaryAdd(Less, LLess, RLess, Flags))
11215     if ((C1 = dyn_cast<SCEVConstant>(LLess)))
11216       if (RLess == More)
11217         return -(C1->getAPInt());
11218 
11219   // Compare X vs (X + C2).
11220   if (splitBinaryAdd(More, LMore, RMore, Flags))
11221     if ((C2 = dyn_cast<SCEVConstant>(LMore)))
11222       if (RMore == Less)
11223         return C2->getAPInt();
11224 
11225   // Compare (X + C1) vs (X + C2).
11226   if (C1 && C2 && RLess == RMore)
11227     return C2->getAPInt() - C1->getAPInt();
11228 
11229   return None;
11230 }
11231 
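// Illustrative sketch (editorial, not part of the implementation): the
// routine above recognizes a few syntactic shapes without building a
// subtraction, for a hypothetical SCEV X and loop-invariant step S:
//
//   // computeConstantDifference(X, X)               == 0
//   // computeConstantDifference(X + 3, X)           == 3
//   // computeConstantDifference(X, X + 3)           == -3
//   // computeConstantDifference({X+3,+,S}, {X,+,S}) == 3 (same loop, step)
//   // anything else                                 == None
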
11232 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
11233     ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
11234     const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) {
11235   // Try to recognize the following pattern:
11236   //
11237   //   FoundRHS = ...
11238   // ...
11239   // loop:
11240   //   FoundLHS = {Start,+,W}
11241   // context_bb: // Basic block from the same loop
11242   //   known(Pred, FoundLHS, FoundRHS)
11243   //
11244   // If some predicate is known in the context of a loop, it is also known on
11245   // each iteration of this loop, including the first iteration. Therefore, in
11246   // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to
11247   // prove the original pred using this fact.
11248   if (!CtxI)
11249     return false;
11250   const BasicBlock *ContextBB = CtxI->getParent();
11251   // Make sure AR varies in the context block.
11252   if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
11253     const Loop *L = AR->getLoop();
11254     // Make sure that context belongs to the loop and executes on 1st iteration
11255     // (if it ever executes at all).
11256     if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
11257       return false;
11258     if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
11259       return false;
11260     return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
11261   }
11262 
11263   if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
11264     const Loop *L = AR->getLoop();
11265     // Make sure that context belongs to the loop and executes on 1st iteration
11266     // (if it ever executes at all).
11267     if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
11268       return false;
11269     if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
11270       return false;
11271     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
11272   }
11273 
11274   return false;
11275 }
11276 
11277 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
11278     ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
11279     const SCEV *FoundLHS, const SCEV *FoundRHS) {
11280   if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
11281     return false;
11282 
11283   const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
11284   if (!AddRecLHS)
11285     return false;
11286 
11287   const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
11288   if (!AddRecFoundLHS)
11289     return false;
11290 
11291   // We'd like to let SCEV reason about control dependencies, so we constrain
11292   // both the inequalities to be about add recurrences on the same loop.  This
11293   // way we can use isLoopEntryGuardedByCond later.
11294 
11295   const Loop *L = AddRecFoundLHS->getLoop();
11296   if (L != AddRecLHS->getLoop())
11297     return false;
11298 
11299   //  FoundLHS u< FoundRHS u< -C =>  (FoundLHS + C) u< (FoundRHS + C) ... (1)
11300   //
11301   //  FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
11302   //                                                                  ... (2)
11303   //
11304   // Informal proof for (2), assuming (1) [*]:
11305   //
11306   // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
11307   //
11308   // Then
11309   //
11310   //       FoundLHS s< FoundRHS s< INT_MIN - C
11311   // <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
11312   // <=>  (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
11313   // <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
11314   //                        (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ]
11315   // <=>  FoundLHS + C s< FoundRHS + C
11316   //
11317   // [*]: (1) can be proved by ruling out overflow.
11318   //
11319   // [**]: This can be proved by analyzing all the four possibilities:
11320   //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
11321   //    (A s>= 0, B s>= 0).
11322   //
11323   // Note:
11324   // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
11325   // will not sign underflow.  For instance, say FoundLHS = (i8 -128), FoundRHS
11326   // = (i8 -127) and C = (i8 -100).  Then INT_MIN - C = (i8 -28), and FoundRHS
11327   // s< (INT_MIN - C).  Lack of sign overflow / underflow in "FoundRHS + C" is
11328   // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
11329   // C)".
11330 
11331   Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
11332   Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
11333   if (!LDiff || !RDiff || *LDiff != *RDiff)
11334     return false;
11335 
11336   if (LDiff->isMinValue())
11337     return true;
11338 
11339   APInt FoundRHSLimit;
11340 
11341   if (Pred == CmpInst::ICMP_ULT) {
11342     FoundRHSLimit = -(*RDiff);
11343   } else {
11344     assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
11345     FoundRHSLimit =
11346         APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
11346   }
11347 
11348   // Try to prove (1) or (2), as needed.
11349   return isAvailableAtLoopEntry(FoundRHS, L) &&
11350          isLoopEntryGuardedByCond(L, Pred, FoundRHS,
11351                                   getConstant(FoundRHSLimit));
11352 }
11353 
11354 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
11355                                         const SCEV *LHS, const SCEV *RHS,
11356                                         const SCEV *FoundLHS,
11357                                         const SCEV *FoundRHS, unsigned Depth) {
11358   const PHINode *LPhi = nullptr, *RPhi = nullptr;
11359 
11360   auto ClearOnExit = make_scope_exit([&]() {
11361     if (LPhi) {
11362       bool Erased = PendingMerges.erase(LPhi);
11363       assert(Erased && "Failed to erase LPhi!");
11364       (void)Erased;
11365     }
11366     if (RPhi) {
11367       bool Erased = PendingMerges.erase(RPhi);
11368       assert(Erased && "Failed to erase RPhi!");
11369       (void)Erased;
11370     }
11371   });
11372 
11373   // Find the respective Phis and check that they are not already pending.
11374   if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
11375     if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
11376       if (!PendingMerges.insert(Phi).second)
11377         return false;
11378       LPhi = Phi;
11379     }
11380   if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
11381     if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
11382       // If we detect a loop of Phi nodes being processed by this method, for
11383       // example:
11384       //
11385       //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
11386       //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
11387       //
11388       // we don't want to deal with a case that complex, so return conservative
11389       // answer false.
11390       if (!PendingMerges.insert(Phi).second)
11391         return false;
11392       RPhi = Phi;
11393     }
11394 
11395   // If none of LHS, RHS is a Phi, nothing to do here.
11396   if (!LPhi && !RPhi)
11397     return false;
11398 
11399   // If there is a SCEVUnknown Phi we are interested in, make it left.
11400   if (!LPhi) {
11401     std::swap(LHS, RHS);
11402     std::swap(FoundLHS, FoundRHS);
11403     std::swap(LPhi, RPhi);
11404     Pred = ICmpInst::getSwappedPredicate(Pred);
11405   }
11406 
11407   assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
11408   const BasicBlock *LBB = LPhi->getParent();
11409   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
11410 
11411   auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
11412     return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
11413            isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
11414            isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
11415   };
11416 
11417   if (RPhi && RPhi->getParent() == LBB) {
11418     // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
11419     // If we compare two Phis from the same block, and the predicate is true
11420     // for the incoming values from each predecessor block, then the
11421     // predicate is also true for the Phis.
11422     for (const BasicBlock *IncBB : predecessors(LBB)) {
11423       const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11424       const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
11425       if (!ProvedEasily(L, R))
11426         return false;
11427     }
11428   } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
11429     // Case two: RHS is also a Phi from the same basic block, and it is an
11430     // AddRec. This means that the loop has both an AddRec and an Unknown PHI;
11431     // for such a loop we can compare the AddRec's incoming values from above
11432     // the loop and from the latch with the respective incoming values of LPhi.
11433     // TODO: Generalize to handle loops with many inputs in a header.
11434     if (LPhi->getNumIncomingValues() != 2) return false;
11435 
11436     auto *RLoop = RAR->getLoop();
11437     auto *Predecessor = RLoop->getLoopPredecessor();
11438     assert(Predecessor && "Loop with AddRec with no predecessor?");
11439     const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
11440     if (!ProvedEasily(L1, RAR->getStart()))
11441       return false;
11442     auto *Latch = RLoop->getLoopLatch();
11443     assert(Latch && "Loop with AddRec with no latch?");
11444     const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
11445     if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
11446       return false;
11447   } else {
11448     // In all other cases go over the inputs of LHS and compare each of them to
11449     // RHS; the predicate is true for (LHS, RHS) if it is true for all such pairs.
11450     // At this point RHS is either a non-Phi, or it is a Phi from some block
11451     // different from LBB.
11452     for (const BasicBlock *IncBB : predecessors(LBB)) {
11453       // Check that RHS is available in this block.
11454       if (!dominates(RHS, IncBB))
11455         return false;
11456       const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11457       // Make sure L does not refer to a value from a potentially previous
11458       // iteration of a loop.
11459       if (!properlyDominates(L, IncBB))
11460         return false;
11461       if (!ProvedEasily(L, RHS))
11462         return false;
11463     }
11464   }
11465   return true;
11466 }
11467 
11468 bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred,
11469                                                     const SCEV *LHS,
11470                                                     const SCEV *RHS,
11471                                                     const SCEV *FoundLHS,
11472                                                     const SCEV *FoundRHS) {
11473   // We want to imply LHS < RHS from LHS < (RHS >> shiftvalue).  First, make
11474   // sure that we are dealing with the same LHS.
11475   if (RHS == FoundRHS) {
11476     std::swap(LHS, RHS);
11477     std::swap(FoundLHS, FoundRHS);
11478     Pred = ICmpInst::getSwappedPredicate(Pred);
11479   }
11480   if (LHS != FoundLHS)
11481     return false;
11482 
11483   auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS);
11484   if (!SUFoundRHS)
11485     return false;
11486 
11487   Value *Shiftee, *ShiftValue;
11488 
11489   using namespace PatternMatch;
11490   if (match(SUFoundRHS->getValue(),
11491             m_LShr(m_Value(Shiftee), m_Value(ShiftValue)))) {
11492     auto *ShifteeS = getSCEV(Shiftee);
11493     // Prove one of the following:
11494     // LHS <u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <u RHS
11495     // LHS <=u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <=u RHS
11496     // LHS <s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0
11497     //   ---> LHS <s RHS
11498     // LHS <=s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0
11499     //   ---> LHS <=s RHS
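          // For example (unsigned case, i8): if LHS <u (200 >> 4) = 12 and
          // 200 <=u RHS, then LHS <u 12 <=u 200 <=u RHS. The key fact is that
          // a logical shift right can only shrink the shiftee, i.e.
          // (shiftee >> shiftvalue) <=u shiftee.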
11500     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
11501       return isKnownPredicate(ICmpInst::ICMP_ULE, ShifteeS, RHS);
11502     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
11503       if (isKnownNonNegative(ShifteeS))
11504         return isKnownPredicate(ICmpInst::ICMP_SLE, ShifteeS, RHS);
11505   }
11506 
11507   return false;
11508 }
11509 
11510 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
11511                                             const SCEV *LHS, const SCEV *RHS,
11512                                             const SCEV *FoundLHS,
11513                                             const SCEV *FoundRHS,
11514                                             const Instruction *CtxI) {
11515   if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
11516     return true;
11517 
11518   if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
11519     return true;
11520 
11521   if (isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS))
11522     return true;
11523 
11524   if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
11525                                           CtxI))
11526     return true;
11527 
11528   return isImpliedCondOperandsHelper(Pred, LHS, RHS,
11529                                      FoundLHS, FoundRHS);
11530 }
11531 
11532 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
11533 template <typename MinMaxExprType>
11534 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
11535                                  const SCEV *Candidate) {
11536   const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
11537   if (!MinMaxExpr)
11538     return false;
11539 
11540   return is_contained(MinMaxExpr->operands(), Candidate);
11541 }
11542 
11543 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
11544                                            ICmpInst::Predicate Pred,
11545                                            const SCEV *LHS, const SCEV *RHS) {
11546   // If both sides are affine addrecs for the same loop, with equal
11547   // steps, and we know the recurrences don't wrap, then we only
11548   // need to check the predicate on the starting values.
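        // For example, {4,+,1}<nuw> <u {6,+,1}<nuw> over the same loop follows
        // from 4 <u 6: both sides advance by the same step on every iteration,
        // and neither can wrap, so the initial gap is preserved.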
11549 
11550   if (!ICmpInst::isRelational(Pred))
11551     return false;
11552 
11553   const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
11554   if (!LAR)
11555     return false;
11556   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
11557   if (!RAR)
11558     return false;
11559   if (LAR->getLoop() != RAR->getLoop())
11560     return false;
11561   if (!LAR->isAffine() || !RAR->isAffine())
11562     return false;
11563 
11564   if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
11565     return false;
11566 
11567   SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
11568                          SCEV::FlagNSW : SCEV::FlagNUW;
11569   if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
11570     return false;
11571 
11572   return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
11573 }
11574 
11575 /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max
11576 /// expression?
11577 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
11578                                         ICmpInst::Predicate Pred,
11579                                         const SCEV *LHS, const SCEV *RHS) {
11580   switch (Pred) {
11581   default:
11582     return false;
11583 
11584   case ICmpInst::ICMP_SGE:
11585     std::swap(LHS, RHS);
11586     LLVM_FALLTHROUGH;
11587   case ICmpInst::ICMP_SLE:
11588     return
11589         // min(A, ...) <= A
11590         IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
11591         // A <= max(A, ...)
11592         IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
11593 
11594   case ICmpInst::ICMP_UGE:
11595     std::swap(LHS, RHS);
11596     LLVM_FALLTHROUGH;
11597   case ICmpInst::ICMP_ULE:
11598     return
11599         // min(A, ...) <= A
11600         // FIXME: what about umin_seq?
11601         IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
11602         // A <= max(A, ...)
11603         IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
11604   }
11605 
11606   llvm_unreachable("covered switch fell through?!");
11607 }
11608 
11609 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
11610                                              const SCEV *LHS, const SCEV *RHS,
11611                                              const SCEV *FoundLHS,
11612                                              const SCEV *FoundRHS,
11613                                              unsigned Depth) {
11614   assert(getTypeSizeInBits(LHS->getType()) ==
11615              getTypeSizeInBits(RHS->getType()) &&
11616          "LHS and RHS have different sizes?");
11617   assert(getTypeSizeInBits(FoundLHS->getType()) ==
11618              getTypeSizeInBits(FoundRHS->getType()) &&
11619          "FoundLHS and FoundRHS have different sizes?");
11620   // We want to avoid hurting the compile time with analysis of too big trees.
11621   if (Depth > MaxSCEVOperationsImplicationDepth)
11622     return false;
11623 
11624   // We only want to work with GT comparisons so far.
11625   if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
11626     Pred = CmpInst::getSwappedPredicate(Pred);
11627     std::swap(LHS, RHS);
11628     std::swap(FoundLHS, FoundRHS);
11629   }
11630 
11631   // For unsigned, try to reduce it to the corresponding signed comparison.
11632   if (Pred == ICmpInst::ICMP_UGT)
11633     // We can replace unsigned predicate with its signed counterpart if all
11634     // involved values are non-negative.
11635     // TODO: We could have better support for unsigned.
11636     if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
11637       // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
11638       // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
11639       // use this fact to prove that LHS and RHS are non-negative.
11640       const SCEV *MinusOne = getMinusOne(LHS->getType());
11641       if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
11642                                 FoundRHS) &&
11643           isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
11644                                 FoundRHS))
11645         Pred = ICmpInst::ICMP_SGT;
11646     }
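        // For example, with i8 values FoundLHS = 5 and FoundRHS = 3 (both
        // non-negative), 5 >u 3 gives 5 >s 3; if that context also proves LHS
        // and RHS non-negative, the unsigned and signed queries coincide.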
11647 
11648   if (Pred != ICmpInst::ICMP_SGT)
11649     return false;
11650 
11651   auto GetOpFromSExt = [&](const SCEV *S) {
11652     if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
11653       return Ext->getOperand();
11654     // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
11655     // the constant in some cases.
11656     return S;
11657   };
11658 
11659   // Acquire values from extensions.
11660   auto *OrigLHS = LHS;
11661   auto *OrigFoundLHS = FoundLHS;
11662   LHS = GetOpFromSExt(LHS);
11663   FoundLHS = GetOpFromSExt(FoundLHS);
11664 
11665   // Can the SGT predicate be proved trivially or using the found context?
11666   auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
11667     return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
11668            isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
11669                                   FoundRHS, Depth + 1);
11670   };
11671 
11672   if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
11673     // We want to avoid creation of any new non-constant SCEV. Since we are
11674     // going to compare the operands to RHS, we should be certain that we don't
11675     // need any size extensions for this. So let's decline all cases when the
11676     // sizes of types of LHS and RHS do not match.
11677     // TODO: Maybe try to get RHS from sext to catch more cases?
11678     if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
11679       return false;
11680 
11681     // Should not overflow.
11682     if (!LHSAddExpr->hasNoSignedWrap())
11683       return false;
11684 
11685     auto *LL = LHSAddExpr->getOperand(0);
11686     auto *LR = LHSAddExpr->getOperand(1);
11687     auto *MinusOne = getMinusOne(RHS->getType());
11688 
11689     // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
11690     auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
11691       return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
11692     };
11693     // Try to prove the following rule:
11694     // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
11695     // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
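          // For example, if LHS = a + b with a >=s 0 and b >s RHS, then
          // LHS = a + b >=s b >s RHS; the nsw check above rules out the sum
          // wrapping around.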
11696     if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
11697       return true;
11698   } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
11699     Value *LL, *LR;
11700     // FIXME: Once we have SDiv implemented, we can get rid of this matching.
11701 
11702     using namespace llvm::PatternMatch;
11703 
11704     if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
11705       // Rules for division.
11706       // We are going to perform some comparisons with Denominator and its
11707       // derivative expressions. In general case, creating a SCEV for it may
11708       // derivative expressions. In the general case, creating a SCEV for it
11709       // may lead to a complex analysis of the entire graph, and in particular
11710       // it can request trip count recalculation for the same loop, which would
11711       // be cached as SCEVCouldNotCompute to avoid infinite recursion. To avoid
11712       // So we bail if Denominator is not a constant.
11713       if (!isa<ConstantInt>(LR))
11714         return false;
11715 
11716       auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
11717 
11718       // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
11719       // then a SCEV for the numerator already exists and matches with FoundLHS.
11720       auto *Numerator = getExistingSCEV(LL);
11721       if (!Numerator || Numerator->getType() != FoundLHS->getType())
11722         return false;
11723 
11724       // Make sure that the numerator matches with FoundLHS and the denominator
11725       // is positive.
11726       if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
11727         return false;
11728 
11729       auto *DTy = Denominator->getType();
11730       auto *FRHSTy = FoundRHS->getType();
11731       if (DTy->isPointerTy() != FRHSTy->isPointerTy())
11732         // One of the types is a pointer and the other is not. We cannot extend
11733         // them properly to a wider type, so let us just reject this case.
11734         // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
11735         // to avoid this check.
11736         return false;
11737 
11738       // Given that:
11739       // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
11740       auto *WTy = getWiderType(DTy, FRHSTy);
11741       auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
11742       auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
11743 
11744       // Try to prove the following rule:
11745       // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
11746       // For example, if FoundRHS = 2, then FoundLHS > 2, so FoundLHS is at
11747       // least 3. Dividing it by any Denominator < 4 gives at least 1.
11748       auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
11749       if (isKnownNonPositive(RHS) &&
11750           IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
11751         return true;
11752 
11753       // Try to prove the following rule:
11754       // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
11755       // For example, given that FoundLHS > -3, FoundLHS is at least -2.
11756       // If we divide it by Denominator > 2, then:
11757       // 1. If FoundLHS is negative, then the result is 0.
11758       // 2. If FoundLHS is non-negative, then the result is non-negative.
11759       // Anyways, the result is non-negative.
11760       auto *MinusOne = getMinusOne(WTy);
11761       auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
11762       if (isKnownNegative(RHS) &&
11763           IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
11764         return true;
11765     }
11766   }
11767 
11768   // If our expression contained SCEVUnknown Phis, and we split it down and now
11769   // need to prove something for them, try to prove the predicate for every
11770   // possible incoming value of those Phis.
11771   if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
11772     return true;
11773 
11774   return false;
11775 }
11776 
11777 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
11778                                         const SCEV *LHS, const SCEV *RHS) {
11779   // zext x u<= sext x, sext x s<= zext x
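        // For example, for i8 x = -1 widened to i16: zext x = 255 while sext x
        // is 65535 as an unsigned value, so zext x u<= sext x; read as signed,
        // sext x = -1 s<= 255 = zext x.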
11780   switch (Pred) {
11781   case ICmpInst::ICMP_SGE:
11782     std::swap(LHS, RHS);
11783     LLVM_FALLTHROUGH;
11784   case ICmpInst::ICMP_SLE: {
11785     // If operand >=s 0 then ZExt == SExt.  If operand <s 0 then SExt <s ZExt.
11786     const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
11787     const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
11788     if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
11789       return true;
11790     break;
11791   }
11792   case ICmpInst::ICMP_UGE:
11793     std::swap(LHS, RHS);
11794     LLVM_FALLTHROUGH;
11795   case ICmpInst::ICMP_ULE: {
11796     // If operand >=s 0 then ZExt == SExt.  If operand <s 0 then ZExt <u SExt.
11797     const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
11798     const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
11799     if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
11800       return true;
11801     break;
11802   }
11803   default:
11804     break;
11805   };
11806   return false;
11807 }
11808 
11809 bool
11810 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
11811                                            const SCEV *LHS, const SCEV *RHS) {
11812   return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
11813          isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
11814          IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
11815          IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
11816          isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
11817 }
11818 
11819 bool
11820 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
11821                                              const SCEV *LHS, const SCEV *RHS,
11822                                              const SCEV *FoundLHS,
11823                                              const SCEV *FoundRHS) {
11824   switch (Pred) {
11825   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
11826   case ICmpInst::ICMP_EQ:
11827   case ICmpInst::ICMP_NE:
11828     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
11829       return true;
11830     break;
11831   case ICmpInst::ICMP_SLT:
11832   case ICmpInst::ICMP_SLE:
11833     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
11834         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
11835       return true;
11836     break;
11837   case ICmpInst::ICMP_SGT:
11838   case ICmpInst::ICMP_SGE:
11839     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
11840         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
11841       return true;
11842     break;
11843   case ICmpInst::ICMP_ULT:
11844   case ICmpInst::ICMP_ULE:
11845     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
11846         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
11847       return true;
11848     break;
11849   case ICmpInst::ICMP_UGT:
11850   case ICmpInst::ICMP_UGE:
11851     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
11852         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
11853       return true;
11854     break;
11855   }
11856 
11857   // Maybe it can be proved via operations?
11858   if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
11859     return true;
11860 
11861   return false;
11862 }
11863 
11864 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
11865                                                      const SCEV *LHS,
11866                                                      const SCEV *RHS,
11867                                                      const SCEV *FoundLHS,
11868                                                      const SCEV *FoundRHS) {
11869   if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
11870     // The restriction on `FoundRHS` can be lifted easily -- it exists only to
11871     // reduce the compile time impact of this optimization.
11872     return false;
11873 
11874   Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
11875   if (!Addend)
11876     return false;
11877 
11878   const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
11879 
11880   // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
11881   // antecedent "`FoundLHS` `Pred` `FoundRHS`".
11882   ConstantRange FoundLHSRange =
11883       ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS);
11884 
11885   // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
11886   ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
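        // For example, from "FoundLHS <u 8" we get FoundLHSRange = [0, 8); if
        // LHS = FoundLHS + 2, then LHSRange = [2, 10), which is enough to prove
        // a consequent such as "LHS <u 10".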
11887 
11888   // We can also compute the range of values for `LHS` that satisfy the
11889   // consequent, "`LHS` `Pred` `RHS`":
11890   const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
11891   // The antecedent implies the consequent if every value of `LHS` that
11892   // satisfies the antecedent also satisfies the consequent.
11893   return LHSRange.icmp(Pred, ConstRHS);
11894 }
11895 
11896 bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
11897                                         bool IsSigned) {
11898   assert(isKnownPositive(Stride) && "Positive stride expected!");
11899 
11900   unsigned BitWidth = getTypeSizeInBits(RHS->getType());
11901   const SCEV *One = getOne(Stride->getType());
11902 
11903   if (IsSigned) {
11904     APInt MaxRHS = getSignedRangeMax(RHS);
11905     APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
11906     APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
11907 
11908     // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
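          // For example, for i8 with MaxRHS = 120 and MaxStrideMinusOne = 10:
          // 127 - 10 = 117 <s 120, so an IV at 119 <s RHS could step past
          // SINT_MAX on the next iteration.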
11909     return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
11910   }
11911 
11912   APInt MaxRHS = getUnsignedRangeMax(RHS);
11913   APInt MaxValue = APInt::getMaxValue(BitWidth);
11914   APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
11915 
11916   // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
11917   return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
11918 }
11919 
11920 bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
11921                                         bool IsSigned) {
11922 
11923   unsigned BitWidth = getTypeSizeInBits(RHS->getType());
11924   const SCEV *One = getOne(Stride->getType());
11925 
11926   if (IsSigned) {
11927     APInt MinRHS = getSignedRangeMin(RHS);
11928     APInt MinValue = APInt::getSignedMinValue(BitWidth);
11929     APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
11930 
11931     // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
11932     return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
11933   }
11934 
11935   APInt MinRHS = getUnsignedRangeMin(RHS);
11936   APInt MinValue = APInt::getMinValue(BitWidth);
11937   APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
11938 
11939   // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
11940   return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
11941 }
11942 
11943 const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) {
11944   // umin(N, 1) + floor((N - umin(N, 1)) / D)
11945   // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin
11946   // expression fixes the case of N=0.
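        // For example, N = 7, D = 2: umin(7, 1) = 1 and 1 + (7 - 1) /u 2 = 4,
        // which is ceil(7 / 2); for N = 0 the whole expression is 0.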
11947   const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType()));
11948   const SCEV *NMinusOne = getMinusSCEV(N, MinNOne);
11949   return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D));
11950 }
11951 
11952 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
11953                                                     const SCEV *Stride,
11954                                                     const SCEV *End,
11955                                                     unsigned BitWidth,
11956                                                     bool IsSigned) {
11957   // The logic in this function assumes we can represent a positive stride.
11958   // If we can't, the backedge-taken count must be zero.
11959   if (IsSigned && BitWidth == 1)
11960     return getZero(Stride->getType());
11961 
11962   // This code has only been closely audited for negative strides in the
11963   // unsigned comparison case; it may be correct for signed comparisons, but
11964   // that needs to be established.
11965   assert((!IsSigned || !isKnownNonPositive(Stride)) &&
11966          "Stride is expected strictly positive for signed case!");
11967 
11968   // Calculate the maximum backedge count based on the range of values
11969   // permitted by Start, End, and Stride.
11970   APInt MinStart =
11971       IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);
11972 
11973   APInt MinStride =
11974       IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
11975 
11976   // We assume either the stride is positive, or the backedge-taken count
11977   // is zero. So force StrideForMaxBECount to be at least one.
11978   APInt One(BitWidth, 1);
11979   APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride)
11980                                        : APIntOps::umax(One, MinStride);
11981 
11982   APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
11983                             : APInt::getMaxValue(BitWidth);
11984   APInt Limit = MaxValue - (StrideForMaxBECount - 1);
11985 
11986   // Although End can be a MAX expression, we estimate MaxEnd considering only
11987   // the case End = RHS of the loop termination condition. This is safe because
11988   // in the other case (End - Start) is zero, leading to a zero maximum backedge
11989   // taken count.
11990   APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
11991                           : APIntOps::umin(getUnsignedRangeMax(End), Limit);
11992 
11993   // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride)
11994   MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart)
11995                     : APIntOps::umax(MaxEnd, MinStart);
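        // For example, with MinStart = 0, MaxEnd = 100, and a stride of 3,
        // this yields ceil(100 / 3) = 34 as the maximum backedge count.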
11996 
11997   return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */,
11998                          getConstant(StrideForMaxBECount) /* Step */);
11999 }
12000 
12001 ScalarEvolution::ExitLimit
12002 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
12003                                   const Loop *L, bool IsSigned,
12004                                   bool ControlsExit, bool AllowPredicates) {
12005   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
12006 
12007   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
12008   bool PredicatedIV = false;
12009 
12010   auto canAssumeNoSelfWrap = [&](const SCEVAddRecExpr *AR) {
12011     // Can we prove this loop *must* be UB if overflow of IV occurs?
12012     // Reasoning goes as follows:
12013     // * Suppose the IV did self wrap.
12014     // * If Stride evenly divides the iteration space, then once wrap
12015     //   occurs, the loop must revisit the same values.
12016     // * We know that RHS is invariant, and that none of those values
12017     //   caused this exit to be taken previously.  Thus, this exit is
12018     //   dynamically dead.
12019     // * If this is the sole exit, then a dead exit implies the loop
12020     //   must be infinite if there are no abnormal exits.
12021     // * If the loop were infinite, then it must either not be mustprogress
12022     //   or have side effects. Otherwise, it must be UB.
12023     //   * It can't (by assumption) be UB, so we have contradicted our
12024     //   premise and can conclude the IV did not in fact self-wrap.
12025     if (!isLoopInvariant(RHS, L))
12026       return false;
12027 
12028     auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this));
12029     if (!StrideC || !StrideC->getAPInt().isPowerOf2())
12030       return false;
12031 
12032     if (!ControlsExit || !loopHasNoAbnormalExits(L))
12033       return false;
12034 
12035     return loopIsFiniteByAssumption(L);
12036   };
12037 
12038   if (!IV) {
12039     if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) {
12040       const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand());
12041       if (AR && AR->getLoop() == L && AR->isAffine()) {
12042         auto canProveNUW = [&]() {
12043           if (!isLoopInvariant(RHS, L))
12044             return false;
12045 
12046           if (!isKnownNonZero(AR->getStepRecurrence(*this)))
12047             // We need the sequence defined by AR to strictly increase in the
12048             // unsigned integer domain for the logic below to hold.
12049             return false;
12050 
12051           const unsigned InnerBitWidth = getTypeSizeInBits(AR->getType());
12052           const unsigned OuterBitWidth = getTypeSizeInBits(RHS->getType());
12053           // If RHS <=u Limit, then there must exist a value V in the sequence
12054           // defined by AR (e.g. {Start,+,Step}) such that V >u RHS, and
12055           // V <=u UINT_MAX.  Thus, we must exit the loop before unsigned
12056           // overflow occurs.  This limit also implies that a signed comparison
12057           // (in the wide bitwidth) is equivalent to an unsigned comparison as
12058           // the high bits on both sides must be zero.
12059           APInt StrideMax = getUnsignedRangeMax(AR->getStepRecurrence(*this));
12060           APInt Limit = APInt::getMaxValue(InnerBitWidth) - (StrideMax - 1);
12061           Limit = Limit.zext(OuterBitWidth);
12062           return getUnsignedRangeMax(applyLoopGuards(RHS, L)).ule(Limit);
12063         };
12064         auto Flags = AR->getNoWrapFlags();
12065         if (!hasFlags(Flags, SCEV::FlagNUW) && canProveNUW())
12066           Flags = setFlags(Flags, SCEV::FlagNUW);
12067 
12068         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
12069         if (AR->hasNoUnsignedWrap()) {
12070           // Emulate what getZeroExtendExpr would have done during construction
12071           // if we'd been able to infer the fact just above at that time.
12072           const SCEV *Step = AR->getStepRecurrence(*this);
12073           Type *Ty = ZExt->getType();
12074           auto *S = getAddRecExpr(
12075             getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 0),
12076             getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags());
12077           IV = dyn_cast<SCEVAddRecExpr>(S);
12078         }
12079       }
12080     }
12081   }
12082 
12083 
12084   if (!IV && AllowPredicates) {
12085     // Try to make this an AddRec using runtime tests, in the first X
12086     // iterations of this loop, where X is the SCEV expression found by the
12087     // algorithm below.
12088     IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
12089     PredicatedIV = true;
12090   }
12091 
12092   // Avoid weird loops
12093   if (!IV || IV->getLoop() != L || !IV->isAffine())
12094     return getCouldNotCompute();
12095 
12096   // A precondition of this method is that the condition being analyzed
12097   // reaches an exiting branch which dominates the latch.  Given that, we can
12098   // assume that an increment which violates the nowrap specification and
12099   // produces poison must cause undefined behavior when the resulting poison
12100   // value is branched upon and thus we can conclude that the backedge is
12101   // taken no more often than would be required to produce that poison value.
12102   // Note that a well defined loop can exit on the iteration which violates
12103   // the nowrap specification if there is another exit (either explicit or
12104   // implicit/exceptional) which causes the loop to execute before the
12105   // exiting instruction we're analyzing would trigger UB.
12106   auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
12107   bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
12108   ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
12109 
12110   const SCEV *Stride = IV->getStepRecurrence(*this);
12111 
12112   bool PositiveStride = isKnownPositive(Stride);
12113 
12114   // Avoid negative or zero stride values.
12115   if (!PositiveStride) {
12116     // We can compute the correct backedge taken count for loops with unknown
12117     // strides if we can prove that the loop is not an infinite loop with side
12118     // effects. Here's the loop structure we are trying to handle:
12119     //
12120     // i = start
12121     // do {
12122     //   A[i] = i;
12123     //   i += s;
12124     // } while (i < end);
12125     //
12126     // The backedge taken count for such loops is evaluated as:
12127     // (max(end, start + stride) - start - 1) /u stride
12128     //
12129     // The additional preconditions that we need to check to prove the
12130     // correctness of the above formula are as follows:
12131     //
12132     // a) IV is either nuw or nsw depending upon signedness (indicated by the
12133     //    NoWrap flag).
12134     // b) the loop is guaranteed to be finite (e.g. is mustprogress and has
12135     //    no side effects within the loop)
12136     // c) loop has a single static exit (with no abnormal exits)
12137     //
12138     // Precondition a) implies that if the stride is negative, this is a single
12139     // trip loop. The backedge taken count formula reduces to zero in this case.
12140     //
12141     // Preconditions b) and c) combine to imply that if rhs is invariant in L,
12142     // then a zero stride means the backedge can't be taken without executing
12143     // undefined behavior.
12144     //
12145     // The positive stride case is the same as isKnownPositive(Stride) returning
12146     // true (original behavior of the function).
12147     //
12148     if (PredicatedIV || !NoWrap || !loopIsFiniteByAssumption(L) ||
12149         !loopHasNoAbnormalExits(L))
12150       return getCouldNotCompute();
12151 
12152     // This bailout is protecting the logic in computeMaxBECountForLT which
12153     // has not yet been sufficiently audited or tested with negative strides.
12154     // We used to filter out all known-non-positive cases here; we're in the
12155     // process of becoming less restrictive bit by bit.
12156     if (IsSigned && isKnownNonPositive(Stride))
12157       return getCouldNotCompute();
12158 
12159     if (!isKnownNonZero(Stride)) {
12160       // If we have a step of zero, and RHS isn't invariant in L, we don't know
12161       // if it might eventually be greater than start and if so, on which
12162       // iteration.  We can't even produce a useful upper bound.
12163       if (!isLoopInvariant(RHS, L))
12164         return getCouldNotCompute();
12165 
12166       // We allow a potentially zero stride, but we need to divide by stride
12167       // below.  Since the loop can't be infinite and this check must control
12168       // the sole exit, we can infer the exit must be taken on the first
12169       // iteration (e.g. backedge count = 0) if the stride is zero.  Given that,
12170       // we know the numerator in the divides below must be zero, so we can
12171       // pick an arbitrary non-zero value for the denominator (e.g. stride)
12172       // and produce the right result.
12173       // FIXME: Handle the case where Stride is poison?
12174       auto wouldZeroStrideBeUB = [&]() {
12175         // Proof by contradiction.  Suppose the stride were zero.  If we can
12176         // prove that the backedge *is* taken on the first iteration, then since
12177         // we know this condition controls the sole exit, we must have an
12178         // infinite loop.  We can't have a (well defined) infinite loop per
12179         // check just above.
12180         // Note: The (Start - Stride) term is used to get the start' term from
12181         // (start' + stride,+,stride). Remember that we only care about the
12182         // result of this expression when stride == 0 at runtime.
12183         auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
12184         return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
12185       };
12186       if (!wouldZeroStrideBeUB()) {
12187         Stride = getUMaxExpr(Stride, getOne(Stride->getType()));
12188       }
12189     }
12190   } else if (!Stride->isOne() && !NoWrap) {
12191     auto isUBOnWrap = [&]() {
12192       // From no-self-wrap, we then need to prove no-(un)signed-wrap.  This
12193       // follows trivially from the fact that every (un)signed-wrapped, but
12194       // not self-wrapped, value must be less than the last value before the
12195       // (un)signed wrap.  Since that last value didn't cause an exit, neither
12196       // will any smaller one.
12197       return canAssumeNoSelfWrap(IV);
12198     };
12199 
12200     // Avoid proven overflow cases: this will ensure that the backedge taken
12201     // count will not generate any unsigned overflow. Relaxed no-overflow
12202     // conditions exploit NoWrapFlags, allowing us to optimize in the presence
12203     // of undefined behavior, as in the case of the C language.
12204     if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
12205       return getCouldNotCompute();
12206   }
12207 
12208   // On all paths just preceding, we established the following invariant:
12209   //   IV can be assumed not to overflow up to and including the exiting
12210   //   iteration.  We proved this in one of two ways:
12211   //   1) We can show overflow doesn't occur before the exiting iteration
12212   //      1a) via the canIVOverflowOnLT check, or 1b) because the step is one
12213   //   2) We can show that if overflow occurs, the loop must execute UB
12214   //      before any possible exit.
12215   // Note that we have not yet proved RHS invariant (in general).
12216 
12217   const SCEV *Start = IV->getStart();
12218 
12219   // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
12220   // If we convert to integers, isLoopEntryGuardedByCond will miss some cases.
12221   // Use integer-typed versions for actual computation; we can't subtract
12222   // pointers in general.
12223   const SCEV *OrigStart = Start;
12224   const SCEV *OrigRHS = RHS;
12225   if (Start->getType()->isPointerTy()) {
12226     Start = getLosslessPtrToIntExpr(Start);
12227     if (isa<SCEVCouldNotCompute>(Start))
12228       return Start;
12229   }
12230   if (RHS->getType()->isPointerTy()) {
12231     RHS = getLosslessPtrToIntExpr(RHS);
12232     if (isa<SCEVCouldNotCompute>(RHS))
12233       return RHS;
12234   }
12235 
12236   // When the RHS is not invariant, we do not know the end bound of the loop and
12237   // cannot calculate the ExactBECount needed by ExitLimit. However, we can
12238   // calculate the MaxBECount, given the start, stride and max value for the end
12239   // bound of the loop (RHS), and the fact that IV does not overflow (which is
12240   // checked above).
12241   if (!isLoopInvariant(RHS, L)) {
12242     const SCEV *MaxBECount = computeMaxBECountForLT(
12243         Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
12244     return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
12245                      false /*MaxOrZero*/, Predicates);
12246   }
12247 
12248   // We use the expression (max(End,Start)-Start)/Stride to describe the
12249   // backedge count: if the backedge is taken at least once, max(End,Start)
12250   // is End and the result is as above; if not, max(End,Start) is Start and
12251   // we get a backedge count of zero.
12252   const SCEV *BECount = nullptr;
12253   auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride);
12254   assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!");
12255   assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!");
12256   assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!");
12257   // Can we prove max(RHS,Start) > Start - Stride?
12258   if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) &&
12259       isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) {
12260     // In this case, we can use a refined formula for computing backedge taken
12261     // count.  The general formula remains:
12262     //   "End-Start /uceiling Stride" where "End = max(RHS,Start)"
12263     // We want to use the alternate formula:
12264     //   "((End - 1) - (Start - Stride)) /u Stride"
12265     // Let's do a quick case analysis to show these are equivalent under
12266     // our precondition that max(RHS,Start) > Start - Stride.
12267     // * For RHS <= Start, the backedge-taken count must be zero.
12268     //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
12269     //   "((Start - 1) - (Start - Stride)) /u Stride" which simplifies to
12270     //   "(Stride - 1) /u Stride", which is indeed zero for all non-zero
12271     //     values of Stride.  For 0 stride, we've used umax(Stride, 1) above,
12272     //     reducing this to the stride of 1 case.
12273     // * For RHS >= Start, the backedge count must be "RHS-Start /uceil Stride".
12274     //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
12275     //   "((RHS - 1) - (Start - Stride)) /u Stride" reassociates to
12276     //   "((RHS - (Start - Stride) - 1) /u Stride".
12277     //   Our preconditions trivially imply no overflow in that form.
12278     const SCEV *MinusOne = getMinusOne(Stride->getType());
12279     const SCEV *Numerator =
12280         getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride));
12281     BECount = getUDivExpr(Numerator, Stride);
12282   }
12283 
12284   const SCEV *BECountIfBackedgeTaken = nullptr;
12285   if (!BECount) {
12286     auto canProveRHSGreaterThanEqualStart = [&]() {
12287       auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
12288       if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart))
12289         return true;
12290 
12291       // (RHS > Start - 1) implies RHS >= Start.
12292       // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if
12293       //   "Start - 1" doesn't overflow.
12294       // * For signed comparison, if Start - 1 does overflow, it's equal
12295       //   to INT_MAX, and "RHS >s INT_MAX" is trivially false.
12296       // * For unsigned comparison, if Start - 1 does overflow, it's equal
12297       //   to UINT_MAX, and "RHS >u UINT_MAX" is trivially false.
12298       //
12299       // FIXME: Should isLoopEntryGuardedByCond do this for us?
12300       auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
12301       auto *StartMinusOne = getAddExpr(OrigStart,
12302                                        getMinusOne(OrigStart->getType()));
12303       return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
12304     };
12305 
12306     // If we know that RHS >= Start in the context of loop, then we know that
12307     // max(RHS, Start) = RHS at this point.
12308     const SCEV *End;
12309     if (canProveRHSGreaterThanEqualStart()) {
12310       End = RHS;
12311     } else {
12312       // If RHS < Start, the backedge will be taken zero times.  So in
12313       // general, we can write the backedge-taken count as:
12314       //
12315       //     RHS >= Start ? ceil(RHS - Start) / Stride : 0
12316       //
12317       // We convert it to the following to make it more convenient for SCEV:
12318       //
12319       //     ceil(max(RHS, Start) - Start) / Stride
12320       End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
12321 
12322       // See what would happen if we assume the backedge is taken. This is
12323       // used to compute MaxBECount.
12324       BECountIfBackedgeTaken = getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride);
12325     }
12326 
12327     // At this point, we know:
12328     //
12329     // 1. If IsSigned, Start <=s End; otherwise, Start <=u End
12330     // 2. The index variable doesn't overflow.
12331     //
12332     // Therefore, we know N exists such that
12333     // (Start + Stride * N) >= End, and computing "(Start + Stride * N)"
12334     // doesn't overflow.
12335     //
12336     // Using this information, try to prove whether the addition in
12337     // "(Start - End) + (Stride - 1)" has unsigned overflow.
12338     const SCEV *One = getOne(Stride->getType());
12339     bool MayAddOverflow = [&] {
12340       if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) {
12341         if (StrideC->getAPInt().isPowerOf2()) {
12342           // Suppose Stride is a power of two, and Start/End are unsigned
12343           // integers.  Let UMAX be the largest representable unsigned
12344           // integer.
12345           //
12346           // By the preconditions of this function, we know
12347           // "(Start + Stride * N) >= End", and this doesn't overflow.
12348           // As a formula:
12349           //
12350           //   End <= (Start + Stride * N) <= UMAX
12351           //
12352           // Subtracting Start from all the terms:
12353           //
12354           //   End - Start <= Stride * N <= UMAX - Start
12355           //
12356           // Since Start is unsigned, UMAX - Start <= UMAX.  Therefore:
12357           //
12358           //   End - Start <= Stride * N <= UMAX
12359           //
12360           // Stride * N is a multiple of Stride. Therefore,
12361           //
12362           //   End - Start <= Stride * N <= UMAX - (UMAX mod Stride)
12363           //
12364           // Since Stride is a power of two, UMAX + 1 is divisible by Stride.
12365           // Therefore, UMAX mod Stride == Stride - 1.  So we can write:
12366           //
12367           //   End - Start <= Stride * N <= UMAX - Stride + 1
12368           //
12369           // Dropping the middle term:
12370           //
12371           //   End - Start <= UMAX - Stride + 1
12372           //
12373           // Adding Stride - 1 to both sides:
12374           //
12375           //   (End - Start) + (Stride - 1) <= UMAX
12376           //
12377           // In other words, the addition doesn't have unsigned overflow.
12378           //
12379           // A similar proof works if we treat Start/End as signed values.
12380           // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to
12381           // use signed max instead of unsigned max. Note that we're trying
12382           // to prove a lack of unsigned overflow in either case.
12383           return false;
12384         }
12385       }
12386       if (Start == Stride || Start == getMinusSCEV(Stride, One)) {
12387         // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1.
12388         // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End.
12389         // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End.
12390         //
12391         // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End.
12392         return false;
12393       }
12394       return true;
12395     }();
12396 
12397     const SCEV *Delta = getMinusSCEV(End, Start);
12398     if (!MayAddOverflow) {
12399       // floor((D + (S - 1)) / S)
12400       // We prefer this formulation when legal: it takes fewer operations.
12401       BECount =
12402           getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride);
12403     } else {
12404       BECount = getUDivCeilSCEV(Delta, Stride);
12405     }
12406   }
12407 
12408   const SCEV *MaxBECount;
12409   bool MaxOrZero = false;
12410   if (isa<SCEVConstant>(BECount)) {
12411     MaxBECount = BECount;
12412   } else if (BECountIfBackedgeTaken &&
12413              isa<SCEVConstant>(BECountIfBackedgeTaken)) {
12414     // If we know exactly how many times the backedge will be taken if it's
12415     // taken at least once, then the backedge count will either be that or
12416     // zero.
12417     MaxBECount = BECountIfBackedgeTaken;
12418     MaxOrZero = true;
12419   } else {
12420     MaxBECount = computeMaxBECountForLT(
12421         Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
12422   }
12423 
12424   if (isa<SCEVCouldNotCompute>(MaxBECount) &&
12425       !isa<SCEVCouldNotCompute>(BECount))
12426     MaxBECount = getConstant(getUnsignedRangeMax(BECount));
12427 
12428   return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
12429 }
12430 
12431 ScalarEvolution::ExitLimit
12432 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
12433                                      const Loop *L, bool IsSigned,
12434                                      bool ControlsExit, bool AllowPredicates) {
12435   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
12436   // We handle only IV > Invariant
12437   if (!isLoopInvariant(RHS, L))
12438     return getCouldNotCompute();
12439 
12440   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
12441   if (!IV && AllowPredicates)
12442     // Try to make this an AddRec using runtime tests, in the first X
12443     // iterations of this loop, where X is the SCEV expression found by the
12444     // algorithm below.
12445     IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
12446 
12447   // Avoid weird loops
12448   if (!IV || IV->getLoop() != L || !IV->isAffine())
12449     return getCouldNotCompute();
12450 
12451   auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
12452   bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
12453   ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
12454 
12455   const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
12456 
12457   // Avoid negative or zero stride values
12458   if (!isKnownPositive(Stride))
12459     return getCouldNotCompute();
12460 
12461   // Avoid proven overflow cases: this will ensure that the backedge taken count
12462   // will not generate any unsigned overflow. Relaxed no-overflow conditions
12463   // exploit NoWrapFlags, allowing us to optimize in the presence of undefined
12464   // behavior, as in the case of the C language.
12465   if (!Stride->isOne() && !NoWrap)
12466     if (canIVOverflowOnGT(RHS, Stride, IsSigned))
12467       return getCouldNotCompute();
12468 
12469   const SCEV *Start = IV->getStart();
12470   const SCEV *End = RHS;
12471   if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
12472     // If we know that Start >= RHS in the context of loop, then we know that
12473     // min(RHS, Start) = RHS at this point.
12474     if (isLoopEntryGuardedByCond(
12475             L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
12476       End = RHS;
12477     else
12478       End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
12479   }
12480 
12481   if (Start->getType()->isPointerTy()) {
12482     Start = getLosslessPtrToIntExpr(Start);
12483     if (isa<SCEVCouldNotCompute>(Start))
12484       return Start;
12485   }
12486   if (End->getType()->isPointerTy()) {
12487     End = getLosslessPtrToIntExpr(End);
12488     if (isa<SCEVCouldNotCompute>(End))
12489       return End;
12490   }
12491 
12492   // Compute ((Start - End) + (Stride - 1)) / Stride.
12493   // FIXME: This can overflow. Holding off on fixing this for now;
12494   // howManyGreaterThans will hopefully be gone soon.
12495   const SCEV *One = getOne(Stride->getType());
12496   const SCEV *BECount = getUDivExpr(
12497       getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride);
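        // For example, Start = 10, End = 3, Stride = 2 gives
        // ((10 - 3) + (2 - 1)) /u 2 = 4, matching the four backedges taken as
        // the IV steps 10 -> 8 -> 6 -> 4 -> 2.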
12498 
12499   APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
12500                             : getUnsignedRangeMax(Start);
12501 
12502   APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
12503                              : getUnsignedRangeMin(Stride);
12504 
12505   unsigned BitWidth = getTypeSizeInBits(LHS->getType());
12506   APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
12507                          : APInt::getMinValue(BitWidth) + (MinStride - 1);
12508 
12509   // Although End can be a MIN expression, we estimate MinEnd considering only
12510   // the case End = RHS. This is safe because in the other case (Start - End)
12511   // is zero, leading to a zero maximum backedge taken count.
12512   APInt MinEnd =
12513     IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
12514              : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);
12515 
12516   const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
12517                                ? BECount
12518                                : getUDivCeilSCEV(getConstant(MaxStart - MinEnd),
12519                                                  getConstant(MinStride));
12520 
12521   if (isa<SCEVCouldNotCompute>(MaxBECount))
12522     MaxBECount = BECount;
12523 
12524   return ExitLimit(BECount, MaxBECount, false, Predicates);
12525 }
12526 
12527 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
12528                                                     ScalarEvolution &SE) const {
12529   if (Range.isFullSet())  // Infinite loop.
12530     return SE.getCouldNotCompute();
12531 
12532   // If the start is a non-zero constant, shift the range to simplify things.
12533   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
12534     if (!SC->getValue()->isZero()) {
12535       SmallVector<const SCEV *, 4> Operands(operands());
12536       Operands[0] = SE.getZero(SC->getType());
12537       const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
12538                                              getNoWrapFlags(FlagNW));
12539       if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
12540         return ShiftedAddRec->getNumIterationsInRange(
12541             Range.subtract(SC->getAPInt()), SE);
12542       // This is strange and shouldn't happen.
12543       return SE.getCouldNotCompute();
12544     }
12545 
12546   // The only time we can solve this is when we have all constant indices.
12547   // Otherwise, we cannot determine the overflow conditions.
12548   if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
12549     return SE.getCouldNotCompute();
12550 
12551   // Okay at this point we know that all elements of the chrec are constants and
12552   // that the start element is zero.
12553 
12554   // First check to see if the range contains zero.  If not, the first
12555   // iteration exits.
12556   unsigned BitWidth = SE.getTypeSizeInBits(getType());
12557   if (!Range.contains(APInt(BitWidth, 0)))
12558     return SE.getZero(getType());
12559 
12560   if (isAffine()) {
12561     // If this is an affine expression then we have this situation:
12562     //   Solve {0,+,A} in Range  ===  Ax in Range
12563 
12564     // We know that zero is in the range.  If A is positive then we know that
12565     // the upper value of the range must be the first possible exit value.
12566     // If A is negative then the lower value of the range is the last
12567     // possible loop value.  Also note that we already checked for a full range.
12568     APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
12569     APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();
12570 
12571     // The exit value should be (End+A)/A.
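          // For example, solving {0,+,4} in the range [0, 10): End = 9 and
          // ExitVal = (9 + 4) /u 4 = 3; indeed 4 * 3 = 12 is the first value
          // of the chrec outside the range.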
12572     APInt ExitVal = (End + A).udiv(A);
12573     ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
12574 
12575     // Evaluate at the exit value.  If we really did fall out of the valid
12576     // range, then we computed our trip count, otherwise wrap around or other
12577     // things must have happened.
12578     ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
12579     if (Range.contains(Val->getValue()))
12580       return SE.getCouldNotCompute();  // Something strange happened
12581 
12582     // Ensure that the previous value is in the range.
12583     assert(Range.contains(
12584            EvaluateConstantChrecAtConstant(this,
12585            ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
12586            "Linear scev computation is off in a bad way!");
12587     return SE.getConstant(ExitValue);
12588   }
12589 
12590   if (isQuadratic()) {
12591     if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
12592       return SE.getConstant(S.getValue());
12593   }
12594 
12595   return SE.getCouldNotCompute();
12596 }
12597 
12598 const SCEVAddRecExpr *
12599 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
12600   assert(getNumOperands() > 1 && "AddRec with zero step?");
12601   // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
12602   // but in this case we cannot guarantee that the value returned will be an
12603   // AddRec because SCEV does not have a fixed point where it stops
12604   // simplification: it is legal to return ({rec1} + {rec2}). For example, it
12605   // may happen if we reach arithmetic depth limit while simplifying. So we
12606   // construct the returned value explicitly.
12607   SmallVector<const SCEV *, 3> Ops;
12608   // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
12609   // (this + Step) is {A+B,+,B+C,+...,+,N}.
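        // For example, {1,+,2,+,3} has step {2,+,3}, so the post-increment
        // value is {1+2,+,2+3,+,3} = {3,+,5,+,3}.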
12610   for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
12611     Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
12612   // We know that the last operand is not a constant zero (otherwise it would
12613   // have been popped out earlier). This guarantees us that if the result has
12614   // the same last operand, then it will also not be popped out, meaning that
12615   // the returned value will be an AddRec.
12616   const SCEV *Last = getOperand(getNumOperands() - 1);
12617   assert(!Last->isZero() && "Recurrence with zero step?");
12618   Ops.push_back(Last);
12619   return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
12620                                                SCEV::FlagAnyWrap));
12621 }
12622 
12623 // Return true when S contains at least one undef value.
12624 bool ScalarEvolution::containsUndefs(const SCEV *S) const {
12625   return SCEVExprContains(S, [](const SCEV *S) {
12626     if (const auto *SU = dyn_cast<SCEVUnknown>(S))
12627       return isa<UndefValue>(SU->getValue());
12628     return false;
12629   });
12630 }
12631 
12632 /// Return the size of an element read or written by Inst.
12633 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
12634   Type *Ty;
12635   if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
12636     Ty = Store->getValueOperand()->getType();
12637   else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
12638     Ty = Load->getType();
12639   else
12640     return nullptr;
12641 
12642   Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
12643   return getSizeOfExpr(ETy, Ty);
12644 }
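// For instance (editor's illustration): for "store i64 %v, i64* %p" this
// returns the SCEV constant 8 -- the allocation size of i64 in bytes on a
// typical 64-bit DataLayout -- in the effective pointer-width integer type.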
12645 
12646 //===----------------------------------------------------------------------===//
12647 //                   SCEVCallbackVH Class Implementation
12648 //===----------------------------------------------------------------------===//
12649 
12650 void ScalarEvolution::SCEVCallbackVH::deleted() {
12651   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
12652   if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
12653     SE->ConstantEvolutionLoopExitValue.erase(PN);
12654   SE->eraseValueFromMap(getValPtr());
12655   // this now dangles!
12656 }
12657 
12658 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
12659   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
12660 
12661   // Forget all the expressions associated with users of the old value,
12662   // so that future queries will recompute the expressions using the new
12663   // value.
12664   Value *Old = getValPtr();
12665   SmallVector<User *, 16> Worklist(Old->users());
12666   SmallPtrSet<User *, 8> Visited;
12667   while (!Worklist.empty()) {
12668     User *U = Worklist.pop_back_val();
12669     // Deleting the Old value will cause this to dangle. Postpone
12670     // that until everything else is done.
12671     if (U == Old)
12672       continue;
12673     if (!Visited.insert(U).second)
12674       continue;
12675     if (PHINode *PN = dyn_cast<PHINode>(U))
12676       SE->ConstantEvolutionLoopExitValue.erase(PN);
12677     SE->eraseValueFromMap(U);
12678     llvm::append_range(Worklist, U->users());
12679   }
12680   // Delete the Old value.
12681   if (PHINode *PN = dyn_cast<PHINode>(Old))
12682     SE->ConstantEvolutionLoopExitValue.erase(PN);
12683   SE->eraseValueFromMap(Old);
12684   // this now dangles!
12685 }
12686 
12687 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
12688   : CallbackVH(V), SE(se) {}
12689 
12690 //===----------------------------------------------------------------------===//
12691 //                   ScalarEvolution Class Implementation
12692 //===----------------------------------------------------------------------===//
12693 
12694 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
12695                                  AssumptionCache &AC, DominatorTree &DT,
12696                                  LoopInfo &LI)
12697     : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
12698       CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
12699       LoopDispositions(64), BlockDispositions(64) {
12700   // To use guards for proving predicates, we need to scan every instruction in
12701   // relevant basic blocks, and not just terminators.  Doing this is a waste of
12702   // time if the IR does not actually contain any calls to
12703   // @llvm.experimental.guard, so do a quick check and remember this beforehand.
12704   //
12705   // This pessimizes the case where a pass that preserves ScalarEvolution wants
12706   // to _add_ guards to the module when there weren't any before, and wants
12707   // ScalarEvolution to optimize based on those guards.  For now we prefer to be
12708   // efficient in lieu of being smart in that rather obscure case.
12709 
12710   auto *GuardDecl = F.getParent()->getFunction(
12711       Intrinsic::getName(Intrinsic::experimental_guard));
12712   HasGuards = GuardDecl && !GuardDecl->use_empty();
12713 }
12714 
12715 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
12716     : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
12717       LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
12718       ValueExprMap(std::move(Arg.ValueExprMap)),
12719       PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
12720       PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
12721       PendingMerges(std::move(Arg.PendingMerges)),
12722       MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
12723       BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
12724       PredicatedBackedgeTakenCounts(
12725           std::move(Arg.PredicatedBackedgeTakenCounts)),
12726       BECountUsers(std::move(Arg.BECountUsers)),
12727       ConstantEvolutionLoopExitValue(
12728           std::move(Arg.ConstantEvolutionLoopExitValue)),
12729       ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
12730       ValuesAtScopesUsers(std::move(Arg.ValuesAtScopesUsers)),
12731       LoopDispositions(std::move(Arg.LoopDispositions)),
12732       LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
12733       BlockDispositions(std::move(Arg.BlockDispositions)),
12734       SCEVUsers(std::move(Arg.SCEVUsers)),
12735       UnsignedRanges(std::move(Arg.UnsignedRanges)),
12736       SignedRanges(std::move(Arg.SignedRanges)),
12737       UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
12738       UniquePreds(std::move(Arg.UniquePreds)),
12739       SCEVAllocator(std::move(Arg.SCEVAllocator)),
12740       LoopUsers(std::move(Arg.LoopUsers)),
12741       PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
12742       FirstUnknown(Arg.FirstUnknown) {
12743   Arg.FirstUnknown = nullptr;
12744 }
12745 
12746 ScalarEvolution::~ScalarEvolution() {
12747   // Iterate through all the SCEVUnknown instances and call their
12748   // destructors, so that they release their references to their values.
12749   for (SCEVUnknown *U = FirstUnknown; U;) {
12750     SCEVUnknown *Tmp = U;
12751     U = U->Next;
12752     Tmp->~SCEVUnknown();
12753   }
12754   FirstUnknown = nullptr;
12755 
12756   ExprValueMap.clear();
12757   ValueExprMap.clear();
12758   HasRecMap.clear();
12759   BackedgeTakenCounts.clear();
12760   PredicatedBackedgeTakenCounts.clear();
12761 
12762   assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
12763   assert(PendingPhiRanges.empty() && "getRangeRef garbage");
12764   assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
12765   assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
12766   assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
12767 }
12768 
12769 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
12770   return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
12771 }
12772 
12773 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
12774                           const Loop *L) {
12775   // Print all inner loops first
12776   for (Loop *I : *L)
12777     PrintLoopInfo(OS, SE, I);
12778 
12779   OS << "Loop ";
12780   L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12781   OS << ": ";
12782 
12783   SmallVector<BasicBlock *, 8> ExitingBlocks;
12784   L->getExitingBlocks(ExitingBlocks);
12785   if (ExitingBlocks.size() != 1)
12786     OS << "<multiple exits> ";
12787 
12788   if (SE->hasLoopInvariantBackedgeTakenCount(L))
12789     OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n";
12790   else
12791     OS << "Unpredictable backedge-taken count.\n";
12792 
12793   if (ExitingBlocks.size() > 1)
12794     for (BasicBlock *ExitingBlock : ExitingBlocks) {
12795       OS << "  exit count for " << ExitingBlock->getName() << ": "
12796          << *SE->getExitCount(L, ExitingBlock) << "\n";
12797     }
12798 
12799   OS << "Loop ";
12800   L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12801   OS << ": ";
12802 
12803   if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) {
12804     OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L);
12805     if (SE->isBackedgeTakenCountMaxOrZero(L))
12806       OS << ", actual taken count is either this or zero.";
12807   } else {
12808     OS << "Unpredictable max backedge-taken count. ";
12809   }
12810 
12811   OS << "\n"
12812         "Loop ";
12813   L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12814   OS << ": ";
12815 
12816   SmallVector<const SCEVPredicate *, 4> Preds;
12817   auto PBT = SE->getPredicatedBackedgeTakenCount(L, Preds);
12818   if (!isa<SCEVCouldNotCompute>(PBT)) {
12819     OS << "Predicated backedge-taken count is " << *PBT << "\n";
12820     OS << " Predicates:\n";
12821     SCEVUnionPredicate Dedup(Preds);
12822     Dedup.print(OS, 4);
12823   } else {
12824     OS << "Unpredictable predicated backedge-taken count. ";
12825   }
12826   OS << "\n";
12827 
12828   if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
12829     OS << "Loop ";
12830     L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12831     OS << ": ";
12832     OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
12833   }
12834 }
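// For a simple counted loop the output produced above is shaped roughly as
// follows (an editor's sketch; exact block names and counts depend on the
// input IR):
//
//   Loop %for.body: backedge-taken count is 41
//   Loop %for.body: max backedge-taken count is 41
//   Loop %for.body: Predicated backedge-taken count is 41
//    Predicates:
//   Loop %for.body: Trip multiple is 42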
12835 
12836 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
12837   switch (LD) {
12838   case ScalarEvolution::LoopVariant:
12839     return "Variant";
12840   case ScalarEvolution::LoopInvariant:
12841     return "Invariant";
12842   case ScalarEvolution::LoopComputable:
12843     return "Computable";
12844   }
12845   llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
12846 }
12847 
12848 void ScalarEvolution::print(raw_ostream &OS) const {
12849   // ScalarEvolution's implementation of the print method is to print
12850   // out SCEV values of all instructions that are interesting. Doing
12851   // this potentially causes it to create new SCEV objects though,
12852   // which technically conflicts with the const qualifier. This isn't
12853 // observable from outside the class, so casting away the
12854   // const isn't dangerous.
12855   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
12856 
12857   if (ClassifyExpressions) {
12858     OS << "Classifying expressions for: ";
12859     F.printAsOperand(OS, /*PrintType=*/false);
12860     OS << "\n";
12861     for (Instruction &I : instructions(F))
12862       if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
12863         OS << I << '\n';
12864         OS << "  -->  ";
12865         const SCEV *SV = SE.getSCEV(&I);
12866         SV->print(OS);
12867         if (!isa<SCEVCouldNotCompute>(SV)) {
12868           OS << " U: ";
12869           SE.getUnsignedRange(SV).print(OS);
12870           OS << " S: ";
12871           SE.getSignedRange(SV).print(OS);
12872         }
12873 
12874         const Loop *L = LI.getLoopFor(I.getParent());
12875 
12876         const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
12877         if (AtUse != SV) {
12878           OS << "  -->  ";
12879           AtUse->print(OS);
12880           if (!isa<SCEVCouldNotCompute>(AtUse)) {
12881             OS << " U: ";
12882             SE.getUnsignedRange(AtUse).print(OS);
12883             OS << " S: ";
12884             SE.getSignedRange(AtUse).print(OS);
12885           }
12886         }
12887 
12888         if (L) {
12889           OS << "\t\t" "Exits: ";
12890           const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
12891           if (!SE.isLoopInvariant(ExitValue, L)) {
12892             OS << "<<Unknown>>";
12893           } else {
12894             OS << *ExitValue;
12895           }
12896 
12897           bool First = true;
12898           for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
12899             if (First) {
12900               OS << "\t\t" "LoopDispositions: { ";
12901               First = false;
12902             } else {
12903               OS << ", ";
12904             }
12905 
12906             Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12907             OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
12908           }
12909 
12910           for (auto *InnerL : depth_first(L)) {
12911             if (InnerL == L)
12912               continue;
12913             if (First) {
12914               OS << "\t\t" "LoopDispositions: { ";
12915               First = false;
12916             } else {
12917               OS << ", ";
12918             }
12919 
12920             InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12921             OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
12922           }
12923 
12924           OS << " }";
12925         }
12926 
12927         OS << "\n";
12928       }
12929   }
12930 
12931   OS << "Determining loop execution counts for: ";
12932   F.printAsOperand(OS, /*PrintType=*/false);
12933   OS << "\n";
12934   for (Loop *I : LI)
12935     PrintLoopInfo(OS, &SE, I);
12936 }
12937 
12938 ScalarEvolution::LoopDisposition
12939 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
12940   auto &Values = LoopDispositions[S];
12941   for (auto &V : Values) {
12942     if (V.getPointer() == L)
12943       return V.getInt();
12944   }
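  // Seed the cache with a conservative LoopVariant placeholder before
  // computing: computeLoopDisposition may add further entries to
  // LoopDispositions (invalidating the Values reference above), so the slot
  // is re-found below and updated with the computed disposition.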
12945   Values.emplace_back(L, LoopVariant);
12946   LoopDisposition D = computeLoopDisposition(S, L);
12947   auto &Values2 = LoopDispositions[S];
12948   for (auto &V : llvm::reverse(Values2)) {
12949     if (V.getPointer() == L) {
12950       V.setInt(D);
12951       break;
12952     }
12953   }
12954   return D;
12955 }
12956 
12957 ScalarEvolution::LoopDisposition
12958 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
12959   switch (S->getSCEVType()) {
12960   case scConstant:
12961     return LoopInvariant;
12962   case scPtrToInt:
12963   case scTruncate:
12964   case scZeroExtend:
12965   case scSignExtend:
12966     return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
12967   case scAddRecExpr: {
12968     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
12969 
12970     // If L is the addrec's loop, it's computable.
12971     if (AR->getLoop() == L)
12972       return LoopComputable;
12973 
12974     // Add recurrences are never invariant in the function-body (null loop).
12975     if (!L)
12976       return LoopVariant;
12977 
12978     // Everything that is not defined at loop entry is variant.
12979     if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
12980       return LoopVariant;
12981     assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
12982            " dominate the contained loop's header?");
12983 
12984     // This recurrence is invariant w.r.t. L if AR's loop contains L.
12985     if (AR->getLoop()->contains(L))
12986       return LoopInvariant;
12987 
12988     // This recurrence is variant w.r.t. L if any of its operands
12989     // are variant.
12990     for (auto *Op : AR->operands())
12991       if (!isLoopInvariant(Op, L))
12992         return LoopVariant;
12993 
12994     // Otherwise it's loop-invariant.
12995     return LoopInvariant;
12996   }
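  // Example (editor's illustration): {0,+,1}<Outer> is LoopComputable w.r.t.
  // Outer, LoopInvariant w.r.t. a loop Inner nested inside Outer (its value
  // is fixed while Inner iterates), and LoopVariant w.r.t. any loop that
  // encloses Outer.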
12997   case scAddExpr:
12998   case scMulExpr:
12999   case scUMaxExpr:
13000   case scSMaxExpr:
13001   case scUMinExpr:
13002   case scSMinExpr:
13003   case scSequentialUMinExpr: {
13004     bool HasVarying = false;
13005     for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
13006       LoopDisposition D = getLoopDisposition(Op, L);
13007       if (D == LoopVariant)
13008         return LoopVariant;
13009       if (D == LoopComputable)
13010         HasVarying = true;
13011     }
13012     return HasVarying ? LoopComputable : LoopInvariant;
13013   }
13014   case scUDivExpr: {
13015     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
13016     LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
13017     if (LD == LoopVariant)
13018       return LoopVariant;
13019     LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
13020     if (RD == LoopVariant)
13021       return LoopVariant;
13022     return (LD == LoopInvariant && RD == LoopInvariant) ?
13023            LoopInvariant : LoopComputable;
13024   }
13025   case scUnknown:
13026     // All non-instruction values are loop invariant.  All instructions are loop
13027     // invariant if they are not contained in the specified loop.
13028     // Instructions are never considered invariant in the function body
13029     // (null loop) because they are defined within the "loop".
13030     if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
13031       return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
13032     return LoopInvariant;
13033   case scCouldNotCompute:
13034     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
13035   }
13036   llvm_unreachable("Unknown SCEV kind!");
13037 }
13038 
13039 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
13040   return getLoopDisposition(S, L) == LoopInvariant;
13041 }
13042 
13043 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
13044   return getLoopDisposition(S, L) == LoopComputable;
13045 }
13046 
13047 ScalarEvolution::BlockDisposition
13048 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
13049   auto &Values = BlockDispositions[S];
13050   for (auto &V : Values) {
13051     if (V.getPointer() == BB)
13052       return V.getInt();
13053   }
13054   Values.emplace_back(BB, DoesNotDominateBlock);
13055   BlockDisposition D = computeBlockDisposition(S, BB);
13056   auto &Values2 = BlockDispositions[S];
13057   for (auto &V : llvm::reverse(Values2)) {
13058     if (V.getPointer() == BB) {
13059       V.setInt(D);
13060       break;
13061     }
13062   }
13063   return D;
13064 }
13065 
13066 ScalarEvolution::BlockDisposition
13067 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
13068   switch (S->getSCEVType()) {
13069   case scConstant:
13070     return ProperlyDominatesBlock;
13071   case scPtrToInt:
13072   case scTruncate:
13073   case scZeroExtend:
13074   case scSignExtend:
13075     return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
13076   case scAddRecExpr: {
13077     // This uses a "dominates" query instead of "properly dominates" query
13078     // to test for proper dominance too, because the instruction which
13079     // produces the addrec's value is a PHI, and a PHI effectively properly
13080     // dominates its entire containing block.
13081     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
13082     if (!DT.dominates(AR->getLoop()->getHeader(), BB))
13083       return DoesNotDominateBlock;
13084 
13085     // Fall through into SCEVNAryExpr handling.
13086     LLVM_FALLTHROUGH;
13087   }
13088   case scAddExpr:
13089   case scMulExpr:
13090   case scUMaxExpr:
13091   case scSMaxExpr:
13092   case scUMinExpr:
13093   case scSMinExpr:
13094   case scSequentialUMinExpr: {
13095     const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
13096     bool Proper = true;
13097     for (const SCEV *NAryOp : NAry->operands()) {
13098       BlockDisposition D = getBlockDisposition(NAryOp, BB);
13099       if (D == DoesNotDominateBlock)
13100         return DoesNotDominateBlock;
13101       if (D == DominatesBlock)
13102         Proper = false;
13103     }
13104     return Proper ? ProperlyDominatesBlock : DominatesBlock;
13105   }
13106   case scUDivExpr: {
13107     const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
13108     const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
13109     BlockDisposition LD = getBlockDisposition(LHS, BB);
13110     if (LD == DoesNotDominateBlock)
13111       return DoesNotDominateBlock;
13112     BlockDisposition RD = getBlockDisposition(RHS, BB);
13113     if (RD == DoesNotDominateBlock)
13114       return DoesNotDominateBlock;
13115     return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
13116       ProperlyDominatesBlock : DominatesBlock;
13117   }
13118   case scUnknown:
13119     if (Instruction *I =
13120           dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
13121       if (I->getParent() == BB)
13122         return DominatesBlock;
13123       if (DT.properlyDominates(I->getParent(), BB))
13124         return ProperlyDominatesBlock;
13125       return DoesNotDominateBlock;
13126     }
13127     return ProperlyDominatesBlock;
13128   case scCouldNotCompute:
13129     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
13130   }
13131   llvm_unreachable("Unknown SCEV kind!");
13132 }
13133 
13134 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
13135   return getBlockDisposition(S, BB) >= DominatesBlock;
13136 }
13137 
13138 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
13139   return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
13140 }
13141 
13142 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
13143   return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
13144 }
13145 
13146 void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L,
13147                                                 bool Predicated) {
13148   auto &BECounts =
13149       Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
13150   auto It = BECounts.find(L);
13151   if (It != BECounts.end()) {
13152     for (const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
13153       if (!isa<SCEVConstant>(ENT.ExactNotTaken)) {
13154         auto UserIt = BECountUsers.find(ENT.ExactNotTaken);
13155         assert(UserIt != BECountUsers.end());
13156         UserIt->second.erase({L, Predicated});
13157       }
13158     }
13159     BECounts.erase(It);
13160   }
13161 }
13162 
13163 void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) {
13164   SmallPtrSet<const SCEV *, 8> ToForget(SCEVs.begin(), SCEVs.end());
13165   SmallVector<const SCEV *, 8> Worklist(ToForget.begin(), ToForget.end());
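  // Compute the transitive closure over SCEVUsers: forgetting an expression
  // must also forget everything built on top of it.  For instance (editor's
  // illustration), forgetting %n also forgets {0,+,%n} and any sum or
  // product that uses it.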
13166 
13167   while (!Worklist.empty()) {
13168     const SCEV *Curr = Worklist.pop_back_val();
13169     auto Users = SCEVUsers.find(Curr);
13170     if (Users != SCEVUsers.end())
13171       for (auto *User : Users->second)
13172         if (ToForget.insert(User).second)
13173           Worklist.push_back(User);
13174   }
13175 
13176   for (auto *S : ToForget)
13177     forgetMemoizedResultsImpl(S);
13178 
13179   for (auto I = PredicatedSCEVRewrites.begin();
13180        I != PredicatedSCEVRewrites.end();) {
13181     std::pair<const SCEV *, const Loop *> Entry = I->first;
13182     if (ToForget.count(Entry.first))
13183       PredicatedSCEVRewrites.erase(I++);
13184     else
13185       ++I;
13186   }
13187 }
13188 
13189 void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) {
13190   LoopDispositions.erase(S);
13191   BlockDispositions.erase(S);
13192   UnsignedRanges.erase(S);
13193   SignedRanges.erase(S);
13194   HasRecMap.erase(S);
13195   MinTrailingZerosCache.erase(S);
13196 
13197   auto ExprIt = ExprValueMap.find(S);
13198   if (ExprIt != ExprValueMap.end()) {
13199     for (auto &ValueAndOffset : ExprIt->second) {
13200       if (ValueAndOffset.second == nullptr) {
13201         auto ValueIt = ValueExprMap.find_as(ValueAndOffset.first);
13202         if (ValueIt != ValueExprMap.end())
13203           ValueExprMap.erase(ValueIt);
13204       }
13205     }
13206     ExprValueMap.erase(ExprIt);
13207   }
13208 
13209   auto ScopeIt = ValuesAtScopes.find(S);
13210   if (ScopeIt != ValuesAtScopes.end()) {
13211     for (const auto &Pair : ScopeIt->second)
13212       if (!isa_and_nonnull<SCEVConstant>(Pair.second))
13213         erase_value(ValuesAtScopesUsers[Pair.second],
13214                     std::make_pair(Pair.first, S));
13215     ValuesAtScopes.erase(ScopeIt);
13216   }
13217 
13218   auto ScopeUserIt = ValuesAtScopesUsers.find(S);
13219   if (ScopeUserIt != ValuesAtScopesUsers.end()) {
13220     for (const auto &Pair : ScopeUserIt->second)
13221       erase_value(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
13222     ValuesAtScopesUsers.erase(ScopeUserIt);
13223   }
13224 
13225   auto BEUsersIt = BECountUsers.find(S);
13226   if (BEUsersIt != BECountUsers.end()) {
13227     // Work on a copy, as forgetBackedgeTakenCounts() will modify the original.
13228     auto Copy = BEUsersIt->second;
13229     for (const auto &Pair : Copy)
13230       forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
13231     BECountUsers.erase(BEUsersIt);
13232   }
13233 }
13234 
13235 void
13236 ScalarEvolution::getUsedLoops(const SCEV *S,
13237                               SmallPtrSetImpl<const Loop *> &LoopsUsed) {
13238   struct FindUsedLoops {
13239     FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
13240         : LoopsUsed(LoopsUsed) {}
13241     SmallPtrSetImpl<const Loop *> &LoopsUsed;
13242     bool follow(const SCEV *S) {
13243       if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
13244         LoopsUsed.insert(AR->getLoop());
13245       return true;
13246     }
13247 
13248     bool isDone() const { return false; }
13249   };
13250 
13251   FindUsedLoops F(LoopsUsed);
13252   SCEVTraversal<FindUsedLoops>(F).visitAll(S);
13253 }
13254 
13255 void ScalarEvolution::verify() const {
13256   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
13257   ScalarEvolution SE2(F, TLI, AC, DT, LI);
13258 
13259   SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());
13260 
13261   // Maps SCEV expressions from one ScalarEvolution "universe" to another.
13262   struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
13263     SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
13264 
13265     const SCEV *visitConstant(const SCEVConstant *Constant) {
13266       return SE.getConstant(Constant->getAPInt());
13267     }
13268 
13269     const SCEV *visitUnknown(const SCEVUnknown *Expr) {
13270       return SE.getUnknown(Expr->getValue());
13271     }
13272 
13273     const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
13274       return SE.getCouldNotCompute();
13275     }
13276   };
13277 
13278   SCEVMapper SCM(SE2);
13279 
13280   while (!LoopStack.empty()) {
13281     auto *L = LoopStack.pop_back_val();
13282     llvm::append_range(LoopStack, *L);
13283 
13284     auto *CurBECount = SCM.visit(
13285         const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
13286     auto *NewBECount = SE2.getBackedgeTakenCount(L);
13287 
13288     if (CurBECount == SE2.getCouldNotCompute() ||
13289         NewBECount == SE2.getCouldNotCompute()) {
13290       // NB! This situation is legal, but is very suspicious -- whatever pass
13291       // changed the loop to make a trip count go from could not compute to
13292       // computable or vice-versa *should have* invalidated SCEV.  However, we
13293       // choose not to assert here (for now) since we don't want false
13294       // positives.
13295       continue;
13296     }
13297 
13298     if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
13299       // SCEV treats "undef" as an unknown but consistent value (i.e. it does
13300       // not propagate undef aggressively).  This means we can (and do) fail
13301       // verification in cases where a transform makes the trip count of a loop
13302       // go from "undef" to "undef+1" (say).  The transform is fine, since in
13303       // both cases the loop iterates "undef" times, but SCEV thinks we
13304       // increased the trip count of the loop by 1 incorrectly.
13305       continue;
13306     }
13307 
13308     if (SE.getTypeSizeInBits(CurBECount->getType()) >
13309         SE.getTypeSizeInBits(NewBECount->getType()))
13310       NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
13311     else if (SE.getTypeSizeInBits(CurBECount->getType()) <
13312              SE.getTypeSizeInBits(NewBECount->getType()))
13313       CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());
13314 
13315     const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);
13316 
13317     // Unless VerifySCEVStrict is set, we only compare constant deltas.
13318     if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
13319       dbgs() << "Trip Count for " << *L << " Changed!\n";
13320       dbgs() << "Old: " << *CurBECount << "\n";
13321       dbgs() << "New: " << *NewBECount << "\n";
13322       dbgs() << "Delta: " << *Delta << "\n";
13323       std::abort();
13324     }
13325   }
13326 
13327   // Collect all valid loops currently in LoopInfo.
13328   SmallPtrSet<Loop *, 32> ValidLoops;
13329   SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
13330   while (!Worklist.empty()) {
13331     Loop *L = Worklist.pop_back_val();
13332     if (ValidLoops.contains(L))
13333       continue;
13334     ValidLoops.insert(L);
13335     Worklist.append(L->begin(), L->end());
13336   }
13337   for (auto &KV : ValueExprMap) {
13338 #ifndef NDEBUG
13339     // Check for SCEV expressions referencing invalid/deleted loops.
13340     if (auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) {
13341       assert(ValidLoops.contains(AR->getLoop()) &&
13342              "AddRec references invalid loop");
13343     }
13344 #endif
13345 
13346     // Check that the value is also part of the reverse map.
13347     auto It = ExprValueMap.find(KV.second);
13348     if (It == ExprValueMap.end() || !It->second.contains({KV.first, nullptr})) {
13349       dbgs() << "Value " << *KV.first
13350              << " is in ValueExprMap but not in ExprValueMap\n";
13351       std::abort();
13352     }
13353   }
13354 
13355   for (const auto &KV : ExprValueMap) {
13356     for (const auto &ValueAndOffset : KV.second) {
13357       if (ValueAndOffset.second != nullptr)
13358         continue;
13359 
13360       auto It = ValueExprMap.find_as(ValueAndOffset.first);
13361       if (It == ValueExprMap.end()) {
13362         dbgs() << "Value " << *ValueAndOffset.first
13363                << " is in ExprValueMap but not in ValueExprMap\n";
13364         std::abort();
13365       }
13366       if (It->second != KV.first) {
13367         dbgs() << "Value " << *ValueAndOffset.first
13368                << " mapped to " << *It->second
13369                << " rather than " << *KV.first << "\n";
13370         std::abort();
13371       }
13372     }
13373   }
13374 
13375   // Verify integrity of SCEV users.
13376   for (const auto &S : UniqueSCEVs) {
13377     SmallVector<const SCEV *, 4> Ops;
13378     collectUniqueOps(&S, Ops);
13379     for (const auto *Op : Ops) {
13380       // We do not store dependencies of constants.
13381       if (isa<SCEVConstant>(Op))
13382         continue;
13383       auto It = SCEVUsers.find(Op);
13384       if (It != SCEVUsers.end() && It->second.count(&S))
13385         continue;
13386       dbgs() << "Use of operand " << *Op << " by user " << S
13387              << " is not being tracked!\n";
13388       std::abort();
13389     }
13390   }
13391 
13392   // Verify integrity of ValuesAtScopes users.
13393   for (const auto &ValueAndVec : ValuesAtScopes) {
13394     const SCEV *Value = ValueAndVec.first;
13395     for (const auto &LoopAndValueAtScope : ValueAndVec.second) {
13396       const Loop *L = LoopAndValueAtScope.first;
13397       const SCEV *ValueAtScope = LoopAndValueAtScope.second;
13398       if (!isa<SCEVConstant>(ValueAtScope)) {
13399         auto It = ValuesAtScopesUsers.find(ValueAtScope);
13400         if (It != ValuesAtScopesUsers.end() &&
13401             is_contained(It->second, std::make_pair(L, Value)))
13402           continue;
13403         dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: "
13404                << *ValueAtScope << " missing in ValuesAtScopesUsers\n";
13405         std::abort();
13406       }
13407     }
13408   }
13409 
13410   for (const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
13411     const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
13412     for (const auto &LoopAndValue : ValueAtScopeAndVec.second) {
13413       const Loop *L = LoopAndValue.first;
13414       const SCEV *Value = LoopAndValue.second;
13415       assert(!isa<SCEVConstant>(Value));
13416       auto It = ValuesAtScopes.find(Value);
13417       if (It != ValuesAtScopes.end() &&
13418           is_contained(It->second, std::make_pair(L, ValueAtScope)))
13419         continue;
13420       dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: "
13421              << *ValueAtScope << " missing in ValuesAtScopes\n";
13422       std::abort();
13423     }
13424   }
13425 
13426   // Verify integrity of BECountUsers.
13427   auto VerifyBECountUsers = [&](bool Predicated) {
13428     auto &BECounts =
13429         Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
13430     for (const auto &LoopAndBEInfo : BECounts) {
13431       for (const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
13432         if (!isa<SCEVConstant>(ENT.ExactNotTaken)) {
13433           auto UserIt = BECountUsers.find(ENT.ExactNotTaken);
13434           if (UserIt != BECountUsers.end() &&
13435               UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
13436             continue;
13437           dbgs() << "Value " << *ENT.ExactNotTaken << " for loop "
13438                  << *LoopAndBEInfo.first << " missing from BECountUsers\n";
13439           std::abort();
13440         }
13441       }
13442     }
13443   };
13444   VerifyBECountUsers(/* Predicated */ false);
13445   VerifyBECountUsers(/* Predicated */ true);
13446 }
13447 
13448 bool ScalarEvolution::invalidate(
13449     Function &F, const PreservedAnalyses &PA,
13450     FunctionAnalysisManager::Invalidator &Inv) {
13451   // Invalidate the ScalarEvolution object whenever it isn't preserved or one
13452   // of its dependencies is invalidated.
13453   auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
13454   return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
13455          Inv.invalidate<AssumptionAnalysis>(F, PA) ||
13456          Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
13457          Inv.invalidate<LoopAnalysis>(F, PA);
13458 }
13459 
13460 AnalysisKey ScalarEvolutionAnalysis::Key;
13461 
13462 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
13463                                              FunctionAnalysisManager &AM) {
13464   return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
13465                          AM.getResult<AssumptionAnalysis>(F),
13466                          AM.getResult<DominatorTreeAnalysis>(F),
13467                          AM.getResult<LoopAnalysis>(F));
13468 }
13469 
13470 PreservedAnalyses
13471 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
13472   AM.getResult<ScalarEvolutionAnalysis>(F).verify();
13473   return PreservedAnalyses::all();
13474 }
13475 
13476 PreservedAnalyses
13477 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
13478   // For compatibility with opt's -analyze feature under legacy pass manager
13479   // which was not ported to NPM. This keeps tests using
13480   // update_analyze_test_checks.py working.
13481   OS << "Printing analysis 'Scalar Evolution Analysis' for function '"
13482      << F.getName() << "':\n";
13483   AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
13484   return PreservedAnalyses::all();
13485 }
13486 
13487 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
13488                       "Scalar Evolution Analysis", false, true)
13489 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
13490 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
13491 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
13492 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
13493 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
13494                     "Scalar Evolution Analysis", false, true)
13495 
13496 char ScalarEvolutionWrapperPass::ID = 0;
13497 
13498 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
13499   initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
13500 }
13501 
13502 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
13503   SE.reset(new ScalarEvolution(
13504       F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
13505       getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
13506       getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
13507       getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
13508   return false;
13509 }
13510 
13511 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }
13512 
13513 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
13514   SE->print(OS);
13515 }
13516 
13517 void ScalarEvolutionWrapperPass::verifyAnalysis() const {
13518   if (!VerifySCEV)
13519     return;
13520 
13521   SE->verify();
13522 }
13523 
13524 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
13525   AU.setPreservesAll();
13526   AU.addRequiredTransitive<AssumptionCacheTracker>();
13527   AU.addRequiredTransitive<LoopInfoWrapperPass>();
13528   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
13529   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
13530 }
13531 
13532 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
13533                                                         const SCEV *RHS) {
13534   return getComparePredicate(ICmpInst::ICMP_EQ, LHS, RHS);
13535 }
13536 
13537 const SCEVPredicate *
13538 ScalarEvolution::getComparePredicate(const ICmpInst::Predicate Pred,
13539                                      const SCEV *LHS, const SCEV *RHS) {
13540   FoldingSetNodeID ID;
13541   assert(LHS->getType() == RHS->getType() &&
13542          "Type mismatch between LHS and RHS");
13543   // Unique this node based on the arguments
13544   ID.AddInteger(SCEVPredicate::P_Compare);
13545   ID.AddInteger(Pred);
13546   ID.AddPointer(LHS);
13547   ID.AddPointer(RHS);
13548   void *IP = nullptr;
13549   if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
13550     return S;
13551   SCEVComparePredicate *Eq = new (SCEVAllocator)
13552     SCEVComparePredicate(ID.Intern(SCEVAllocator), Pred, LHS, RHS);
13553   UniquePreds.InsertNode(Eq, IP);
13554   return Eq;
13555 }
13556 
13557 const SCEVPredicate *ScalarEvolution::getWrapPredicate(
13558     const SCEVAddRecExpr *AR,
13559     SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
13560   FoldingSetNodeID ID;
13561   // Unique this node based on the arguments
13562   ID.AddInteger(SCEVPredicate::P_Wrap);
13563   ID.AddPointer(AR);
13564   ID.AddInteger(AddedFlags);
13565   void *IP = nullptr;
13566   if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
13567     return S;
13568   auto *OF = new (SCEVAllocator)
13569       SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
13570   UniquePreds.InsertNode(OF, IP);
13571   return OF;
13572 }
13573 
13574 namespace {
13575 
13576 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
13577 public:
13578 
13579   /// Rewrites \p S in the context of a loop L and the SCEV predication
13580   /// infrastructure.
13581   ///
13582   /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
13583   /// equivalences present in \p Pred.
13584   ///
13585   /// If \p NewPreds is non-null, rewrite is free to add further predicates to
13586   /// \p NewPreds such that the result will be an AddRecExpr.
13587   static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
13588                              SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
13589                              SCEVUnionPredicate *Pred) {
13590     SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
13591     return Rewriter.visit(S);
13592   }
13593 
13594   const SCEV *visitUnknown(const SCEVUnknown *Expr) {
13595     if (Pred) {
13596       auto ExprPreds = Pred->getPredicatesForExpr(Expr);
13597       for (auto *Pred : ExprPreds)
13598         if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred))
13599           if (IPred->getLHS() == Expr &&
13600               IPred->getPredicate() == ICmpInst::ICMP_EQ)
13601             return IPred->getRHS();
13602     }
13603     return convertToAddRecWithPreds(Expr);
13604   }
13605 
13606   const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
13607     const SCEV *Operand = visit(Expr->getOperand());
13608     const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
13609     if (AR && AR->getLoop() == L && AR->isAffine()) {
13610       // This couldn't be folded because the operand didn't have the nuw
13611       // flag. Add the nusw flag as an assumption that we could make.
13612       const SCEV *Step = AR->getStepRecurrence(SE);
13613       Type *Ty = Expr->getType();
13614       if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
13615         return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
13616                                 SE.getSignExtendExpr(Step, Ty), L,
13617                                 AR->getNoWrapFlags());
13618     }
13619     return SE.getZeroExtendExpr(Operand, Expr->getType());
13620   }
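  // Example (editor's illustration): rewriting (zext i32 {0,+,1}<%loop> to
  // i64) yields {0,+,1}<%loop> in i64, provided the wrap predicate
  // "{0,+,1}<%loop> Added Flags: <nusw>" can be taken as a runtime
  // assumption.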
13621 
13622   const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
13623     const SCEV *Operand = visit(Expr->getOperand());
13624     const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
13625     if (AR && AR->getLoop() == L && AR->isAffine()) {
13626       // This couldn't be folded because the operand didn't have the nsw
13627       // flag. Add the nssw flag as an assumption that we could make.
13628       const SCEV *Step = AR->getStepRecurrence(SE);
13629       Type *Ty = Expr->getType();
13630       if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
13631         return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
13632                                 SE.getSignExtendExpr(Step, Ty), L,
13633                                 AR->getNoWrapFlags());
13634     }
13635     return SE.getSignExtendExpr(Operand, Expr->getType());
13636   }
13637 
13638 private:
13639   explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
13640                         SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
13641                         SCEVUnionPredicate *Pred)
13642       : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}
13643 
13644   bool addOverflowAssumption(const SCEVPredicate *P) {
13645     if (!NewPreds) {
13646       // Check if we've already made this assumption.
13647       return Pred && Pred->implies(P);
13648     }
13649     NewPreds->insert(P);
13650     return true;
13651   }
13652 
13653   bool addOverflowAssumption(const SCEVAddRecExpr *AR,
13654                              SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
13655     auto *A = SE.getWrapPredicate(AR, AddedFlags);
13656     return addOverflowAssumption(A);
13657   }
13658 
13659   // If \p Expr represents a PHINode, we try to see if it can be represented
13660   // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
13661   // to add this predicate as a runtime overflow check, we return the AddRec.
13662   // If \p Expr does not meet these conditions (is not a PHI node, or we
13663   // couldn't create an AddRec for it, or couldn't add the predicate), we just
13664   // return \p Expr.
13665   const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
13666     if (!isa<PHINode>(Expr->getValue()))
13667       return Expr;
13668     Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
13669     PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
13670     if (!PredicatedRewrite)
13671       return Expr;
13672     for (auto *P : PredicatedRewrite->second) {
13673       // Wrap predicates from outer loops are not supported.
13674       if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
13675         auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
13676         if (L != AR->getLoop())
13677           return Expr;
13678       }
13679       if (!addOverflowAssumption(P))
13680         return Expr;
13681     }
13682     return PredicatedRewrite->first;
13683   }
13684 
13685   SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
13686   SCEVUnionPredicate *Pred;
13687   const Loop *L;
13688 };
13689 
13690 } // end anonymous namespace
13691 
13692 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
13693                                                    SCEVUnionPredicate &Preds) {
13694   return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
13695 }
13696 
13697 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
13698     const SCEV *S, const Loop *L,
13699     SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
13700   SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
13701   S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
13702   auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
13703 
13704   if (!AddRec)
13705     return nullptr;
13706 
13707   // Since the transformation was successful, we can now transfer the SCEV
13708   // predicates.
13709   for (auto *P : TransformPreds)
13710     Preds.insert(P);
13711 
13712   return AddRec;
13713 }
13714 
13715 /// SCEV predicates
13716 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
13717                              SCEVPredicateKind Kind)
13718     : FastID(ID), Kind(Kind) {}
13719 
13720 SCEVComparePredicate::SCEVComparePredicate(const FoldingSetNodeIDRef ID,
13721                                    const ICmpInst::Predicate Pred,
13722                                    const SCEV *LHS, const SCEV *RHS)
13723   : SCEVPredicate(ID, P_Compare), Pred(Pred), LHS(LHS), RHS(RHS) {
13724   assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
13725   assert(LHS != RHS && "LHS and RHS are the same SCEV");
13726 }
13727 
13728 bool SCEVComparePredicate::implies(const SCEVPredicate *N) const {
13729   const auto *Op = dyn_cast<SCEVComparePredicate>(N);
13730 
13731   if (!Op)
13732     return false;
13733 
13734   if (Pred != ICmpInst::ICMP_EQ)
13735     return false;
13736 
13737   return Op->LHS == LHS && Op->RHS == RHS;
13738 }
13739 
13740 bool SCEVComparePredicate::isAlwaysTrue() const { return false; }
13741 
13742 const SCEV *SCEVComparePredicate::getExpr() const { return LHS; }
13743 
13744 void SCEVComparePredicate::print(raw_ostream &OS, unsigned Depth) const {
13745   if (Pred == ICmpInst::ICMP_EQ)
13746     OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
13747   else
13748     OS.indent(Depth) << "Compare predicate: " << *LHS
13749                      << " " << CmpInst::getPredicateName(Pred) << " "
13750                      << *RHS << "\n";
13751 
13752 }
13753 
13754 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
13755                                      const SCEVAddRecExpr *AR,
13756                                      IncrementWrapFlags Flags)
13757     : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}
13758 
13759 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }
13760 
13761 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
13762   const auto *Op = dyn_cast<SCEVWrapPredicate>(N);
13763 
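  // Op is implied when it constrains the same AddRec and requests no flags
  // beyond ours: merging Op->Flags into Flags must leave Flags unchanged.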
13764   return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
13765 }
13766 
13767 bool SCEVWrapPredicate::isAlwaysTrue() const {
13768   SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
13769   IncrementWrapFlags IFlags = Flags;
13770 
13771   if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
13772     IFlags = clearFlags(IFlags, IncrementNSSW);
13773 
13774   return IFlags == IncrementAnyWrap;
13775 }
13776 
13777 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
13778   OS.indent(Depth) << *getExpr() << " Added Flags: ";
13779   if (SCEVWrapPredicate::IncrementNUSW & getFlags())
13780     OS << "<nusw>";
13781   if (SCEVWrapPredicate::IncrementNSSW & getFlags())
13782     OS << "<nssw>";
13783   OS << "\n";
13784 }
13785 
13786 SCEVWrapPredicate::IncrementWrapFlags
13787 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
13788                                    ScalarEvolution &SE) {
13789   IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
13790   SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();
13791 
13792   // We can safely transfer the NSW flag as NSSW.
13793   if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
13794     ImpliedFlags = IncrementNSSW;
13795 
13796   if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
13797     // If the increment is positive, the SCEV NUW flag will also imply the
13798     // WrapPredicate NUSW flag.
13799     if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
13800       if (Step->getValue()->getValue().isNonNegative())
13801         ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
13802   }
13803 
13804   return ImpliedFlags;
13805 }
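// For instance (editor's illustration): for {0,+,1}<nuw><nsw><%loop> the NSW
// flag transfers as IncrementNSSW, and, because the constant step 1 is
// non-negative, the NUW flag additionally implies IncrementNUSW -- so both
// wrap predicates hold statically and need no runtime check.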
13806 
13807 /// Union predicates don't get cached, so create a dummy set ID for them.
13808 SCEVUnionPredicate::SCEVUnionPredicate(ArrayRef<const SCEVPredicate *> Preds)
13809   : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {
13810   for (auto *P : Preds)
13811     add(P);
13812 }
13813 
13814 bool SCEVUnionPredicate::isAlwaysTrue() const {
13815   return all_of(Preds,
13816                 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
13817 }
13818 
13819 ArrayRef<const SCEVPredicate *>
13820 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
13821   auto I = SCEVToPreds.find(Expr);
13822   if (I == SCEVToPreds.end())
13823     return ArrayRef<const SCEVPredicate *>();
13824   return I->second;
13825 }
13826 
13827 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
13828   if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
13829     return all_of(Set->Preds,
13830                   [this](const SCEVPredicate *I) { return this->implies(I); });
13831 
13832   auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
13833   if (ScevPredsIt == SCEVToPreds.end())
13834     return false;
13835   auto &SCEVPreds = ScevPredsIt->second;
13836 
13837   return any_of(SCEVPreds,
13838                 [N](const SCEVPredicate *I) { return I->implies(N); });
13839 }
13840 
13841 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }
13842 
13843 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
13844   for (auto Pred : Preds)
13845     Pred->print(OS, Depth);
13846 }
13847 
13848 void SCEVUnionPredicate::add(const SCEVPredicate *N) {
13849   if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
13850     for (auto Pred : Set->Preds)
13851       add(Pred);
13852     return;
13853   }
13854 
13855   if (implies(N))
13856     return;
13857 
13858   const SCEV *Key = N->getExpr();
13859   assert(Key && "Only SCEVUnionPredicate doesn't have an "
13860                 "associated expression!");
13861 
13862   SCEVToPreds[Key].push_back(N);
13863   Preds.push_back(N);
13864 }
13865 
13866 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
13867                                                      Loop &L)
13868     : SE(SE), L(L) {
13869   SmallVector<const SCEVPredicate*, 4> Empty;
13870   Preds = std::make_unique<SCEVUnionPredicate>(Empty);
13871 }
13872 
13873 void ScalarEvolution::registerUser(const SCEV *User,
13874                                    ArrayRef<const SCEV *> Ops) {
13875   for (auto *Op : Ops)
13876     // We do not expect that forgetting cached data for SCEVConstants will ever
13877     // open any prospects for sharpening or introduce any correctness issues,
13878     // so we don't bother storing their dependencies.
13879     if (!isa<SCEVConstant>(Op))
13880       SCEVUsers[Op].insert(User);
13881 }
13882 
13883 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
13884   const SCEV *Expr = SE.getSCEV(V);
13885   RewriteEntry &Entry = RewriteMap[Expr];
13886 
13887   // If we already have an entry and the version matches, return it.
13888   if (Entry.second && Generation == Entry.first)
13889     return Entry.second;
13890 
13891   // We found an entry but it's stale. Rewrite the stale entry
13892   // according to the current predicate.
13893   if (Entry.second)
13894     Expr = Entry.second;
13895 
13896   const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
13897   Entry = {Generation, NewSCEV};
13898 
13899   return NewSCEV;
13900 }
13901 
13902 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
13903   if (!BackedgeCount) {
13904     SmallVector<const SCEVPredicate *, 4> Preds;
13905     BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
13906     for (auto *P : Preds)
13907       addPredicate(*P);
13908   }
13909   return BackedgeCount;
13910 }
13911 
13912 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
13913   if (Preds->implies(&Pred))
13914     return;
13915 
13916   auto &OldPreds = Preds->getPredicates();
13917   SmallVector<const SCEVPredicate*, 4> NewPreds(OldPreds.begin(), OldPreds.end());
13918   NewPreds.push_back(&Pred);
13919   Preds = std::make_unique<SCEVUnionPredicate>(NewPreds);
13920   updateGeneration();
13921 }
13922 
13923 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
13924   return *Preds;
13925 }
13926 
13927 void PredicatedScalarEvolution::updateGeneration() {
13928   // If the generation number wrapped, recompute everything.
13929   if (++Generation == 0) {
13930     for (auto &II : RewriteMap) {
13931       const SCEV *Rewritten = II.second.second;
13932       II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, *Preds)};
13933     }
13934   }
13935 }
13936 
13937 void PredicatedScalarEvolution::setNoOverflow(
13938     Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
13939   const SCEV *Expr = getSCEV(V);
13940   const auto *AR = cast<SCEVAddRecExpr>(Expr);
13941 
13942   auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);
13943 
13944   // Clear the statically implied flags.
13945   Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
13946   addPredicate(*SE.getWrapPredicate(AR, Flags));
13947 
13948   auto II = FlagsMap.insert({V, Flags});
13949   if (!II.second)
13950     II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
13951 }
13952 
13953 bool PredicatedScalarEvolution::hasNoOverflow(
13954     Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
13955   const SCEV *Expr = getSCEV(V);
13956   const auto *AR = cast<SCEVAddRecExpr>(Expr);
13957 
13958   Flags = SCEVWrapPredicate::clearFlags(
13959       Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));
13960 
13961   auto II = FlagsMap.find(V);
13962 
13963   if (II != FlagsMap.end())
13964     Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);
13965 
13966   return Flags == SCEVWrapPredicate::IncrementAnyWrap;
13967 }
13968 
13969 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
13970   const SCEV *Expr = this->getSCEV(V);
13971   SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
13972   auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
13973 
13974   if (!New)
13975     return nullptr;
13976 
13977   for (auto *P : NewPreds)
13978     addPredicate(*P);
13979 
13980   RewriteMap[SE.getSCEV(V)] = {Generation, New};
13981   return New;
13982 }
13983 
13984 PredicatedScalarEvolution::PredicatedScalarEvolution(
13985     const PredicatedScalarEvolution &Init)
13986   : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L),
13987     Preds(std::make_unique<SCEVUnionPredicate>(Init.Preds->getPredicates())),
13988     Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
13989   for (auto I : Init.FlagsMap)
13990     FlagsMap.insert(I);
13991 }
13992 
13993 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
13994   // For each block.
13995   for (auto *BB : L.getBlocks())
13996     for (auto &I : *BB) {
13997       if (!SE.isSCEVable(I.getType()))
13998         continue;
13999 
14000       auto *Expr = SE.getSCEV(&I);
14001       auto II = RewriteMap.find(Expr);
14002 
14003       if (II == RewriteMap.end())
14004         continue;
14005 
14006       // Don't print things that are not interesting.
14007       if (II->second.second == Expr)
14008         continue;
14009 
14010       OS.indent(Depth) << "[PSE]" << I << ":\n";
14011       OS.indent(Depth + 2) << *Expr << "\n";
14012       OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
14013     }
14014 }
14015 
14016 // Match the mathematical pattern A - (A / B) * B, where A and B can be
14017 // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
14018 // for URem with constant power-of-2 second operands.
14019 // Matching is not always straightforward, as A and B can be folded (e.g., if
14020 // A is X / 2 and B is 4, A / B becomes X / 8).
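// For example (illustrative), (%a urem 4) typically reaches us as
//   (zext i2 (trunc i32 %a to i2) to i32)
// and (%a urem %b) as
//   (%a + (-1 * (%a /u %b) * %b)).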
14021 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
14022                                 const SCEV *&RHS) {
14023   // Try to match 'zext (trunc A to iB) to iY', which is used
14024   // for URem with constant power-of-2 second operands. Make sure the size of
14025   // the operand A matches the size of the whole expression.
14026   if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
14027     if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
14028       LHS = Trunc->getOperand();
14029       // For now, bail out if the type of the LHS is larger than the type
14030       // of the expression.
14031       if (getTypeSizeInBits(LHS->getType()) >
14032           getTypeSizeInBits(Expr->getType()))
14033         return false;
14034       if (LHS->getType() != Expr->getType())
14035         LHS = getZeroExtendExpr(LHS, Expr->getType());
14036       RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
14037                         << getTypeSizeInBits(Trunc->getType()));
14038       return true;
14039     }
14040   const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
14041   if (Add == nullptr || Add->getNumOperands() != 2)
14042     return false;
14043 
14044   const SCEV *A = Add->getOperand(1);
14045   const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));
14046 
14047   if (Mul == nullptr)
14048     return false;
14049 
14050   const auto MatchURemWithDivisor = [&](const SCEV *B) {
14051     // (SomeExpr + (-(SomeExpr / B) * B)).
14052     if (Expr == getURemExpr(A, B)) {
14053       LHS = A;
14054       RHS = B;
14055       return true;
14056     }
14057     return false;
14058   };
14059 
14060   // (SomeExpr + (-1 * (SomeExpr / B) * B)).
14061   if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
14062     return MatchURemWithDivisor(Mul->getOperand(1)) ||
14063            MatchURemWithDivisor(Mul->getOperand(2));
14064 
14065   // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
14066   if (Mul->getNumOperands() == 2)
14067     return MatchURemWithDivisor(Mul->getOperand(1)) ||
14068            MatchURemWithDivisor(Mul->getOperand(0)) ||
14069            MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
14070            MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
14071   return false;
14072 }
14073 
14074 const SCEV *
14075 ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
14076   SmallVector<BasicBlock*, 16> ExitingBlocks;
14077   L->getExitingBlocks(ExitingBlocks);
14078 
14079   // Form an expression for the maximum exit count possible for this loop. We
14080   // merge the max and exact information to approximate a version of
14081   // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
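  // For example (illustrative), a loop with one exit taken after %n
  // iterations and another taken after at most 100 iterations yields
  // (%n umin 100).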
14082   SmallVector<const SCEV*, 4> ExitCounts;
14083   for (BasicBlock *ExitingBB : ExitingBlocks) {
14084     const SCEV *ExitCount = getExitCount(L, ExitingBB);
14085     if (isa<SCEVCouldNotCompute>(ExitCount))
14086       ExitCount = getExitCount(L, ExitingBB,
14087                                ScalarEvolution::ConstantMaximum);
14088     if (!isa<SCEVCouldNotCompute>(ExitCount)) {
14089       assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
14090              "We should only have known counts for exiting blocks that "
14091              "dominate latch!");
14092       ExitCounts.push_back(ExitCount);
14093     }
14094   }
14095   if (ExitCounts.empty())
14096     return getCouldNotCompute();
14097   return getUMinFromMismatchedTypes(ExitCounts);
14098 }
14099 
14100 /// A rewriter to replace SCEV expressions in Map with their corresponding
14101 /// entries. It skips AddRecExpr because we cannot guarantee that the
14102 /// replacement is loop invariant in the loop of the AddRec.
14103 ///
14104 /// At the moment only rewriting SCEVUnknown and SCEVZeroExtendExpr is
14105 /// supported.
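/// For example (illustrative), with Map = { %a -> (%a umin 7) }, visiting
/// (%b + %a) yields (%b + (%a umin 7)), while an AddRec such as
/// {%a,+,1}<L> is returned unchanged.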
14106 class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
14107   const DenseMap<const SCEV *, const SCEV *> &Map;
14108 
14109 public:
14110   SCEVLoopGuardRewriter(ScalarEvolution &SE,
14111                         DenseMap<const SCEV *, const SCEV *> &M)
14112       : SCEVRewriteVisitor(SE), Map(M) {}
14113 
14114   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
14115 
14116   const SCEV *visitUnknown(const SCEVUnknown *Expr) {
14117     auto I = Map.find(Expr);
14118     if (I == Map.end())
14119       return Expr;
14120     return I->second;
14121   }
14122 
14123   const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
14124     auto I = Map.find(Expr);
14125     if (I == Map.end())
14126       return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr(
14127           Expr);
14128     return I->second;
14129   }
14130 };
14131 
14132 const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
14133   SmallVector<const SCEV *> ExprsToRewrite;
14134   auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
14135                               const SCEV *RHS,
14136                               DenseMap<const SCEV *, const SCEV *>
14137                                   &RewriteMap) {
14138     // WARNING: It is generally unsound to apply any wrap flags to the proposed
14139     // replacement SCEV which isn't directly implied by the structure of that
14140     // SCEV.  In particular, using contextual facts to imply flags is *NOT*
14141     // legal.  See the scoping rules for flags in the header to understand why.
14142 
14143     // If LHS is a constant, apply information to the other expression.
14144     if (isa<SCEVConstant>(LHS)) {
14145       std::swap(LHS, RHS);
14146       Predicate = CmpInst::getSwappedPredicate(Predicate);
14147     }
14148 
14149     // Check for a condition of the form (-C1 + X < C2).  InstCombine will
14150     // create this form when combining two checks of the form (X u< C2 + C1)
14151     // and (X u>= C1).
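    // For example (illustrative), a guard (-1 + %x) u< 10 restricts %x to
    // [1, 11), so %x can be rewritten to (1 umax (%x umin 10)).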
14152     auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap,
14153                                  &ExprsToRewrite]() {
14154       auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
14155       if (!AddExpr || AddExpr->getNumOperands() != 2)
14156         return false;
14157 
14158       auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
14159       auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
14160       auto *C2 = dyn_cast<SCEVConstant>(RHS);
14161       if (!C1 || !C2 || !LHSUnknown)
14162         return false;
14163 
14164       auto ExactRegion =
14165           ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
14166               .sub(C1->getAPInt());
14167 
14168       // Bail out, unless we have a non-wrapping, monotonic range.
14169       if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
14170         return false;
14171       auto I = RewriteMap.find(LHSUnknown);
14172       const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
14173       RewriteMap[LHSUnknown] = getUMaxExpr(
14174           getConstant(ExactRegion.getUnsignedMin()),
14175           getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
14176       ExprsToRewrite.push_back(LHSUnknown);
14177       return true;
14178     };
14179     if (MatchRangeCheckIdiom())
14180       return;
14181 
14182     // If we have LHS == 0, check if LHS computes a property of some unknown
14183     // SCEV %v, and if so, rewrite %v to express that property explicitly.
14184     const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
14185     if (Predicate == CmpInst::ICMP_EQ && RHSC &&
14186         RHSC->getValue()->isNullValue()) {
14187       // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
14188       // explicitly express that.
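      // For example (illustrative), a guard (%n urem 4) == 0 allows %n to
      // be rewritten to (4 * (%n /u 4)).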
14189       const SCEV *URemLHS = nullptr;
14190       const SCEV *URemRHS = nullptr;
14191       if (matchURem(LHS, URemLHS, URemRHS)) {
14192         if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
14193           auto Multiple = getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS);
14194           RewriteMap[LHSUnknown] = Multiple;
14195           ExprsToRewrite.push_back(LHSUnknown);
14196           return;
14197         }
14198       }
14199     }
14200 
14201     // Do not apply information for constants or if RHS contains an AddRec.
14202     if (isa<SCEVConstant>(LHS) || containsAddRecurrence(RHS))
14203       return;
14204 
14205     // If RHS is SCEVUnknown, make sure the information is applied to it.
14206     if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
14207       std::swap(LHS, RHS);
14208       Predicate = CmpInst::getSwappedPredicate(Predicate);
14209     }
14210 
14211     // Limit to expressions that can be rewritten.
14212     if (!isa<SCEVUnknown>(LHS) && !isa<SCEVZeroExtendExpr>(LHS))
14213       return;
14214 
14215     // Check whether LHS has already been rewritten. In that case we want to
14216     // chain further rewrites onto the already rewritten value.
14217     auto I = RewriteMap.find(LHS);
14218     const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
14219 
14220     const SCEV *RewrittenRHS = nullptr;
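    // For example (illustrative), a guard %x u< 5 rewrites %x to
    // (%x umin 4), while %x u>= 5 rewrites it to (%x umax 5).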
14221     switch (Predicate) {
14222     case CmpInst::ICMP_ULT:
14223       RewrittenRHS =
14224           getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
14225       break;
14226     case CmpInst::ICMP_SLT:
14227       RewrittenRHS =
14228           getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
14229       break;
14230     case CmpInst::ICMP_ULE:
14231       RewrittenRHS = getUMinExpr(RewrittenLHS, RHS);
14232       break;
14233     case CmpInst::ICMP_SLE:
14234       RewrittenRHS = getSMinExpr(RewrittenLHS, RHS);
14235       break;
14236     case CmpInst::ICMP_UGT:
14237       RewrittenRHS =
14238           getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
14239       break;
14240     case CmpInst::ICMP_SGT:
14241       RewrittenRHS =
14242           getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
14243       break;
14244     case CmpInst::ICMP_UGE:
14245       RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS);
14246       break;
14247     case CmpInst::ICMP_SGE:
14248       RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS);
14249       break;
14250     case CmpInst::ICMP_EQ:
14251       if (isa<SCEVConstant>(RHS))
14252         RewrittenRHS = RHS;
14253       break;
14254     case CmpInst::ICMP_NE:
14255       if (isa<SCEVConstant>(RHS) &&
14256           cast<SCEVConstant>(RHS)->getValue()->isNullValue())
14257         RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
14258       break;
14259     default:
14260       break;
14261     }
14262 
14263     if (RewrittenRHS) {
14264       RewriteMap[LHS] = RewrittenRHS;
14265       if (LHS == RewrittenLHS)
14266         ExprsToRewrite.push_back(LHS);
14267     }
14268   };
14269   // First, collect conditions from dominating branches. Starting at the loop
14270   // predecessor, climb up the predecessor chain as long as predecessors can
14271   // be found that have a unique successor leading to the original
14272   // header.
14273   // TODO: share this logic with isLoopEntryGuardedByCond.
14274   SmallVector<std::pair<Value *, bool>> Terms;
14275   for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
14276            L->getLoopPredecessor(), L->getHeader());
14277        Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
14278 
14279     const BranchInst *LoopEntryPredicate =
14280         dyn_cast<BranchInst>(Pair.first->getTerminator());
14281     if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
14282       continue;
14283 
14284     Terms.emplace_back(LoopEntryPredicate->getCondition(),
14285                        LoopEntryPredicate->getSuccessor(0) == Pair.second);
14286   }
14287 
14288   // Now apply the information from the collected conditions to RewriteMap.
14289   // Conditions are processed in reverse order, so the earliest condition is
14290   // processed first. This ensures the SCEVs with the shortest dependency chains
14291   // are constructed first.
14292   DenseMap<const SCEV *, const SCEV *> RewriteMap;
14293   for (auto &E : reverse(Terms)) {
14294     bool EnterIfTrue = E.second;
14295     SmallVector<Value *, 8> Worklist;
14296     SmallPtrSet<Value *, 8> Visited;
14297     Worklist.push_back(E.first);
14298     while (!Worklist.empty()) {
14299       Value *Cond = Worklist.pop_back_val();
14300       if (!Visited.insert(Cond).second)
14301         continue;
14302 
14303       if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
14304         auto Predicate =
14305             EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
14306         CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
14307                          getSCEV(Cmp->getOperand(1)), RewriteMap);
14308         continue;
14309       }
14310 
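      // Decompose logical and/or conditions: if the loop is entered when the
      // condition is true, both operands of a logical-and must hold; if it is
      // entered when the condition is false, both operands of a logical-or
      // must be false. Either way, both operands constrain loop entry.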
14311       Value *L, *R;
14312       if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
14313                       : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
14314         Worklist.push_back(L);
14315         Worklist.push_back(R);
14316       }
14317     }
14318   }
14319 
14320   // Also collect information from assumptions dominating the loop.
14321   for (auto &AssumeVH : AC.assumptions()) {
14322     if (!AssumeVH)
14323       continue;
14324     auto *AssumeI = cast<CallInst>(AssumeVH);
14325     auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
14326     if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
14327       continue;
14328     CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
14329                      getSCEV(Cmp->getOperand(1)), RewriteMap);
14330   }
14331 
14332   if (RewriteMap.empty())
14333     return Expr;
14334 
14335   // Now that all rewrite information is collected, rewrite the collected
14336   // expressions with the information in the map. This applies information to
14337   // sub-expressions.
14338   if (ExprsToRewrite.size() > 1) {
14339     for (const SCEV *Expr : ExprsToRewrite) {
14340       const SCEV *RewriteTo = RewriteMap[Expr];
14341       RewriteMap.erase(Expr);
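      // Temporarily drop Expr's own entry so the rewriter does not
      // substitute Expr into its own replacement.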
14342       SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
14343       RewriteMap.insert({Expr, Rewriter.visit(RewriteTo)});
14344     }
14345   }
14346 
14347   SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
14348   return Rewriter.visit(Expr);
14349 }
14350