1 //===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the implementation of the scalar evolution expander,
10 // which is used to generate the code corresponding to a given scalar evolution
11 // expression.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/LoopInfo.h"
21 #include "llvm/Analysis/TargetTransformInfo.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/Dominators.h"
25 #include "llvm/IR/IntrinsicInst.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/Support/CommandLine.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include "llvm/Transforms/Utils/LoopUtils.h"
30 
31 #ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
32 #define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
33 #else
34 #define SCEV_DEBUG_WITH_TYPE(TYPE, X)
35 #endif
36 
37 using namespace llvm;
38 
39 cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
40     "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
41     cl::desc("When performing SCEV expansion only if it is cheap to do, this "
42              "controls the budget that is considered cheap (default = 4)"));
43 
44 using namespace PatternMatch;
45 
46 PoisonFlags::PoisonFlags(const Instruction *I) {
47   NUW = false;
48   NSW = false;
49   Exact = false;
50   Disjoint = false;
51   NNeg = false;
52   GEPNW = GEPNoWrapFlags::none();
53   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I)) {
54     NUW = OBO->hasNoUnsignedWrap();
55     NSW = OBO->hasNoSignedWrap();
56   }
57   if (auto *PEO = dyn_cast<PossiblyExactOperator>(I))
58     Exact = PEO->isExact();
59   if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
60     Disjoint = PDI->isDisjoint();
61   if (auto *PNI = dyn_cast<PossiblyNonNegInst>(I))
62     NNeg = PNI->hasNonNeg();
63   if (auto *TI = dyn_cast<TruncInst>(I)) {
64     NUW = TI->hasNoUnsignedWrap();
65     NSW = TI->hasNoSignedWrap();
66   }
67   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
68     GEPNW = GEP->getNoWrapFlags();
69 }
70 
71 void PoisonFlags::apply(Instruction *I) {
72   if (isa<OverflowingBinaryOperator>(I)) {
73     I->setHasNoUnsignedWrap(NUW);
74     I->setHasNoSignedWrap(NSW);
75   }
76   if (isa<PossiblyExactOperator>(I))
77     I->setIsExact(Exact);
78   if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
79     PDI->setIsDisjoint(Disjoint);
80   if (auto *PNI = dyn_cast<PossiblyNonNegInst>(I))
81     PNI->setNonNeg(NNeg);
82   if (isa<TruncInst>(I)) {
83     I->setHasNoUnsignedWrap(NUW);
84     I->setHasNoSignedWrap(NSW);
85   }
86   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
87     GEP->setNoWrapFlags(GEPNW);
88 }
89 
90 /// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
91 /// reusing an existing cast if a suitable one (= dominating IP) exists, or
92 /// creating a new one.
93 Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
94                                        Instruction::CastOps Op,
95                                        BasicBlock::iterator IP) {
96   // This function must be called with the builder having a valid insertion
97   // point. It doesn't need to be the actual IP where the uses of the returned
98   // cast will be added, but it must dominate such IP.
99   // We use this precondition to produce a cast that will dominate all its
100   // uses. In particular, this is crucial for the case where the builder's
101   // insertion point *is* the point where we were asked to put the cast.
102   // Since we don't know the builder's insertion point is actually
103   // where the uses will be added (only that it dominates it), we are
104   // not allowed to move it.
105   BasicBlock::iterator BIP = Builder.GetInsertPoint();
106 
107   Value *Ret = nullptr;
108 
109   // Check to see if there is already a cast!
110   for (User *U : V->users()) {
111     if (U->getType() != Ty)
112       continue;
113     CastInst *CI = dyn_cast<CastInst>(U);
114     if (!CI || CI->getOpcode() != Op)
115       continue;
116 
117     // Found a suitable cast that is at IP or comes before IP. Use it. Note that
118     // the cast must also properly dominate the Builder's insertion point.
119     if (IP->getParent() == CI->getParent() && &*BIP != CI &&
120         (&*IP == CI || CI->comesBefore(&*IP))) {
121       Ret = CI;
122       break;
123     }
124   }
125 
126   // Create a new cast.
127   if (!Ret) {
128     SCEVInsertPointGuard Guard(Builder, this);
129     Builder.SetInsertPoint(&*IP);
130     Ret = Builder.CreateCast(Op, V, Ty, V->getName());
131   }
132 
133   // We assert at the end of the function since IP might point to an
134   // instruction with different dominance properties than a cast
135   // (an invoke for example) and not dominate BIP (but the cast does).
136   assert(!isa<Instruction>(Ret) ||
137          SE.DT.dominates(cast<Instruction>(Ret), &*BIP));
138 
139   return Ret;
140 }
141 
142 BasicBlock::iterator
143 SCEVExpander::findInsertPointAfter(Instruction *I,
144                                    Instruction *MustDominate) const {
145   BasicBlock::iterator IP = ++I->getIterator();
146   if (auto *II = dyn_cast<InvokeInst>(I))
147     IP = II->getNormalDest()->begin();
148 
149   while (isa<PHINode>(IP))
150     ++IP;
151 
152   if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
153     ++IP;
154   } else if (isa<CatchSwitchInst>(IP)) {
155     IP = MustDominate->getParent()->getFirstInsertionPt();
156   } else {
157     assert(!IP->isEHPad() && "unexpected eh pad!");
158   }
159 
160   // Adjust insert point to be after instructions inserted by the expander, so
161   // we can re-use already inserted instructions. Avoid skipping past the
162   // original \p MustDominate, in case it is an inserted instruction.
163   while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
164     ++IP;
165 
166   return IP;
167 }
168 
169 BasicBlock::iterator
170 SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
171   // Cast the argument at the beginning of the entry block, after
172   // any bitcasts of other arguments.
173   if (Argument *A = dyn_cast<Argument>(V)) {
174     BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
175     while ((isa<BitCastInst>(IP) &&
176             isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
177             cast<BitCastInst>(IP)->getOperand(0) != A) ||
178            isa<DbgInfoIntrinsic>(IP))
179       ++IP;
180     return IP;
181   }
182 
183   // If V is an instruction, place the cast immediately after it.
184   if (Instruction *I = dyn_cast<Instruction>(V))
185     return findInsertPointAfter(I, &*Builder.GetInsertPoint());
186 
187   // Otherwise, this must be some kind of a constant,
188   // so let's plop this cast into the function's entry block.
189   assert(isa<Constant>(V) &&
190          "Expected the cast argument to be a global/constant");
191   return Builder.GetInsertBlock()
192       ->getParent()
193       ->getEntryBlock()
194       .getFirstInsertionPt();
195 }
196 
197 /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
198 /// which must be possible with a noop cast, doing what we can to share
199 /// the casts.
200 Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
201   Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
202   assert((Op == Instruction::BitCast ||
203           Op == Instruction::PtrToInt ||
204           Op == Instruction::IntToPtr) &&
205          "InsertNoopCastOfTo cannot perform non-noop casts!");
206   assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
207          "InsertNoopCastOfTo cannot change sizes!");
208 
209   // inttoptr only works for integral pointers. For non-integral pointers, we
210   // can create a GEP on null with the integral value as index. Note that
211   // it is safe to use GEP of null instead of inttoptr here, because only
212   // expressions already based on a GEP of null should be converted to pointers
213   // during expansion.
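  // For illustration (a sketch assuming a non-integral pointer type in
  // addrspace(1)): casting an i64 %v this way is emitted roughly as
  //   %scevgep = getelementptr i8, ptr addrspace(1) null, i64 %v
  // rather than as an inttoptr instruction.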
214   if (Op == Instruction::IntToPtr) {
215     auto *PtrTy = cast<PointerType>(Ty);
216     if (DL.isNonIntegralPointerType(PtrTy))
217       return Builder.CreatePtrAdd(Constant::getNullValue(PtrTy), V, "scevgep");
218   }
219   // Short-circuit unnecessary bitcasts.
220   if (Op == Instruction::BitCast) {
221     if (V->getType() == Ty)
222       return V;
223     if (CastInst *CI = dyn_cast<CastInst>(V)) {
224       if (CI->getOperand(0)->getType() == Ty)
225         return CI->getOperand(0);
226     }
227   }
228   // Short-circuit unnecessary inttoptr<->ptrtoint casts.
229   if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
230       SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
231     if (CastInst *CI = dyn_cast<CastInst>(V))
232       if ((CI->getOpcode() == Instruction::PtrToInt ||
233            CI->getOpcode() == Instruction::IntToPtr) &&
234           SE.getTypeSizeInBits(CI->getType()) ==
235           SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
236         return CI->getOperand(0);
237     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
238       if ((CE->getOpcode() == Instruction::PtrToInt ||
239            CE->getOpcode() == Instruction::IntToPtr) &&
240           SE.getTypeSizeInBits(CE->getType()) ==
241           SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
242         return CE->getOperand(0);
243   }
244 
245   // Fold a cast of a constant.
246   if (Constant *C = dyn_cast<Constant>(V))
247     return ConstantExpr::getCast(Op, C, Ty);
248 
249   // Try to reuse existing cast, or insert one.
250   return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
251 }
252 
253 /// InsertBinop - Insert the specified binary operator, doing a small amount
254 /// of work to avoid inserting an obviously redundant operation, and hoisting
255 /// to an outer loop when the opportunity is there and it is safe.
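///
/// For example (illustrative, with hypothetical operands %a and %b): when
/// asked to emit %a + %b at a point inside a loop where both operands are
/// loop-invariant and IsSafeToHoist is set, the add is created in the loop
/// preheader; and if an identical add with compatible poison flags already
/// exists within a few instructions before the insertion point, that
/// instruction is reused instead.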
256 Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
257                                  Value *LHS, Value *RHS,
258                                  SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
259   // Fold a binop with constant operands.
260   if (Constant *CLHS = dyn_cast<Constant>(LHS))
261     if (Constant *CRHS = dyn_cast<Constant>(RHS))
262       if (Constant *Res = ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, DL))
263         return Res;
264 
265   // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
266   unsigned ScanLimit = 6;
267   BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
268   // Scanning starts from the last instruction before the insertion point.
269   BasicBlock::iterator IP = Builder.GetInsertPoint();
270   if (IP != BlockBegin) {
271     --IP;
272     for (; ScanLimit; --IP, --ScanLimit) {
273       // Don't count dbg.value against the ScanLimit, to avoid perturbing the
274       // generated code.
275       if (isa<DbgInfoIntrinsic>(IP))
276         ScanLimit++;
277 
278       auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
279         // Ensure that no-wrap flags match.
280         if (isa<OverflowingBinaryOperator>(I)) {
281           if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
282             return true;
283           if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
284             return true;
285         }
286         // Conservatively, do not reuse any instruction which has the exact
287         // flag set.
288         if (isa<PossiblyExactOperator>(I) && I->isExact())
289           return true;
290         return false;
291       };
292       if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
293           IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
294         return &*IP;
295       if (IP == BlockBegin) break;
296     }
297   }
298 
299   // Save the original insertion point so we can restore it when we're done.
300   DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
301   SCEVInsertPointGuard Guard(Builder, this);
302 
303   if (IsSafeToHoist) {
304     // Move the insertion point out of as many loops as we can.
305     while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
306       if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
307       BasicBlock *Preheader = L->getLoopPreheader();
308       if (!Preheader) break;
309 
310       // Ok, move up a level.
311       Builder.SetInsertPoint(Preheader->getTerminator());
312     }
313   }
314 
315   // If we haven't found this binop, insert it.
316   // TODO: Use the Builder, which will make CreateBinOp below fold with
317   // InstSimplifyFolder.
318   Instruction *BO = Builder.Insert(BinaryOperator::Create(Opcode, LHS, RHS));
319   BO->setDebugLoc(Loc);
320   if (Flags & SCEV::FlagNUW)
321     BO->setHasNoUnsignedWrap();
322   if (Flags & SCEV::FlagNSW)
323     BO->setHasNoSignedWrap();
324 
325   return BO;
326 }
327 
328 /// expandAddToGEP - Expand an addition expression with a pointer type into
329 /// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
330 /// BasicAliasAnalysis and other passes analyze the result. See the rules
331 /// for getelementptr vs. inttoptr in
332 /// http://llvm.org/docs/LangRef.html#pointeraliasing
333 /// for details.
334 ///
335 /// Design note: The correctness of using getelementptr here depends on
336 /// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
337 /// they may introduce pointer arithmetic which may not be safely converted
338 /// into getelementptr.
339 ///
340 /// Design note: It might seem desirable for this function to be more
341 /// loop-aware. If some of the indices are loop-invariant while others
342 /// aren't, it might seem desirable to emit multiple GEPs, keeping the
343 /// loop-invariant portions of the overall computation outside the loop.
344 /// However, there are a few reasons this is not done here. Hoisting simple
345 /// arithmetic is a low-level optimization that often isn't very
346 /// important until late in the optimization process. In fact, passes
347 /// like InstructionCombining will combine GEPs, even if it means
348 /// pushing loop-invariant computation down into loops, so even if the
349 /// GEPs were split here, the work would quickly be undone. The
350 /// LoopStrengthReduction pass, which is usually run quite late (and
351 /// after the last InstructionCombining pass), takes care of hoisting
352 /// loop-invariant portions of expressions, after considering what
353 /// can be folded using target addressing modes.
354 ///
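/// For example (illustrative, with a hypothetical pointer base %p and an
/// already-expanded offset %off), the addition is emitted as
///   %scevgep = getelementptr i8, ptr %p, i64 %off
/// rather than as ptrtoint+add+inttoptr arithmetic.
///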
355 Value *SCEVExpander::expandAddToGEP(const SCEV *Offset, Value *V) {
356   assert(!isa<Instruction>(V) ||
357          SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
358 
359   Value *Idx = expand(Offset);
360 
361   // Fold a GEP with constant operands.
362   if (Constant *CLHS = dyn_cast<Constant>(V))
363     if (Constant *CRHS = dyn_cast<Constant>(Idx))
364       return Builder.CreatePtrAdd(CLHS, CRHS);
365 
366   // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
367   unsigned ScanLimit = 6;
368   BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
369   // Scanning starts from the last instruction before the insertion point.
370   BasicBlock::iterator IP = Builder.GetInsertPoint();
371   if (IP != BlockBegin) {
372     --IP;
373     for (; ScanLimit; --IP, --ScanLimit) {
374       // Don't count dbg.value against the ScanLimit, to avoid perturbing the
375       // generated code.
376       if (isa<DbgInfoIntrinsic>(IP))
377         ScanLimit++;
378       if (auto *GEP = dyn_cast<GetElementPtrInst>(IP)) {
379         if (GEP->getPointerOperand() == V &&
380             GEP->getSourceElementType() == Builder.getInt8Ty() &&
381             GEP->getOperand(1) == Idx) {
382           rememberFlags(GEP);
383           GEP->setNoWrapFlags(GEPNoWrapFlags::none());
384           return &*IP;
385         }
386       }
387       if (IP == BlockBegin) break;
388     }
389   }
390 
391   // Save the original insertion point so we can restore it when we're done.
392   SCEVInsertPointGuard Guard(Builder, this);
393 
394   // Move the insertion point out of as many loops as we can.
395   while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
396     if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
397     BasicBlock *Preheader = L->getLoopPreheader();
398     if (!Preheader) break;
399 
400     // Ok, move up a level.
401     Builder.SetInsertPoint(Preheader->getTerminator());
402   }
403 
404   // Emit a GEP.
405   return Builder.CreatePtrAdd(V, Idx, "scevgep");
406 }
407 
408 /// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
409 /// SCEV expansion. If they are nested, this is the most nested. If they are
410 /// neighboring, pick the later.
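///
/// For example, if A is an outer loop containing B, the more deeply nested
/// loop B is picked; if A and B are siblings and A's header dominates B's
/// header, the later loop B is picked.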
411 static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
412                                         DominatorTree &DT) {
413   if (!A) return B;
414   if (!B) return A;
415   if (A->contains(B)) return B;
416   if (B->contains(A)) return A;
417   if (DT.dominates(A->getHeader(), B->getHeader())) return B;
418   if (DT.dominates(B->getHeader(), A->getHeader())) return A;
419   return A; // Arbitrarily break the tie.
420 }
421 
422 /// getRelevantLoop - Get the most relevant loop associated with the given
423 /// expression, according to PickMostRelevantLoop.
424 const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
425   // Test whether we've already computed the most relevant loop for this SCEV.
426   auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
427   if (!Pair.second)
428     return Pair.first->second;
429 
430   switch (S->getSCEVType()) {
431   case scConstant:
432   case scVScale:
433     return nullptr; // A constant has no relevant loops.
434   case scTruncate:
435   case scZeroExtend:
436   case scSignExtend:
437   case scPtrToInt:
438   case scAddExpr:
439   case scMulExpr:
440   case scUDivExpr:
441   case scAddRecExpr:
442   case scUMaxExpr:
443   case scSMaxExpr:
444   case scUMinExpr:
445   case scSMinExpr:
446   case scSequentialUMinExpr: {
447     const Loop *L = nullptr;
448     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
449       L = AR->getLoop();
450     for (const SCEV *Op : S->operands())
451       L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
452     return RelevantLoops[S] = L;
453   }
454   case scUnknown: {
455     const SCEVUnknown *U = cast<SCEVUnknown>(S);
456     if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
457       return Pair.first->second = SE.LI.getLoopFor(I->getParent());
458     // A non-instruction has no relevant loops.
459     return nullptr;
460   }
461   case scCouldNotCompute:
462     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
463   }
464   llvm_unreachable("Unexpected SCEV type!");
465 }
466 
467 namespace {
468 
469 /// LoopCompare - Compare loops by PickMostRelevantLoop.
470 class LoopCompare {
471   DominatorTree &DT;
472 public:
473   explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
474 
475   bool operator()(std::pair<const Loop *, const SCEV *> LHS,
476                   std::pair<const Loop *, const SCEV *> RHS) const {
477     // Sort pointer operands to the front; a pointer becomes the GEP base.
478     if (LHS.second->getType()->isPointerTy() !=
479         RHS.second->getType()->isPointerTy())
480       return LHS.second->getType()->isPointerTy();
481 
482     // Compare loops with PickMostRelevantLoop.
483     if (LHS.first != RHS.first)
484       return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
485 
486     // If one operand is a non-constant negative and the other is not,
487     // put the non-constant negative on the right so that a sub can
488     // be used instead of a negate and add.
489     if (LHS.second->isNonConstantNegative()) {
490       if (!RHS.second->isNonConstantNegative())
491         return false;
492     } else if (RHS.second->isNonConstantNegative())
493       return true;
494 
495     // Otherwise they are equivalent according to this comparison.
496     return false;
497   }
498 };
499 
500 }
501 
502 Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
503   // Recognize the canonical representation of an unsimplified urem.
504   const SCEV *URemLHS = nullptr;
505   const SCEV *URemRHS = nullptr;
506   if (SE.matchURem(S, URemLHS, URemRHS)) {
507     Value *LHS = expand(URemLHS);
508     Value *RHS = expand(URemRHS);
509     return InsertBinop(Instruction::URem, LHS, RHS, SCEV::FlagAnyWrap,
510                       /*IsSafeToHoist*/ false);
511   }
512 
513   // Collect all the add operands in a loop, along with their associated loops.
514   // Iterate in reverse so that constants are emitted last, all else equal, and
515   // so that pointer operands are inserted first, which the code below relies on
516   // to form more involved GEPs.
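  // For instance (illustrative, with hypothetical arguments %p and %n), for
  // the sum %p + %n + 4 the pointer %p is expanded first and the remaining
  // operands are folded into a single byte-offset GEP, while in a pure
  // integer sum the constant ends up on the right-hand side of the add.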
517   SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
518   for (const SCEV *Op : reverse(S->operands()))
519     OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));
520 
521   // Sort by loop. Use a stable sort so that constants follow non-constants and
522   // pointer operands precede non-pointer operands.
523   llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
524 
525   // Emit instructions to add all the operands. Hoist as much as possible
526   // out of loops, and form meaningful getelementptrs where possible.
527   Value *Sum = nullptr;
528   for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
529     const Loop *CurLoop = I->first;
530     const SCEV *Op = I->second;
531     if (!Sum) {
532       // This is the first operand. Just expand it.
533       Sum = expand(Op);
534       ++I;
535       continue;
536     }
537 
538     assert(!Op->getType()->isPointerTy() && "Only first op can be pointer");
539     if (isa<PointerType>(Sum->getType())) {
540       // The running sum expression is a pointer. Try to form a getelementptr
541       // at this level with that as the base.
542       SmallVector<const SCEV *, 4> NewOps;
543       for (; I != E && I->first == CurLoop; ++I) {
544         // If the operand is a SCEVUnknown that is not an instruction, peek
545         // through it, to enable more of it to be folded into the GEP.
546         const SCEV *X = I->second;
547         if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
548           if (!isa<Instruction>(U->getValue()))
549             X = SE.getSCEV(U->getValue());
550         NewOps.push_back(X);
551       }
552       Sum = expandAddToGEP(SE.getAddExpr(NewOps), Sum);
553     } else if (Op->isNonConstantNegative()) {
554       // Instead of doing a negate and add, just do a subtract.
555       Value *W = expand(SE.getNegativeSCEV(Op));
556       Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
557                         /*IsSafeToHoist*/ true);
558       ++I;
559     } else {
560       // A simple add.
561       Value *W = expand(Op);
562       // Canonicalize a constant to the RHS.
563       if (isa<Constant>(Sum))
564         std::swap(Sum, W);
565       Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
566                         /*IsSafeToHoist*/ true);
567       ++I;
568     }
569   }
570 
571   return Sum;
572 }
573 
574 Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
575   Type *Ty = S->getType();
576 
577   // Collect all the mul operands in a loop, along with their associated loops.
578   // Iterate in reverse so that constants are emitted last, all else equal.
579   SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
580   for (const SCEV *Op : reverse(S->operands()))
581     OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));
582 
583   // Sort by loop. Use a stable sort so that constants follow non-constants.
584   llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
585 
586   // Emit instructions to mul all the operands. Hoist as much as possible
587   // out of loops.
588   Value *Prod = nullptr;
589   auto I = OpsAndLoops.begin();
590 
591   // Expand the calculation of X pow N in the following manner:
592   // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
593   // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
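  // For example, X pow 13 with 13 = 8 + 4 + 1 becomes
  //   X * (X pow 4) * (X pow 8),
  // where X, X^2, X^4 and X^8 are computed by repeated squaring and only the
  // powers whose bit is set in the exponent are multiplied into the result.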
594   const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops]() {
595     auto E = I;
596     // Calculate how many times the same operand from the same loop is included
597     // in this power.
598     uint64_t Exponent = 0;
599     const uint64_t MaxExponent = UINT64_MAX >> 1;
600     // No one sane will ever try to calculate such huge exponents, but if we
601     // need this, we stop at UINT64_MAX / 2 so that the doubling of BinExp in
602     // the loop below can never overflow an unsigned 64-bit value before it
603     // exceeds Exponent.
604     while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
605       ++Exponent;
606       ++E;
607     }
608     assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");
609 
610     // Calculate powers with exponents 1, 2, 4, 8, etc., and multiply into the
611     // result those that are needed.
612     Value *P = expand(I->second);
613     Value *Result = nullptr;
614     if (Exponent & 1)
615       Result = P;
616     for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
617       P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
618                       /*IsSafeToHoist*/ true);
619       if (Exponent & BinExp)
620         Result = Result ? InsertBinop(Instruction::Mul, Result, P,
621                                       SCEV::FlagAnyWrap,
622                                       /*IsSafeToHoist*/ true)
623                         : P;
624     }
625 
626     I = E;
627     assert(Result && "Nothing was expanded?");
628     return Result;
629   };
630 
631   while (I != OpsAndLoops.end()) {
632     if (!Prod) {
633       // This is the first operand. Just expand it.
634       Prod = ExpandOpBinPowN();
635     } else if (I->second->isAllOnesValue()) {
636       // Instead of doing a multiply by negative one, just do a negate.
637       Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
638                          SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
639       ++I;
640     } else {
641       // A simple mul.
642       Value *W = ExpandOpBinPowN();
643       // Canonicalize a constant to the RHS.
644       if (isa<Constant>(Prod)) std::swap(Prod, W);
645       const APInt *RHS;
646       if (match(W, m_Power2(RHS))) {
647         // Canonicalize Prod*(1<<C) to Prod<<C.
648         assert(!Ty->isVectorTy() && "vector types are not SCEVable");
649         auto NWFlags = S->getNoWrapFlags();
650         // Clear the nsw flag if the shl would produce a poison value.
651         if (RHS->logBase2() == RHS->getBitWidth() - 1)
652           NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
653         Prod = InsertBinop(Instruction::Shl, Prod,
654                            ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
655                            /*IsSafeToHoist*/ true);
656       } else {
657         Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
658                            /*IsSafeToHoist*/ true);
659       }
660     }
661   }
662 
663   return Prod;
664 }
665 
666 Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
667   Value *LHS = expand(S->getLHS());
668   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
669     const APInt &RHS = SC->getAPInt();
670     if (RHS.isPowerOf2())
671       return InsertBinop(Instruction::LShr, LHS,
672                          ConstantInt::get(SC->getType(), RHS.logBase2()),
673                          SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
674   }
675 
676   Value *RHS = expand(S->getRHS());
677   return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
678                      /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
679 }
680 
681 /// Determine if this is a well-behaved chain of instructions leading back to
682 /// the PHI. If so, it may be reused by expanded expressions.
683 bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
684                                          const Loop *L) {
685   if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
686       (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
687     return false;
688   // If any of the operands don't dominate the insert position, bail.
689   // Addrec operands are always loop-invariant, so this can only happen
690   // if there are instructions which haven't been hoisted.
691   if (L == IVIncInsertLoop) {
692     for (Use &Op : llvm::drop_begin(IncV->operands()))
693       if (Instruction *OInst = dyn_cast<Instruction>(Op))
694         if (!SE.DT.dominates(OInst, IVIncInsertPos))
695           return false;
696   }
697   // Advance to the next instruction.
698   IncV = dyn_cast<Instruction>(IncV->getOperand(0));
699   if (!IncV)
700     return false;
701 
702   if (IncV->mayHaveSideEffects())
703     return false;
704 
705   if (IncV == PN)
706     return true;
707 
708   return isNormalAddRecExprPHI(PN, IncV, L);
709 }
710 
711 /// getIVIncOperand returns an induction variable increment's induction
712 /// variable operand.
713 ///
714 /// If allowScale is set, any type of GEP is allowed as long as the nonIV
715 /// operands dominate InsertPos.
716 ///
717 /// If allowScale is not set, ensure that a GEP increment conforms to one of the
718 /// simple patterns generated by getAddRecExprPHILiterally and
719 /// expandAddToGEP. If the pattern isn't recognized, return nullptr.
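///
/// For example (illustrative, with hypothetical values %iv and %step), for an
/// increment of the form
///   %iv.next = getelementptr i8, ptr %iv, i64 %step
/// the returned IV operand is %iv, provided %step dominates InsertPos.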
720 Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
721                                            Instruction *InsertPos,
722                                            bool allowScale) {
723   if (IncV == InsertPos)
724     return nullptr;
725 
726   switch (IncV->getOpcode()) {
727   default:
728     return nullptr;
729   // Check for a simple Add/Sub or GEP of a loop invariant step.
730   case Instruction::Add:
731   case Instruction::Sub: {
732     Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
733     if (!OInst || SE.DT.dominates(OInst, InsertPos))
734       return dyn_cast<Instruction>(IncV->getOperand(0));
735     return nullptr;
736   }
737   case Instruction::BitCast:
738     return dyn_cast<Instruction>(IncV->getOperand(0));
739   case Instruction::GetElementPtr:
740     for (Use &U : llvm::drop_begin(IncV->operands())) {
741       if (isa<Constant>(U))
742         continue;
743       if (Instruction *OInst = dyn_cast<Instruction>(U)) {
744         if (!SE.DT.dominates(OInst, InsertPos))
745           return nullptr;
746       }
747       if (allowScale) {
748         // allow any kind of GEP as long as it can be hoisted.
749         continue;
750       }
751       // GEPs produced by SCEVExpander use i8 element type.
752       if (!cast<GEPOperator>(IncV)->getSourceElementType()->isIntegerTy(8))
753         return nullptr;
754       break;
755     }
756     return dyn_cast<Instruction>(IncV->getOperand(0));
757   }
758 }
759 
760 /// If the insert point of the current builder or any of the builders on the
761 /// stack of saved builders has 'I' as its insert point, update it to point to
762 /// the instruction after 'I'.  This is intended to be used when the instruction
763 /// 'I' is being moved.  If this fixup is not done and 'I' is moved to a
764 /// different block, the inconsistent insert point (with a mismatched
765 /// Instruction and Block) can lead to an instruction being inserted in a block
766 /// other than its parent.
767 void SCEVExpander::fixupInsertPoints(Instruction *I) {
768   BasicBlock::iterator It(*I);
769   BasicBlock::iterator NewInsertPt = std::next(It);
770   if (Builder.GetInsertPoint() == It)
771     Builder.SetInsertPoint(&*NewInsertPt);
772   for (auto *InsertPtGuard : InsertPointGuards)
773     if (InsertPtGuard->GetInsertPoint() == It)
774       InsertPtGuard->SetInsertPoint(NewInsertPt);
775 }
776 
777 /// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
778 /// it available to other uses in this loop. Recursively hoist any operands,
779 /// until we reach a value that dominates InsertPos.
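///
/// For example (illustrative, with hypothetical values): if
///   %iv.next = add i64 %iv, %step
/// is defined in the loop latch but is needed at an earlier InsertPos that
/// both %iv and %step already dominate, the add is moved to just before
/// InsertPos.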
780 bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos,
781                               bool RecomputePoisonFlags) {
782   auto FixupPoisonFlags = [this](Instruction *I) {
783     // Drop flags that were potentially inferred from the old context and infer
784     // flags that hold in the new context.
785     rememberFlags(I);
786     I->dropPoisonGeneratingFlags();
787     if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I))
788       if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
789         auto *BO = cast<BinaryOperator>(I);
790         BO->setHasNoUnsignedWrap(
791             ScalarEvolution::maskFlags(*Flags, SCEV::FlagNUW) == SCEV::FlagNUW);
792         BO->setHasNoSignedWrap(
793             ScalarEvolution::maskFlags(*Flags, SCEV::FlagNSW) == SCEV::FlagNSW);
794       }
795   };
796 
797   if (SE.DT.dominates(IncV, InsertPos)) {
798     if (RecomputePoisonFlags)
799       FixupPoisonFlags(IncV);
800     return true;
801   }
802 
803   // InsertPos must itself dominate IncV so that IncV's new position satisfies
804   // its existing users.
805   if (isa<PHINode>(InsertPos) ||
806       !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
807     return false;
808 
809   if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
810     return false;
811 
812   // Check that the chain of IV operands leading back to Phi can be hoisted.
813   SmallVector<Instruction*, 4> IVIncs;
814   for(;;) {
815     Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
816     if (!Oper)
817       return false;
818     // IncV is safe to hoist.
819     IVIncs.push_back(IncV);
820     IncV = Oper;
821     if (SE.DT.dominates(IncV, InsertPos))
822       break;
823   }
824   for (Instruction *I : llvm::reverse(IVIncs)) {
825     fixupInsertPoints(I);
826     I->moveBefore(InsertPos);
827     if (RecomputePoisonFlags)
828       FixupPoisonFlags(I);
829   }
830   return true;
831 }
832 
833 bool SCEVExpander::canReuseFlagsFromOriginalIVInc(PHINode *OrigPhi,
834                                                   PHINode *WidePhi,
835                                                   Instruction *OrigInc,
836                                                   Instruction *WideInc) {
837   return match(OrigInc, m_c_BinOp(m_Specific(OrigPhi), m_Value())) &&
838          match(WideInc, m_c_BinOp(m_Specific(WidePhi), m_Value())) &&
839          OrigInc->getOpcode() == WideInc->getOpcode();
840 }
841 
842 /// Determine if this cyclic phi is in a form that would have been generated by
843 /// LSR. We don't care if the phi was actually expanded in this pass, as long
844 /// as it is in a low-cost form, for example, no implied multiplication. This
845 /// should match any patterns generated by getAddRecExprPHILiterally and
846 /// expandAddToGEP.
847 bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
848                                            const Loop *L) {
849   for(Instruction *IVOper = IncV;
850       (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
851                                 /*allowScale=*/false));) {
852     if (IVOper == PN)
853       return true;
854   }
855   return false;
856 }
857 
858 /// expandIVInc - Expand an IV increment at Builder's current InsertPos.
859 /// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
860 /// need to materialize IV increments elsewhere to handle difficult situations.
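///
/// With hypothetical names, a pointer PHI is incremented as
///   %scevgep = getelementptr i8, ptr %iv, i64 %step
/// and an integer PHI as
///   %name.iv.next = add i64 %iv, %step
/// (or a sub when useSubtract is set).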
861 Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
862                                  bool useSubtract) {
863   Value *IncV;
864   // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
865   if (PN->getType()->isPointerTy()) {
866     // TODO: Change name to IVName.iv.next.
867     IncV = Builder.CreatePtrAdd(PN, StepV, "scevgep");
868   } else {
869     IncV = useSubtract ?
870       Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
871       Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
872   }
873   return IncV;
874 }
875 
876 /// Check whether we can cheaply express the requested SCEV in terms of
877 /// the available PHI SCEV by truncation and/or inversion of the step.
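///
/// For example, a requested {5,+,-1}<%L> over i32 can reuse an existing i32
/// PHI for {0,+,1}<%L>, since 5 - {0,+,1}<%L> == {5,+,-1}<%L>; InvertStep is
/// set and the result is later materialized as 5 - %phi.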
878 static bool canBeCheaplyTransformed(ScalarEvolution &SE,
879                                     const SCEVAddRecExpr *Phi,
880                                     const SCEVAddRecExpr *Requested,
881                                     bool &InvertStep) {
882   // We can't transform to match a pointer PHI.
883   Type *PhiTy = Phi->getType();
884   Type *RequestedTy = Requested->getType();
885   if (PhiTy->isPointerTy() || RequestedTy->isPointerTy())
886     return false;
887 
888   if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
889     return false;
890 
891   // Try truncate it if necessary.
892   Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
893   if (!Phi)
894     return false;
895 
896   // Check whether truncation will help.
897   if (Phi == Requested) {
898     InvertStep = false;
899     return true;
900   }
901 
902   // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
903   if (SE.getMinusSCEV(Requested->getStart(), Requested) == Phi) {
904     InvertStep = true;
905     return true;
906   }
907 
908   return false;
909 }
910 
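/// Return true if the increment AR + Step of the addrec provably does not
/// wrap in the signed sense: sign-extending the sum in a type twice as wide
/// yields the same SCEV as summing the sign-extended operands.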
911 static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
912   if (!isa<IntegerType>(AR->getType()))
913     return false;
914 
915   unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
916   Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
917   const SCEV *Step = AR->getStepRecurrence(SE);
918   const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
919                                             SE.getSignExtendExpr(AR, WideTy));
920   const SCEV *ExtendAfterOp =
921     SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
922   return ExtendAfterOp == OpAfterExtend;
923 }
924 
925 static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
926   if (!isa<IntegerType>(AR->getType()))
927     return false;
928 
929   unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
930   Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
931   const SCEV *Step = AR->getStepRecurrence(SE);
932   const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
933                                             SE.getZeroExtendExpr(AR, WideTy));
934   const SCEV *ExtendAfterOp =
935     SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
936   return ExtendAfterOp == OpAfterExtend;
937 }
938 
939 /// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
940 /// the base addrec, which is the addrec without any non-loop-dominating
941 /// values, and return the PHI.
942 PHINode *
943 SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
944                                         const Loop *L, Type *&TruncTy,
945                                         bool &InvertStep) {
946   assert((!IVIncInsertLoop || IVIncInsertPos) &&
947          "Uninitialized insert position");
948 
949   // Reuse a previously-inserted PHI, if present.
950   BasicBlock *LatchBlock = L->getLoopLatch();
951   if (LatchBlock) {
952     PHINode *AddRecPhiMatch = nullptr;
953     Instruction *IncV = nullptr;
954     TruncTy = nullptr;
955     InvertStep = false;
956 
957     // Only try partially matching scevs that need truncation and/or
958     // step-inversion if we know this loop is outside the current loop.
959     bool TryNonMatchingSCEV =
960         IVIncInsertLoop &&
961         SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
962 
963     for (PHINode &PN : L->getHeader()->phis()) {
964       if (!SE.isSCEVable(PN.getType()))
965         continue;
966 
967       // We should not look for an incomplete PHI. Getting the SCEV of an
968       // incomplete PHI has no meaning at all.
969       if (!PN.isComplete()) {
970         SCEV_DEBUG_WITH_TYPE(
971             DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
972         continue;
973       }
974 
975       const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
976       if (!PhiSCEV)
977         continue;
978 
979       bool IsMatchingSCEV = PhiSCEV == Normalized;
980       // We only handle truncation and inversion of phi recurrences for the
981       // expanded expression if the expanded expression's loop dominates the
982       // loop we insert to. Check now, so we can bail out early.
983       if (!IsMatchingSCEV && !TryNonMatchingSCEV)
984           continue;
985 
986       // TODO: this possibly can be reworked to avoid this cast at all.
987       Instruction *TempIncV =
988           dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
989       if (!TempIncV)
990         continue;
991 
992       // Check whether we can reuse this PHI node.
993       if (LSRMode) {
994         if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
995           continue;
996       } else {
997         if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
998           continue;
999       }
1000 
1001       // Stop if we have found an exact match SCEV.
1002       if (IsMatchingSCEV) {
1003         IncV = TempIncV;
1004         TruncTy = nullptr;
1005         InvertStep = false;
1006         AddRecPhiMatch = &PN;
1007         break;
1008       }
1009 
1010       // Try whether the phi can be translated into the requested form
1011       // (truncated and/or offset by a constant).
1012       if ((!TruncTy || InvertStep) &&
1013           canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
1014         // Record the phi node. But don't stop; we might find an exact match
1015         // later.
1016         AddRecPhiMatch = &PN;
1017         IncV = TempIncV;
1018         TruncTy = Normalized->getType();
1019       }
1020     }
1021 
1022     if (AddRecPhiMatch) {
1023       // Ok, the add recurrence looks usable.
1024       // Remember this PHI, even in post-inc mode.
1025       InsertedValues.insert(AddRecPhiMatch);
1026       // Remember the increment.
1027       rememberInstruction(IncV);
1028       // Those values were not actually inserted but re-used.
1029       ReusedValues.insert(AddRecPhiMatch);
1030       ReusedValues.insert(IncV);
1031       return AddRecPhiMatch;
1032     }
1033   }
1034 
1035   // Save the original insertion point so we can restore it when we're done.
1036   SCEVInsertPointGuard Guard(Builder, this);
1037 
1038   // Another AddRec may need to be recursively expanded below. For example, if
1039   // this AddRec is quadratic, the StepV may itself be an AddRec in this
1040   // loop. Remove this loop from the PostIncLoops set before expanding such
1041   // AddRecs. Otherwise, we cannot find a valid position for the step
1042   // (i.e. StepV can never dominate its loop header).  Ideally, we could do
1043   // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
1044   // so it's not worth implementing SmallPtrSet::swap.
1045   PostIncLoopSet SavedPostIncLoops = PostIncLoops;
1046   PostIncLoops.clear();
1047 
1048   // Expand code for the start value into the loop preheader.
1049   assert(L->getLoopPreheader() &&
1050          "Can't expand add recurrences without a loop preheader!");
1051   Value *StartV =
1052       expand(Normalized->getStart(), L->getLoopPreheader()->getTerminator());
1053   // StartV must have been inserted into L's preheader to dominate the new
1054   // StartV must have been be inserted into L's preheader to dominate the new
1055   // phi.
1056   assert(!isa<Instruction>(StartV) ||
1057          SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
1058                                  L->getHeader()));
1059 
1060   // Expand code for the step value. Do this before creating the PHI so that PHI
1061   // reuse code doesn't see an incomplete PHI.
1062   const SCEV *Step = Normalized->getStepRecurrence(SE);
1063   Type *ExpandTy = Normalized->getType();
1064   // If the stride is negative, insert a sub instead of an add for the increment
1065   // (unless it's a constant, because subtracts of constants are canonicalized
1066   // to adds).
1067   bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1068   if (useSubtract)
1069     Step = SE.getNegativeSCEV(Step);
1070   // Expand the step somewhere that dominates the loop header.
1071   Value *StepV = expand(Step, L->getHeader()->getFirstInsertionPt());
1072 
1073   // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
1074   // we actually do emit an addition.  It does not apply if we emit a
1075   // subtraction.
1076   bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1077   bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1078 
1079   // Create the PHI.
1080   BasicBlock *Header = L->getHeader();
1081   Builder.SetInsertPoint(Header, Header->begin());
1082   PHINode *PN =
1083       Builder.CreatePHI(ExpandTy, pred_size(Header), Twine(IVName) + ".iv");
1084 
1085   // Create the step instructions and populate the PHI.
1086   for (BasicBlock *Pred : predecessors(Header)) {
1087     // Add a start value.
1088     if (!L->contains(Pred)) {
1089       PN->addIncoming(StartV, Pred);
1090       continue;
1091     }
1092 
1093     // Create a step value and add it to the PHI.
1094     // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
1095     // instructions at IVIncInsertPos.
1096     Instruction *InsertPos = L == IVIncInsertLoop ?
1097       IVIncInsertPos : Pred->getTerminator();
1098     Builder.SetInsertPoint(InsertPos);
1099     Value *IncV = expandIVInc(PN, StepV, L, useSubtract);
1100 
1101     if (isa<OverflowingBinaryOperator>(IncV)) {
1102       if (IncrementIsNUW)
1103         cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1104       if (IncrementIsNSW)
1105         cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1106     }
1107     PN->addIncoming(IncV, Pred);
1108   }
1109 
1110   // After expanding subexpressions, restore the PostIncLoops set so the caller
1111   // can ensure that IVIncrement dominates the current uses.
1112   PostIncLoops = SavedPostIncLoops;
1113 
1114   // Remember this PHI, even in post-inc mode. LSR SCEV-based salvaging is most
1115   // effective when we are able to use an IV inserted here, so record it.
1116   InsertedValues.insert(PN);
1117   InsertedIVs.push_back(PN);
1118   return PN;
1119 }
1120 
1121 Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
1122   const Loop *L = S->getLoop();
1123 
1124   // Determine a normalized form of this expression, which is the expression
1125   // before any post-inc adjustment is made.
1126   const SCEVAddRecExpr *Normalized = S;
1127   if (PostIncLoops.count(L)) {
1128     PostIncLoopSet Loops;
1129     Loops.insert(L);
1130     Normalized = cast<SCEVAddRecExpr>(
1131         normalizeForPostIncUse(S, Loops, SE, /*CheckInvertible=*/false));
1132   }
1133 
1134   [[maybe_unused]] const SCEV *Start = Normalized->getStart();
1135   const SCEV *Step = Normalized->getStepRecurrence(SE);
1136   assert(SE.properlyDominates(Start, L->getHeader()) &&
1137          "Start does not properly dominate loop header");
1138   assert(SE.dominates(Step, L->getHeader()) && "Step does not dominate loop header");
1139 
1140   // In some cases, we decide to reuse an existing phi node but need to truncate
1141   // it and/or invert the step.
1142   Type *TruncTy = nullptr;
1143   bool InvertStep = false;
1144   PHINode *PN = getAddRecExprPHILiterally(Normalized, L, TruncTy, InvertStep);
1145 
1146   // Accommodate post-inc mode, if necessary.
1147   Value *Result;
1148   if (!PostIncLoops.count(L))
1149     Result = PN;
1150   else {
1151     // In PostInc mode, use the post-incremented value.
1152     BasicBlock *LatchBlock = L->getLoopLatch();
1153     assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1154     Result = PN->getIncomingValueForBlock(LatchBlock);
1155 
1156     // We might be introducing a new use of the post-inc IV that is not poison
1157     // safe, in which case we should drop poison generating flags. Only keep
1158     // those flags for which SCEV has proven that they always hold.
1159     if (isa<OverflowingBinaryOperator>(Result)) {
1160       auto *I = cast<Instruction>(Result);
1161       if (!S->hasNoUnsignedWrap())
1162         I->setHasNoUnsignedWrap(false);
1163       if (!S->hasNoSignedWrap())
1164         I->setHasNoSignedWrap(false);
1165     }
1166 
1167     // For an expansion to use the postinc form, the client must call
1168     // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
1169     // or dominated by IVIncInsertPos.
1170     if (isa<Instruction>(Result) &&
1171         !SE.DT.dominates(cast<Instruction>(Result),
1172                          &*Builder.GetInsertPoint())) {
1173       // The induction variable's postinc expansion does not dominate this use.
1174       // IVUsers tries to prevent this case, so it is rare. However, it can
1175       // happen when an IVUser outside the loop is not dominated by the latch
1176       // block. Adjusting IVIncInsertPos before expansion begins cannot handle
1177       // all cases. Consider a phi outside whose operand is replaced during
1178       // expansion with the value of the postinc user. Without fundamentally
1179       // changing the way postinc users are tracked, the only remedy is
1180       // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1181       // but hopefully expandCodeFor handles that.
1182       bool useSubtract =
1183           !S->getType()->isPointerTy() && Step->isNonConstantNegative();
1184       if (useSubtract)
1185         Step = SE.getNegativeSCEV(Step);
1186       Value *StepV;
1187       {
1188         // Expand the step somewhere that dominates the loop header.
1189         SCEVInsertPointGuard Guard(Builder, this);
1190         StepV = expand(Step, L->getHeader()->getFirstInsertionPt());
1191       }
1192       Result = expandIVInc(PN, StepV, L, useSubtract);
1193     }
1194   }
1195 
1196   // We have decided to reuse an induction variable of a dominating loop. Apply
1197   // truncation and/or inversion of the step.
1198   if (TruncTy) {
1199     // Truncate the result.
1200     if (TruncTy != Result->getType())
1201       Result = Builder.CreateTrunc(Result, TruncTy);
1202 
1203     // Invert the result.
1204     if (InvertStep)
1205       Result = Builder.CreateSub(expand(Normalized->getStart()), Result);
1206   }
1207 
1208   return Result;
1209 }
1210 
1211 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1212   // In canonical mode we compute the addrec as an expression of a canonical IV
1213   // using evaluateAtIteration and expand the resulting SCEV expression. This
1214   // way we avoid introducing new IVs to carry on the computation of the addrec
1215   // throughout the loop.
1216   //
1217   // For nested addrecs evaluateAtIteration might need a canonical IV of a
1218   // type wider than the addrec itself. Emitting a canonical IV of the
1219   // proper type might produce non-legal types, for example expanding an i64
1220   // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
1221   // back to non-canonical mode for nested addrecs.
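  // For example (illustrative): in canonical mode an affine addrec
  // {0,+,4}<%L> is expanded in terms of the canonical IV {0,+,1}<%L> as a
  // multiply by 4 (emitted as a shl by 2), instead of creating a separate
  // IV PHI for it.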
1222   if (!CanonicalMode || (S->getNumOperands() > 2))
1223     return expandAddRecExprLiterally(S);
1224 
1225   Type *Ty = SE.getEffectiveSCEVType(S->getType());
1226   const Loop *L = S->getLoop();
1227 
1228   // First check for an existing canonical IV in a suitable type.
1229   PHINode *CanonicalIV = nullptr;
1230   if (PHINode *PN = L->getCanonicalInductionVariable())
1231     if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1232       CanonicalIV = PN;
1233 
1234   // Rewrite an AddRec in terms of the canonical induction variable, if
1235   // its type is narrower.
1236   if (CanonicalIV &&
1237       SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
1238       !S->getType()->isPointerTy()) {
1239     SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1240     for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1241       NewOps[i] = SE.getAnyExtendExpr(S->getOperand(i), CanonicalIV->getType());
1242     Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1243                                        S->getNoWrapFlags(SCEV::FlagNW)));
1244     BasicBlock::iterator NewInsertPt =
1245         findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
1246     V = expand(SE.getTruncateExpr(SE.getUnknown(V), Ty), NewInsertPt);
1247     return V;
1248   }
1249 
1250   // {X,+,F} --> X + {0,+,F}
1251   if (!S->getStart()->isZero()) {
1252     if (isa<PointerType>(S->getType())) {
1253       Value *StartV = expand(SE.getPointerBase(S));
1254       return expandAddToGEP(SE.removePointerBase(S), StartV);
1255     }
1256 
1257     SmallVector<const SCEV *, 4> NewOps(S->operands());
1258     NewOps[0] = SE.getConstant(Ty, 0);
1259     const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1260                                         S->getNoWrapFlags(SCEV::FlagNW));
1261 
1262     // Just do a normal add. Pre-expand the operands to suppress folding.
1263     //
1264     // The LHS and RHS values are factored out of the expand call to make the
1265     // output independent of the argument evaluation order.
1266     const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1267     const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1268     return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1269   }
1270 
1271   // If we don't yet have a canonical IV, create one.
1272   if (!CanonicalIV) {
1273     // Create and insert the PHI node for the induction variable in the
1274     // specified loop.
1275     BasicBlock *Header = L->getHeader();
1276     pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1277     CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar");
1278     CanonicalIV->insertBefore(Header->begin());
1279     rememberInstruction(CanonicalIV);
1280 
1281     SmallSet<BasicBlock *, 4> PredSeen;
1282     Constant *One = ConstantInt::get(Ty, 1);
1283     for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1284       BasicBlock *HP = *HPI;
1285       if (!PredSeen.insert(HP).second) {
1286         // There must be an incoming value for each predecessor, even the
1287         // duplicates!
1288         CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1289         continue;
1290       }
1291 
1292       if (L->contains(HP)) {
1293         // Insert a unit add instruction right before the terminator
1294         // corresponding to the back-edge.
1295         Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1296                                                      "indvar.next",
1297                                                      HP->getTerminator()->getIterator());
1298         Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1299         rememberInstruction(Add);
1300         CanonicalIV->addIncoming(Add, HP);
1301       } else {
1302         CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1303       }
1304     }
1305   }
1306 
1307   // {0,+,1} --> Insert a canonical induction variable into the loop!
1308   if (S->isAffine() && S->getOperand(1)->isOne()) {
1309     assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1310            "IVs with types different from the canonical IV should "
1311            "already have been handled!");
1312     return CanonicalIV;
1313   }
1314 
1315   // {0,+,F} --> {0,+,1} * F
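  // E.g., an affine {0,+,3} is emitted as the canonical IV multiplied by 3
  // (extended to the canonical IV's type first, and truncated back to Ty
  // afterwards if the widths differ).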
1316 
1317   // If this is a simple linear addrec, emit it now as a special case.
1318   if (S->isAffine())    // {0,+,F} --> i*F
1319     return
1320       expand(SE.getTruncateOrNoop(
1321         SE.getMulExpr(SE.getUnknown(CanonicalIV),
1322                       SE.getNoopOrAnyExtend(S->getOperand(1),
1323                                             CanonicalIV->getType())),
1324         Ty));
1325 
1326   // If this is a chain of recurrences, turn it into a closed form, using the
1327   // folders, then expandCodeFor the closed form.  This allows the folders to
1328   // simplify the expression without having to build a bunch of special code
1329   // into this expander.
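  //
  // E.g., for {0,+,0,+,1}, evaluateAtIteration(i) yields roughly i*(i-1)/2 (a
  // sum of binomial-coefficient terms), which is then expanded as ordinary
  // arithmetic on the canonical IV instead of as a nested recurrence.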
1330   const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.
1331 
1332   // Promote S up to the canonical IV type, if the cast is foldable.
1333   const SCEV *NewS = S;
1334   const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1335   if (isa<SCEVAddRecExpr>(Ext))
1336     NewS = Ext;
1337 
1338   const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1339 
1340   // Truncate the result down to the original type, if needed.
1341   const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1342   return expand(T);
1343 }
1344 
1345 Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
1346   Value *V = expand(S->getOperand());
1347   return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1348                            GetOptimalInsertionPointForCastOf(V));
1349 }
1350 
1351 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1352   Value *V = expand(S->getOperand());
1353   return Builder.CreateTrunc(V, S->getType());
1354 }
1355 
1356 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1357   Value *V = expand(S->getOperand());
1358   return Builder.CreateZExt(V, S->getType(), "",
1359                             SE.isKnownNonNegative(S->getOperand()));
1360 }
1361 
1362 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1363   Value *V = expand(S->getOperand());
1364   return Builder.CreateSExt(V, S->getType());
1365 }
1366 
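/// Expand a min/max SCEV as a chain of pairwise reductions over its operands,
/// processed right-to-left. Integer-typed expressions use the matching
/// llvm.smax/umax/smin/umin intrinsic; other (e.g. pointer) types fall back to
/// an icmp + select. For a sequential umin, every operand except the first is
/// frozen before use, matching the poison-blocking semantics of the sequential
/// form.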
1367 Value *SCEVExpander::expandMinMaxExpr(const SCEVNAryExpr *S,
1368                                       Intrinsic::ID IntrinID, Twine Name,
1369                                       bool IsSequential) {
1370   Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1371   Type *Ty = LHS->getType();
1372   if (IsSequential)
1373     LHS = Builder.CreateFreeze(LHS);
1374   for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1375     Value *RHS = expand(S->getOperand(i));
1376     if (IsSequential && i != 0)
1377       RHS = Builder.CreateFreeze(RHS);
1378     Value *Sel;
1379     if (Ty->isIntegerTy())
1380       Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {LHS, RHS},
1381                                     /*FMFSource=*/nullptr, Name);
1382     else {
1383       Value *ICmp =
1384           Builder.CreateICmp(MinMaxIntrinsic::getPredicate(IntrinID), LHS, RHS);
1385       Sel = Builder.CreateSelect(ICmp, LHS, RHS, Name);
1386     }
1387     LHS = Sel;
1388   }
1389   return LHS;
1390 }
1391 
1392 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1393   return expandMinMaxExpr(S, Intrinsic::smax, "smax");
1394 }
1395 
1396 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1397   return expandMinMaxExpr(S, Intrinsic::umax, "umax");
1398 }
1399 
1400 Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
1401   return expandMinMaxExpr(S, Intrinsic::smin, "smin");
1402 }
1403 
1404 Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
1405   return expandMinMaxExpr(S, Intrinsic::umin, "umin");
1406 }
1407 
1408 Value *SCEVExpander::visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S) {
1409   return expandMinMaxExpr(S, Intrinsic::umin, "umin", /*IsSequential*/true);
1410 }
1411 
1412 Value *SCEVExpander::visitVScale(const SCEVVScale *S) {
1413   return Builder.CreateVScale(ConstantInt::get(S->getType(), 1));
1414 }
1415 
1416 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
1417                                    BasicBlock::iterator IP) {
1418   setInsertPoint(IP);
1419   Value *V = expandCodeFor(SH, Ty);
1420   return V;
1421 }
1422 
1423 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
1424   // Expand the code for this SCEV.
1425   Value *V = expand(SH);
1426 
1427   if (Ty) {
1428     assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1429            "non-trivial casts should be done with the SCEVs directly!");
1430     V = InsertNoopCastOfTo(V, Ty);
1431   }
1432   return V;
1433 }
1434 
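/// Look for an existing Value in ScalarEvolution's ExprValueMap that computes
/// S and can be reused at InsertPt: it must have the same type, dominate
/// InsertPt, and not break LCSSA, and reusing it must be poison-safe. If reuse
/// is only possible after stripping poison-generating flags, the instructions
/// whose flags must be dropped are returned in DropPoisonGeneratingInsts.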
1435 Value *SCEVExpander::FindValueInExprValueMap(
1436     const SCEV *S, const Instruction *InsertPt,
1437     SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
1438   // If the expansion is not in CanonicalMode and the SCEV contains any
1439   // scAddRecExpr sub-expression, the SCEV must be expanded literally.
1440   if (!CanonicalMode && SE.containsAddRecurrence(S))
1441     return nullptr;
1442 
1443   // If S is a constant, it may be worse to reuse an existing Value.
1444   if (isa<SCEVConstant>(S))
1445     return nullptr;
1446 
1447   for (Value *V : SE.getSCEVValues(S)) {
1448     Instruction *EntInst = dyn_cast<Instruction>(V);
1449     if (!EntInst)
1450       continue;
1451 
1452     // Choose a Value from the set which dominates the InsertPt.
1453     // InsertPt should be inside the Value's parent loop so as not to break
1454     // the LCSSA form.
1455     assert(EntInst->getFunction() == InsertPt->getFunction());
1456     if (S->getType() != V->getType() || !SE.DT.dominates(EntInst, InsertPt) ||
1457         !(SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1458           SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1459       continue;
1460 
1461     // Make sure reusing the instruction is poison-safe.
1462     if (SE.canReuseInstruction(S, EntInst, DropPoisonGeneratingInsts))
1463       return V;
1464     DropPoisonGeneratingInsts.clear();
1465   }
1466   return nullptr;
1467 }
1468 
1469 // The expansion of a SCEV will either reuse a previous Value in ExprValueMap
1470 // or expand the SCEV literally. Specifically, if the expansion is in LSRMode
1471 // and the SCEV contains any scAddRecExpr sub-expression, it will be expanded
1472 // literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1473 // the expansion tries to reuse a Value from ExprValueMap and only expands the
1474 // SCEV literally when that fails.
1475 Value *SCEVExpander::expand(const SCEV *S) {
1476   // Compute an insertion point for this SCEV object. Hoist the instructions
1477   // as far out in the loop nest as possible.
1478   BasicBlock::iterator InsertPt = Builder.GetInsertPoint();
1479 
1480   // We can only move the insertion point if there are no div or rem ops;
1481   // otherwise we risk hoisting it past the check for a zero denominator.
1482   auto SafeToHoist = [](const SCEV *S) {
1483     return !SCEVExprContains(S, [](const SCEV *S) {
1484               if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1485                 if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1486                   // Division by non-zero constants can be hoisted.
1487                   return SC->getValue()->isZero();
1488                 // All other divisions should not be moved as they may be
1489                 // divisions by zero and should be kept within the
1490                 // conditions of the surrounding loops that guard their
1491                 // execution (see PR35406).
1492                 return true;
1493               }
1494               return false;
1495             });
1496   };
1497   if (SafeToHoist(S)) {
1498     for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1499          L = L->getParentLoop()) {
1500       if (SE.isLoopInvariant(S, L)) {
1501         if (!L) break;
1502         if (BasicBlock *Preheader = L->getLoopPreheader()) {
1503           InsertPt = Preheader->getTerminator()->getIterator();
1504         } else {
1505           // LSR sets the insertion point for AddRec start/step values to the
1506           // block start to simplify value reuse, even though it's an invalid
1507           // position. SCEVExpander must correct for this in all cases.
1508           InsertPt = L->getHeader()->getFirstInsertionPt();
1509         }
1510       } else {
1511         // If the SCEV is computable at this level, insert it into the header
1512         // after the PHIs (and after any other instructions that we've inserted
1513         // there) so that it is guaranteed to dominate any user inside the loop.
1514         if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1515           InsertPt = L->getHeader()->getFirstInsertionPt();
1516 
1517         while (InsertPt != Builder.GetInsertPoint() &&
1518                (isInsertedInstruction(&*InsertPt) ||
1519                 isa<DbgInfoIntrinsic>(&*InsertPt))) {
1520           InsertPt = std::next(InsertPt);
1521         }
1522         break;
1523       }
1524     }
1525   }
1526 
1527   // Check to see if we already expanded this here.
1528   auto I = InsertedExpressions.find(std::make_pair(S, &*InsertPt));
1529   if (I != InsertedExpressions.end())
1530     return I->second;
1531 
1532   SCEVInsertPointGuard Guard(Builder, this);
1533   Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1534 
1535   // Expand the expression into instructions.
1536   SmallVector<Instruction *> DropPoisonGeneratingInsts;
1537   Value *V = FindValueInExprValueMap(S, &*InsertPt, DropPoisonGeneratingInsts);
1538   if (!V) {
1539     V = visit(S);
1540     V = fixupLCSSAFormFor(V);
1541   } else {
1542     for (Instruction *I : DropPoisonGeneratingInsts) {
1543       rememberFlags(I);
1544       I->dropPoisonGeneratingAnnotations();
1545       // See if we can re-infer from first principles any of the flags we just
1546       // dropped.
1547       if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I))
1548         if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
1549           auto *BO = cast<BinaryOperator>(I);
1550           BO->setHasNoUnsignedWrap(
1551             ScalarEvolution::maskFlags(*Flags, SCEV::FlagNUW) == SCEV::FlagNUW);
1552           BO->setHasNoSignedWrap(
1553             ScalarEvolution::maskFlags(*Flags, SCEV::FlagNSW) == SCEV::FlagNSW);
1554         }
1555       if (auto *NNI = dyn_cast<PossiblyNonNegInst>(I)) {
1556         auto *Src = NNI->getOperand(0);
1557         if (isImpliedByDomCondition(ICmpInst::ICMP_SGE, Src,
1558                                     Constant::getNullValue(Src->getType()), I,
1559                                     DL).value_or(false))
1560           NNI->setNonNeg(true);
1561       }
1562     }
1563   }
1564   // Remember the expanded value for this SCEV at this location.
1565   //
1566   // This is independent of PostIncLoops. The mapped value simply materializes
1567   // the expression at this insertion point. If the mapped value happened to be
1568   // a postinc expansion, it could be reused by a non-postinc user, but only if
1569   // its insertion point was already at the head of the loop.
1570   InsertedExpressions[std::make_pair(S, &*InsertPt)] = V;
1571   return V;
1572 }
1573 
1574 void SCEVExpander::rememberInstruction(Value *I) {
1575   auto DoInsert = [this](Value *V) {
1576     if (!PostIncLoops.empty())
1577       InsertedPostIncValues.insert(V);
1578     else
1579       InsertedValues.insert(V);
1580   };
1581   DoInsert(I);
1582 }
1583 
1584 void SCEVExpander::rememberFlags(Instruction *I) {
1585   // If we already have flags for the instruction, keep the existing ones.
1586   OrigFlags.try_emplace(I, PoisonFlags(I));
1587 }
1588 
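/// Try to replace the increment of Phi with the congruent increment of
/// OrigPhi. The "more canonical" of the two is kept (possibly swapping the
/// roles of Phi and OrigPhi), the kept increment is hoisted so it is available
/// at the redundant one, and the redundant increment is replaced (through a
/// trunc/bitcast if the types differ) and queued in DeadInsts.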
1589 void SCEVExpander::replaceCongruentIVInc(
1590     PHINode *&Phi, PHINode *&OrigPhi, Loop *L, const DominatorTree *DT,
1591     SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
1592   BasicBlock *LatchBlock = L->getLoopLatch();
1593   if (!LatchBlock)
1594     return;
1595 
1596   Instruction *OrigInc =
1597       dyn_cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
1598   Instruction *IsomorphicInc =
1599       dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1600   if (!OrigInc || !IsomorphicInc)
1601     return;
1602 
1603   // If this phi has the same width but is more canonical, replace the
1604   // original with it. As part of the "more canonical" determination,
1605   // respect a prior decision to use an IV chain.
1606   if (OrigPhi->getType() == Phi->getType() &&
1607       !(ChainedPhis.count(Phi) ||
1608         isExpandedAddRecExprPHI(OrigPhi, OrigInc, L)) &&
1609       (ChainedPhis.count(Phi) ||
1610        isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1611     std::swap(OrigPhi, Phi);
1612     std::swap(OrigInc, IsomorphicInc);
1613   }
1614 
1615   // Replacing the congruent phi is sufficient because acyclic
1616   // redundancy elimination, CSE/GVN, should handle the
1617   // rest. However, once SCEV proves that a phi is congruent,
1618   // it's often the head of an IV user cycle that is isomorphic
1619   // with the original phi. It's worth eagerly cleaning up the
1620   // common case of a single IV increment so that DeleteDeadPHIs
1621   // can remove cycles that had postinc uses.
1622   // Because we may potentially introduce a new use of OrigInc that didn't
1623   // exist before at this point, its poison flags need readjustment.
1624   const SCEV *TruncExpr =
1625       SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
1626   if (OrigInc == IsomorphicInc || TruncExpr != SE.getSCEV(IsomorphicInc) ||
1627       !SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc))
1628     return;
1629 
1630   bool BothHaveNUW = false;
1631   bool BothHaveNSW = false;
1632   auto *OBOIncV = dyn_cast<OverflowingBinaryOperator>(OrigInc);
1633   auto *OBOIsomorphic = dyn_cast<OverflowingBinaryOperator>(IsomorphicInc);
1634   if (OBOIncV && OBOIsomorphic) {
1635     BothHaveNUW =
1636         OBOIncV->hasNoUnsignedWrap() && OBOIsomorphic->hasNoUnsignedWrap();
1637     BothHaveNSW =
1638         OBOIncV->hasNoSignedWrap() && OBOIsomorphic->hasNoSignedWrap();
1639   }
1640 
1641   if (!hoistIVInc(OrigInc, IsomorphicInc,
1642                   /*RecomputePoisonFlags*/ true))
1643     return;
1644 
1645   // We are replacing with a wider increment. If both OrigInc and IsomorphicInc
1646   // are NUW/NSW, then we can preserve them on the wider increment; the narrower
1647   // IsomorphicInc would wrap before the wider OrigInc, so the replacement won't
1648   // make IsomorphicInc's uses more poisonous.
1649   assert(OrigInc->getType()->getScalarSizeInBits() >=
1650              IsomorphicInc->getType()->getScalarSizeInBits() &&
1651          "Should only replace an increment with a wider one.");
1652   if (BothHaveNUW || BothHaveNSW) {
1653     OrigInc->setHasNoUnsignedWrap(OBOIncV->hasNoUnsignedWrap() || BothHaveNUW);
1654     OrigInc->setHasNoSignedWrap(OBOIncV->hasNoSignedWrap() || BothHaveNSW);
1655   }
1656 
1657   SCEV_DEBUG_WITH_TYPE(DebugType,
1658                        dbgs() << "INDVARS: Eliminated congruent iv.inc: "
1659                               << *IsomorphicInc << '\n');
1660   Value *NewInc = OrigInc;
1661   if (OrigInc->getType() != IsomorphicInc->getType()) {
1662     BasicBlock::iterator IP;
1663     if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
1664       IP = PN->getParent()->getFirstInsertionPt();
1665     else
1666       IP = OrigInc->getNextNonDebugInstruction()->getIterator();
1667 
1668     IRBuilder<> Builder(IP->getParent(), IP);
1669     Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
1670     NewInc =
1671         Builder.CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
1672   }
1673   IsomorphicInc->replaceAllUsesWith(NewInc);
1674   DeadInsts.emplace_back(IsomorphicInc);
1675 }
1676 
1677 /// replaceCongruentIVs - Check for congruent phis in this loop header and
1678 /// replace them with their most canonical representative. Return the number of
1679 /// phis eliminated.
1680 ///
1681 /// This does not depend on any SCEVExpander state but should be used in
1682 /// the same context that SCEVExpander is used.
1683 unsigned
1684 SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
1685                                   SmallVectorImpl<WeakTrackingVH> &DeadInsts,
1686                                   const TargetTransformInfo *TTI) {
1687   // Find integer phis in order of increasing width.
1688   SmallVector<PHINode*, 8> Phis;
1689   for (PHINode &PN : L->getHeader()->phis())
1690     Phis.push_back(&PN);
1691 
1692   if (TTI)
1693     // Use stable_sort to preserve order of equivalent PHIs, so the order
1694     // of the sorted Phis is the same from run to run on the same loop.
1695     llvm::stable_sort(Phis, [](Value *LHS, Value *RHS) {
1696       // Put pointers at the back and make sure pointer < pointer = false.
1697       if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1698         return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1699       return RHS->getType()->getPrimitiveSizeInBits().getFixedValue() <
1700              LHS->getType()->getPrimitiveSizeInBits().getFixedValue();
1701     });
1702 
1703   unsigned NumElim = 0;
1704   DenseMap<const SCEV *, PHINode *> ExprToIVMap;
1705   // Process phis from wide to narrow. Map wide phis to their truncation
1706   // so narrow phis can reuse them.
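  // E.g., if an i64 phi and an i32 phi compute congruent recurrences, the i64
  // phi is kept as the representative and the i32 phi (and its increment) is
  // replaced by a trunc of the wider IV.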
1707   for (PHINode *Phi : Phis) {
1708     auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
1709       if (Value *V = simplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
1710         return V;
1711       if (!SE.isSCEVable(PN->getType()))
1712         return nullptr;
1713       auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
1714       if (!Const)
1715         return nullptr;
1716       return Const->getValue();
1717     };
1718 
1719     // Fold constant phis. They may be congruent to other constant phis and
1720     // would confuse the logic below that expects proper IVs.
1721     if (Value *V = SimplifyPHINode(Phi)) {
1722       if (V->getType() != Phi->getType())
1723         continue;
1724       SE.forgetValue(Phi);
1725       Phi->replaceAllUsesWith(V);
1726       DeadInsts.emplace_back(Phi);
1727       ++NumElim;
1728       SCEV_DEBUG_WITH_TYPE(DebugType,
1729                            dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
1730                                   << '\n');
1731       continue;
1732     }
1733 
1734     if (!SE.isSCEVable(Phi->getType()))
1735       continue;
1736 
1737     PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1738     if (!OrigPhiRef) {
1739       OrigPhiRef = Phi;
1740       if (Phi->getType()->isIntegerTy() && TTI &&
1741           TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1742         // Make sure we only rewrite using simple induction variables;
1743         // otherwise, we can make the trip count of a loop unanalyzable
1744         // to SCEV.
1745         const SCEV *PhiExpr = SE.getSCEV(Phi);
1746         if (isa<SCEVAddRecExpr>(PhiExpr)) {
1747           // This phi can be freely truncated to the narrowest phi type. Map the
1748           // truncated expression to it so it will be reused for narrow types.
1749           const SCEV *TruncExpr =
1750               SE.getTruncateExpr(PhiExpr, Phis.back()->getType());
1751           ExprToIVMap[TruncExpr] = Phi;
1752         }
1753       }
1754       continue;
1755     }
1756 
1757     // Replacing a pointer phi with an integer phi or vice-versa doesn't make
1758     // sense.
1759     if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1760       continue;
1761 
1762     replaceCongruentIVInc(Phi, OrigPhiRef, L, DT, DeadInsts);
1763     SCEV_DEBUG_WITH_TYPE(DebugType,
1764                          dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
1765                                 << '\n');
1766     SCEV_DEBUG_WITH_TYPE(
1767         DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
1768     ++NumElim;
1769     Value *NewIV = OrigPhiRef;
1770     if (OrigPhiRef->getType() != Phi->getType()) {
1771       IRBuilder<> Builder(L->getHeader(),
1772                           L->getHeader()->getFirstInsertionPt());
1773       Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1774       NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1775     }
1776     Phi->replaceAllUsesWith(NewIV);
1777     DeadInsts.emplace_back(Phi);
1778   }
1779   return NumElim;
1780 }
1781 
1782 bool SCEVExpander::hasRelatedExistingExpansion(const SCEV *S,
1783                                                const Instruction *At,
1784                                                Loop *L) {
1785   using namespace llvm::PatternMatch;
1786 
1787   SmallVector<BasicBlock *, 4> ExitingBlocks;
1788   L->getExitingBlocks(ExitingBlocks);
1789 
1790   // Look for suitable value in simple conditions at the loop exits.
1791   for (BasicBlock *BB : ExitingBlocks) {
1792     ICmpInst::Predicate Pred;
1793     Instruction *LHS, *RHS;
1794 
1795     if (!match(BB->getTerminator(),
1796                m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
1797                     m_BasicBlock(), m_BasicBlock())))
1798       continue;
1799 
1800     if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1801       return true;
1802 
1803     if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1804       return true;
1805   }
1806 
1807   // Reuse expand's logic for finding a previous Value in ExprValueMap.
1808   // Note that we don't currently model the cost of needing to drop
1809   // poison-generating flags on the instruction if we want to reuse it.
1810   // We effectively assume that has zero cost.
1811   SmallVector<Instruction *> DropPoisonGeneratingInsts;
1812   return FindValueInExprValueMap(S, At, DropPoisonGeneratingInsts) != nullptr;
1813 }
1814 
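/// Compute the cost of the IR operations needed to materialize the single SCEV
/// node in WorkItem (not its operands), and push each SCEV operand onto
/// Worklist tagged with the IR opcode and operand index it will feed, so that
/// operand costs (e.g. immediates) can later be evaluated in context.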
1815 template<typename T> static InstructionCost costAndCollectOperands(
1816   const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
1817   TargetTransformInfo::TargetCostKind CostKind,
1818   SmallVectorImpl<SCEVOperand> &Worklist) {
1819 
1820   const T *S = cast<T>(WorkItem.S);
1821   InstructionCost Cost = 0;
1822   // Object to help map SCEV operands to expanded IR instructions.
1823   struct OperationIndices {
1824     OperationIndices(unsigned Opc, size_t min, size_t max) :
1825       Opcode(Opc), MinIdx(min), MaxIdx(max) { }
1826     unsigned Opcode;
1827     size_t MinIdx;
1828     size_t MaxIdx;
1829   };
1830 
1831   // Collect the operations of all the instructions that will be needed to
1832   // expand the SCEVExpr. This is so that when we come to cost the operands,
1833   // we know what the generated user(s) will be.
1834   SmallVector<OperationIndices, 2> Operations;
1835 
1836   auto CastCost = [&](unsigned Opcode) -> InstructionCost {
1837     Operations.emplace_back(Opcode, 0, 0);
1838     return TTI.getCastInstrCost(Opcode, S->getType(),
1839                                 S->getOperand(0)->getType(),
1840                                 TTI::CastContextHint::None, CostKind);
1841   };
1842 
1843   auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
1844                        unsigned MinIdx = 0,
1845                        unsigned MaxIdx = 1) -> InstructionCost {
1846     Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1847     return NumRequired *
1848       TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
1849   };
1850 
1851   auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
1852                         unsigned MaxIdx) -> InstructionCost {
1853     Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1854     Type *OpType = S->getType();
1855     return NumRequired * TTI.getCmpSelInstrCost(
1856                              Opcode, OpType, CmpInst::makeCmpResultType(OpType),
1857                              CmpInst::BAD_ICMP_PREDICATE, CostKind);
1858   };
1859 
1860   switch (S->getSCEVType()) {
1861   case scCouldNotCompute:
1862     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
1863   case scUnknown:
1864   case scConstant:
1865   case scVScale:
1866     return 0;
1867   case scPtrToInt:
1868     Cost = CastCost(Instruction::PtrToInt);
1869     break;
1870   case scTruncate:
1871     Cost = CastCost(Instruction::Trunc);
1872     break;
1873   case scZeroExtend:
1874     Cost = CastCost(Instruction::ZExt);
1875     break;
1876   case scSignExtend:
1877     Cost = CastCost(Instruction::SExt);
1878     break;
1879   case scUDivExpr: {
1880     unsigned Opcode = Instruction::UDiv;
1881     if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
1882       if (SC->getAPInt().isPowerOf2())
1883         Opcode = Instruction::LShr;
1884     Cost = ArithCost(Opcode, 1);
1885     break;
1886   }
1887   case scAddExpr:
1888     Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
1889     break;
1890   case scMulExpr:
1891     // TODO: this is a very pessimistic cost model for Mul, because the
1892     // expander actually uses a binary-power (BinPow) algorithm; see
1893     // SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
1894     Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
1895     break;
1896   case scSMaxExpr:
1897   case scUMaxExpr:
1898   case scSMinExpr:
1899   case scUMinExpr:
1900   case scSequentialUMinExpr: {
1901     // FIXME: should this ask for the cost of intrinsics?
1902     // The reduction tree.
1903     Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
1904     Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
1905     switch (S->getSCEVType()) {
1906     case scSequentialUMinExpr: {
1907       // The safety net against poison.
1908       // FIXME: this is broken.
1909       Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
1910       Cost += ArithCost(Instruction::Or,
1911                         S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
1912       Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
1913       break;
1914     }
1915     default:
1916       assert(!isa<SCEVSequentialMinMaxExpr>(S) &&
1917              "Unhandled SCEV expression type?");
1918       break;
1919     }
1920     break;
1921   }
1922   case scAddRecExpr: {
1923     // Addrec expands to a phi and add per recurrence.
1924     unsigned NumRecurrences = S->getNumOperands() - 1;
1925     Cost += TTI.getCFInstrCost(Instruction::PHI, CostKind) * NumRecurrences;
1926     Cost +=
1927         TTI.getArithmeticInstrCost(Instruction::Add, S->getType(), CostKind) *
1928         NumRecurrences;
1929     // AR start is used in phi.
1930     Worklist.emplace_back(Instruction::PHI, 0, S->getOperand(0));
1931     // Other operands are used in add.
1932     for (const SCEV *Op : S->operands().drop_front())
1933       Worklist.emplace_back(Instruction::Add, 1, Op);
1934     break;
1935   }
1936   }
1937 
1938   for (auto &CostOp : Operations) {
1939     for (auto SCEVOp : enumerate(S->operands())) {
1940       // Clamp the index to account for multiple IR operations being chained.
1941       size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
1942       size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
1943       Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
1944     }
1945   }
1946   return Cost;
1947 }
1948 
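/// Account for the cost of expanding WorkItem.S at the insertion point At and
/// return true as soon as the accumulated Cost exceeds Budget. Expressions
/// that already have a suitable expansion available (or are trivially free)
/// contribute nothing; otherwise their own cost is added and their operands
/// are pushed onto Worklist for the caller to process on later calls.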
1949 bool SCEVExpander::isHighCostExpansionHelper(
1950     const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
1951     InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
1952     SmallPtrSetImpl<const SCEV *> &Processed,
1953     SmallVectorImpl<SCEVOperand> &Worklist) {
1954   if (Cost > Budget)
1955     return true; // Already run out of budget, give up.
1956 
1957   const SCEV *S = WorkItem.S;
1958   // Was the cost of expansion of this expression already accounted for?
1959   if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
1960     return false; // We have already accounted for this expression.
1961 
1962   // If we can find an existing value for this scev available at the point "At"
1963   // then consider the expression cheap.
1964   if (hasRelatedExistingExpansion(S, &At, L))
1965     return false; // Consider the expression to be free.
1966 
1967   TargetTransformInfo::TargetCostKind CostKind =
1968       L->getHeader()->getParent()->hasMinSize()
1969           ? TargetTransformInfo::TCK_CodeSize
1970           : TargetTransformInfo::TCK_RecipThroughput;
1971 
1972   switch (S->getSCEVType()) {
1973   case scCouldNotCompute:
1974     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
1975   case scUnknown:
1976   case scVScale:
1977     // Assume to be zero-cost.
1978     return false;
1979   case scConstant: {
1980     // Only evaluate the costs of constants when optimizing for size.
1981     if (CostKind != TargetTransformInfo::TCK_CodeSize)
1982       return false;
1983     const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
1984     Type *Ty = S->getType();
1985     Cost += TTI.getIntImmCostInst(
1986         WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
1987     return Cost > Budget;
1988   }
1989   case scTruncate:
1990   case scPtrToInt:
1991   case scZeroExtend:
1992   case scSignExtend: {
1993     Cost +=
1994         costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
1995     return false; // Will answer upon next entry into this function.
1996   }
1997   case scUDivExpr: {
1998     // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
1999     // HowManyLessThans produced to compute a precise expression, rather than a
2000     // UDiv from the user's code. If we can't find a UDiv in the code with some
2001     // simple searching, we need to account for its cost.
2002 
2003     // At the beginning of this function we already tried to find an existing
2004     // value for plain 'S'. Now try to look up 'S + 1', since it is a common
2005     // pattern involving division. This is just a simple search heuristic.
2006     if (hasRelatedExistingExpansion(
2007             SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
2008       return false; // Consider it to be free.
2009 
2010     Cost +=
2011         costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
2012     return false; // Will answer upon next entry into this function.
2013   }
2014   case scAddExpr:
2015   case scMulExpr:
2016   case scUMaxExpr:
2017   case scSMaxExpr:
2018   case scUMinExpr:
2019   case scSMinExpr:
2020   case scSequentialUMinExpr: {
2021     assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2022            "Nary expr should have more than 1 operand.");
2023     // The simple n-ary expr will require one less op (or pair of ops)
2024     // than the number of its terms.
2025     Cost +=
2026         costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
2027     return Cost > Budget;
2028   }
2029   case scAddRecExpr: {
2030     assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2031            "Polynomial should be at least linear");
2032     Cost += costAndCollectOperands<SCEVAddRecExpr>(
2033         WorkItem, TTI, CostKind, Worklist);
2034     return Cost > Budget;
2035   }
2036   }
2037   llvm_unreachable("Unknown SCEV kind!");
2038 }
2039 
2040 Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2041                                             Instruction *IP) {
2042   assert(IP);
2043   switch (Pred->getKind()) {
2044   case SCEVPredicate::P_Union:
2045     return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2046   case SCEVPredicate::P_Compare:
2047     return expandComparePredicate(cast<SCEVComparePredicate>(Pred), IP);
2048   case SCEVPredicate::P_Wrap: {
2049     auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2050     return expandWrapPredicate(AddRecPred, IP);
2051   }
2052   }
2053   llvm_unreachable("Unknown SCEV predicate type");
2054 }
2055 
2056 Value *SCEVExpander::expandComparePredicate(const SCEVComparePredicate *Pred,
2057                                             Instruction *IP) {
2058   Value *Expr0 = expand(Pred->getLHS(), IP);
2059   Value *Expr1 = expand(Pred->getRHS(), IP);
2060 
2061   Builder.SetInsertPoint(IP);
2062   auto InvPred = ICmpInst::getInversePredicate(Pred->getPredicate());
2063   auto *I = Builder.CreateICmp(InvPred, Expr0, Expr1, "ident.check");
2064   return I;
2065 }
2066 
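/// Generate code that checks whether the affine recurrence AR = {Start,+,Step}
/// can wrap (in the unsigned sense, or the signed sense when Signed is set)
/// within the loop's symbolic maximum backedge-taken count. The returned i1,
/// inserted at Loc, is true when the no-overflow assumption may be violated.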
2067 Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2068                                            Instruction *Loc, bool Signed) {
2069   assert(AR->isAffine() && "Cannot generate RT check for "
2070                            "non-affine expression");
2071 
2072   // FIXME: It is highly suspicious that we're ignoring the predicates here.
2073   SmallVector<const SCEVPredicate *, 4> Pred;
2074   const SCEV *ExitCount =
2075       SE.getPredicatedSymbolicMaxBackedgeTakenCount(AR->getLoop(), Pred);
2076 
2077   assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2078 
2079   const SCEV *Step = AR->getStepRecurrence(SE);
2080   const SCEV *Start = AR->getStart();
2081 
2082   Type *ARTy = AR->getType();
2083   unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2084   unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2085 
2086   // The expression {Start,+,Step} has nusw/nssw if
2087   //   Step < 0, Start - |Step| * Backedge <= Start
2088   //   Step >= 0, Start + |Step| * Backedge > Start
2089   // and |Step| * Backedge doesn't unsigned overflow.
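  //
  // Schematically, for backedge-taken count BTC the emitted check is roughly:
  //   AbsStep    = Step < 0 ? -Step : Step
  //   (Mul, Ovf) = umul.with.overflow(AbsStep, BTC truncated/extended to ARTy)
  //   Wraps      = Step >= 0 ? (Start + Mul < Start) : (Start - Mul > Start)
  //   result     = Wraps | Ovf
  // with the comparisons signed or unsigned depending on the Signed flag, plus
  // an extra check when BTC is wider than the AR type and might be truncated.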
2090 
2091   Builder.SetInsertPoint(Loc);
2092   Value *TripCountVal = expand(ExitCount, Loc);
2093 
2094   IntegerType *Ty =
2095       IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2096 
2097   Value *StepValue = expand(Step, Loc);
2098   Value *NegStepValue = expand(SE.getNegativeSCEV(Step), Loc);
2099   Value *StartValue = expand(Start, Loc);
2100 
2101   ConstantInt *Zero =
2102       ConstantInt::get(Loc->getContext(), APInt::getZero(DstBits));
2103 
2104   Builder.SetInsertPoint(Loc);
2105   // Compute |Step|
2106   Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2107   Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2108 
2109   // Compute |Step| * Backedge
2110   // Compute:
2111   //   1. Start + |Step| * Backedge < Start
2112   //   2. Start - |Step| * Backedge > Start
2113   //
2114   // And select either 1. or 2. depending on whether step is positive or
2115   // negative. If Step is known to be positive or negative, only create
2116   // either 1. or 2.
2117   auto ComputeEndCheck = [&]() -> Value * {
2118     // Checking <u 0 is always false.
2119     if (!Signed && Start->isZero() && SE.isKnownPositive(Step))
2120       return ConstantInt::getFalse(Loc->getContext());
2121 
2122     // Get the backedge-taken count and truncate or extend it to the AR type.
2123     Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2124 
2125     Value *MulV, *OfMul;
2126     if (Step->isOne()) {
2127       // Special-case a Step of one: the potentially costly `umul_with_overflow`
2128       // isn't needed since there is never an overflow, so directly emit the
2129       // optimized IR to avoid artificially inflating the cost of the check.
2130       MulV = TruncTripCount;
2131       OfMul = ConstantInt::getFalse(MulV->getContext());
2132     } else {
2133       auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
2134                                              Intrinsic::umul_with_overflow, Ty);
2135       CallInst *Mul =
2136           Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2137       MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2138       OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2139     }
2140 
2141     Value *Add = nullptr, *Sub = nullptr;
2142     bool NeedPosCheck = !SE.isKnownNegative(Step);
2143     bool NeedNegCheck = !SE.isKnownPositive(Step);
2144 
2145     if (isa<PointerType>(ARTy)) {
2146       Value *NegMulV = Builder.CreateNeg(MulV);
2147       if (NeedPosCheck)
2148         Add = Builder.CreatePtrAdd(StartValue, MulV);
2149       if (NeedNegCheck)
2150         Sub = Builder.CreatePtrAdd(StartValue, NegMulV);
2151     } else {
2152       if (NeedPosCheck)
2153         Add = Builder.CreateAdd(StartValue, MulV);
2154       if (NeedNegCheck)
2155         Sub = Builder.CreateSub(StartValue, MulV);
2156     }
2157 
2158     Value *EndCompareLT = nullptr;
2159     Value *EndCompareGT = nullptr;
2160     Value *EndCheck = nullptr;
2161     if (NeedPosCheck)
2162       EndCheck = EndCompareLT = Builder.CreateICmp(
2163           Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2164     if (NeedNegCheck)
2165       EndCheck = EndCompareGT = Builder.CreateICmp(
2166           Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2167     if (NeedPosCheck && NeedNegCheck) {
2168       // Select the answer based on the sign of Step.
2169       EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2170     }
2171     return Builder.CreateOr(EndCheck, OfMul);
2172   };
2173   Value *EndCheck = ComputeEndCheck();
2174 
2175   // If the backedge taken count type is larger than the AR type,
2176   // check that we don't drop any bits by truncating it. If we are
2177   // dropping bits, then we have overflow (unless the step is zero).
2178   if (SrcBits > DstBits) {
2179     auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2180     auto *BackedgeCheck =
2181         Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2182                            ConstantInt::get(Loc->getContext(), MaxVal));
2183     BackedgeCheck = Builder.CreateAnd(
2184         BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2185 
2186     EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2187   }
2188 
2189   return EndCheck;
2190 }
2191 
2192 Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
2193                                          Instruction *IP) {
2194   const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2195   Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2196 
2197   // Add a check for NUSW
2198   if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2199     NUSWCheck = generateOverflowCheck(A, IP, false);
2200 
2201   // Add a check for NSSW
2202   if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2203     NSSWCheck = generateOverflowCheck(A, IP, true);
2204 
2205   if (NUSWCheck && NSSWCheck)
2206     return Builder.CreateOr(NUSWCheck, NSSWCheck);
2207 
2208   if (NUSWCheck)
2209     return NUSWCheck;
2210 
2211   if (NSSWCheck)
2212     return NSSWCheck;
2213 
2214   return ConstantInt::getFalse(IP->getContext());
2215 }
2216 
2217 Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
2218                                           Instruction *IP) {
2219   // Loop over all checks in this set.
2220   SmallVector<Value *> Checks;
2221   for (const auto *Pred : Union->getPredicates()) {
2222     Checks.push_back(expandCodeForPredicate(Pred, IP));
2223     Builder.SetInsertPoint(IP);
2224   }
2225 
2226   if (Checks.empty())
2227     return ConstantInt::getFalse(IP->getContext());
2228   return Builder.CreateOr(Checks);
2229 }
2230 
2231 Value *SCEVExpander::fixupLCSSAFormFor(Value *V) {
2232   auto *DefI = dyn_cast<Instruction>(V);
2233   if (!PreserveLCSSA || !DefI)
2234     return V;
2235 
2236   BasicBlock::iterator InsertPt = Builder.GetInsertPoint();
2237   Loop *DefLoop = SE.LI.getLoopFor(DefI->getParent());
2238   Loop *UseLoop = SE.LI.getLoopFor(InsertPt->getParent());
2239   if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2240     return V;
2241 
2242   // Create a temporary instruction at the current insertion point, so we
2243   // can hand it off to the helper to create LCSSA PHIs if required for the
2244   // new use.
2245   // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
2246   // would accept an insertion point and return an LCSSA phi for that
2247   // insertion point, so there is no need to insert & remove the temporary
2248   // instruction.
2249   Type *ToTy;
2250   if (DefI->getType()->isIntegerTy())
2251     ToTy = PointerType::get(DefI->getContext(), 0);
2252   else
2253     ToTy = Type::getInt32Ty(DefI->getContext());
2254   Instruction *User =
2255       CastInst::CreateBitOrPointerCast(DefI, ToTy, "tmp.lcssa.user", InsertPt);
2256   auto RemoveUserOnExit =
2257       make_scope_exit([User]() { User->eraseFromParent(); });
2258 
2259   SmallVector<Instruction *, 1> ToUpdate;
2260   ToUpdate.push_back(DefI);
2261   SmallVector<PHINode *, 16> PHIsToRemove;
2262   SmallVector<PHINode *, 16> InsertedPHIs;
2263   formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, &PHIsToRemove,
2264                            &InsertedPHIs);
2265   for (PHINode *PN : InsertedPHIs)
2266     rememberInstruction(PN);
2267   for (PHINode *PN : PHIsToRemove) {
2268     if (!PN->use_empty())
2269       continue;
2270     InsertedValues.erase(PN);
2271     InsertedPostIncValues.erase(PN);
2272     PN->eraseFromParent();
2273   }
2274 
2275   return User->getOperand(0);
2276 }
2277 
2278 namespace {
2279 // Search for a SCEV subexpression that is not safe to expand.  Any expression
2280 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2281 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
2282 // instruction, but the important thing is that we prove the denominator is
2283 // nonzero before expansion.
2284 //
2285 // IVUsers already checks that IV-derived expressions are safe. So this check is
2286 // only needed when the expression includes some subexpression that is not IV
2287 // derived.
2288 //
2289 // Currently, we only allow division by a value provably non-zero here.
2290 //
2291 // We cannot generally expand recurrences unless the step dominates the loop
2292 // header. The expander handles the special case of affine recurrences by
2293 // scaling the recurrence outside the loop, but this technique isn't generally
2294 // applicable. Expanding a nested recurrence outside a loop requires computing
2295 // binomial coefficients. This could be done, but the recurrence has to be in a
2296 // perfectly reduced form, which can't be guaranteed.
2297 struct SCEVFindUnsafe {
2298   ScalarEvolution &SE;
2299   bool CanonicalMode;
2300   bool IsUnsafe = false;
2301 
2302   SCEVFindUnsafe(ScalarEvolution &SE, bool CanonicalMode)
2303       : SE(SE), CanonicalMode(CanonicalMode) {}
2304 
2305   bool follow(const SCEV *S) {
2306     if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2307       if (!SE.isKnownNonZero(D->getRHS())) {
2308         IsUnsafe = true;
2309         return false;
2310       }
2311     }
2312     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2313       // For non-affine addrecs or in non-canonical mode we need a preheader
2314       // to insert into.
2315       if (!AR->getLoop()->getLoopPreheader() &&
2316           (!CanonicalMode || !AR->isAffine())) {
2317         IsUnsafe = true;
2318         return false;
2319       }
2320     }
2321     return true;
2322   }
2323   bool isDone() const { return IsUnsafe; }
2324 };
2325 } // namespace
2326 
2327 bool SCEVExpander::isSafeToExpand(const SCEV *S) const {
2328   SCEVFindUnsafe Search(SE, CanonicalMode);
2329   visitAll(S, Search);
2330   return !Search.IsUnsafe;
2331 }
2332 
2333 bool SCEVExpander::isSafeToExpandAt(const SCEV *S,
2334                                     const Instruction *InsertionPoint) const {
2335   if (!isSafeToExpand(S))
2336     return false;
2337   // We have to prove that the expansion site of S dominates InsertionPoint.
2338   // This is easy when not in the same block, but hard when S is an instruction
2339   // to be expanded somewhere inside the same block as our insertion point.
2340   // What we really need here is something analogous to an OrderedBasicBlock,
2341   // but for the moment, we paper over the problem by handling two common and
2342   // cheap to check cases.
2343   if (SE.properlyDominates(S, InsertionPoint->getParent()))
2344     return true;
2345   if (SE.dominates(S, InsertionPoint->getParent())) {
2346     if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
2347       return true;
2348     if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2349       if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue()))
2350         return true;
2351   }
2352   return false;
2353 }
2354 
2355 void SCEVExpanderCleaner::cleanup() {
2356   // Result is used, nothing to remove.
2357   if (ResultUsed)
2358     return;
2359 
2360   // Restore original poison flags.
2361   for (auto [I, Flags] : Expander.OrigFlags)
2362     Flags.apply(I);
2363 
2364   auto InsertedInstructions = Expander.getAllInsertedInstructions();
2365 #ifndef NDEBUG
2366   SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(),
2367                                             InsertedInstructions.end());
2368   (void)InsertedSet;
2369 #endif
2370   // Remove sets with value handles.
2371   Expander.clear();
2372 
2373   // Remove all inserted instructions.
2374   for (Instruction *I : reverse(InsertedInstructions)) {
2375 #ifndef NDEBUG
2376     assert(all_of(I->users(),
2377                   [&InsertedSet](Value *U) {
2378                     return InsertedSet.contains(cast<Instruction>(U));
2379                   }) &&
2380            "removed instruction should only be used by instructions inserted "
2381            "during expansion");
2382 #endif
2383     assert(!I->getType()->isVoidTy() &&
2384            "inserted instruction should have non-void types");
2385     I->replaceAllUsesWith(PoisonValue::get(I->getType()));
2386     I->eraseFromParent();
2387   }
2388 }
2389