//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
#else
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
#endif

using namespace llvm;

cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
    "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
    cl::desc("When performing SCEV expansion only if it is cheap to do so, "
             "this controls the budget that is considered cheap (default = 4)"));

using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one (= dominating IP) exists, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Value *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users()) {
    if (U->getType() != Ty)
      continue;
    CastInst *CI = dyn_cast<CastInst>(U);
    if (!CI || CI->getOpcode() != Op)
      continue;

    // Found a suitable cast that is at IP or comes before IP. Use it. Note that
    // the cast must also properly dominate the Builder's insertion point.
    if (IP->getParent() == CI->getParent() && &*BIP != CI &&
        (&*IP == CI || CI->comesBefore(&*IP))) {
      Ret = CI;
      break;
    }
  }

  // Create a new cast.
  if (!Ret) {
    SCEVInsertPointGuard Guard(Builder, this);
    Builder.SetInsertPoint(&*IP);
    Ret = Builder.CreateCast(Op, V, Ty, V->getName());
  }

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(!isa<Instruction>(Ret) ||
         SE.DT.dominates(cast<Instruction>(Ret), &*BIP));

  return Ret;
}

BasicBlock::iterator
SCEVExpander::findInsertPointAfter(Instruction *I,
                                   Instruction *MustDominate) const {
  BasicBlock::iterator IP = ++I->getIterator();
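  // An invoke defines its result value only along its normal-destination
  // edge, so code that uses the invoke's value must be inserted there rather
  // than immediately after the invoke itself.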
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getParent()->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  // Adjust insert point to be after instructions inserted by the expander, so
  // we can re-use already inserted instructions. Avoid skipping past the
  // original \p MustDominate, in case it is an inserted instruction.
  while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
    ++IP;

  return IP;
}

BasicBlock::iterator
SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return IP;
  }

  // For an instruction, insert the cast immediately after it.
  if (Instruction *I = dyn_cast<Instruction>(V))
    return findInsertPointAfter(I, &*Builder.GetInsertPoint());

  // Otherwise, this must be some kind of a constant,
  // so let's plop this cast into the function's entry block.
  assert(isa<Constant>(V) &&
         "Expected the cast argument to be a global/constant");
  return Builder.GetInsertBlock()
      ->getParent()
      ->getEntryBlock()
      .getFirstInsertionPt();
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // inttoptr only works for integral pointers. For non-integral pointers, we
  // can create a GEP on null with the integral value as index. Note that
  // it is safe to use GEP of null instead of inttoptr here, because only
  // expressions already based on a GEP of null should be converted to pointers
  // during expansion.
  if (Op == Instruction::IntToPtr) {
    auto *PtrTy = cast<PointerType>(Ty);
    if (DL.isNonIntegralPointerType(PtrTy)) {
      assert(DL.getTypeAllocSize(Builder.getInt8Ty()) == 1 &&
             "alloc size of i8 must be 1 byte for the GEP to be correct");
      return Builder.CreateGEP(
          Builder.getInt8Ty(), Constant::getNullValue(PtrTy), V, "scevgep");
    }
  }
  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Try to reuse existing cast, or insert one.
  return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation, and hoisting
/// to an outer loop when the opportunity is there and it is safe.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS,
                                 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *Res = ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, DL))
        return Res;

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
        // Ensure that no-wrap flags match.
        if (isa<OverflowingBinaryOperator>(I)) {
          if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
            return true;
          if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
            return true;
        }
        // Conservatively, do not reuse any instruction that has the exact
        // flag set.
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  if (IsSafeToHoist) {
    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }
  }

  // If we haven't found this binop, insert it.
  // TODO: Use the Builder, which will make CreateBinOp below fold with
  // InstSimplifyFolder.
  Instruction *BO = Builder.Insert(BinaryOperator::Create(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  if (Flags & SCEV::FlagNUW)
    BO->setHasNoUnsignedWrap();
  if (Flags & SCEV::FlagNSW)
    BO->setHasNoSignedWrap();

  return BO;
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
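///
/// For example, an expression like (%p + 4) is emitted as
///   %scevgep = getelementptr i8, ptr %p, i64 4
/// rather than as a ptrtoint/add/inttoptr sequence, which alias analysis
/// could not look through.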
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *Offset, Value *V) {
  assert(!isa<Instruction>(V) ||
         SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

  Value *Idx = expand(Offset);

  // Fold a GEP with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(V))
    if (Constant *CRHS = dyn_cast<Constant>(Idx))
      return Builder.CreateGEP(Builder.getInt8Ty(), CLHS, CRHS);

  // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == Instruction::GetElementPtr &&
          IP->getOperand(0) == V && IP->getOperand(1) == Idx &&
          cast<GEPOperator>(&*IP)->getSourceElementType() ==
              Builder.getInt8Ty())
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // Emit a GEP.
  return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "scevgep");
}

/// PickMostRelevantLoop - Given two loops, pick the one that's most relevant
/// for SCEV expansion. If they are nested, this is the most nested. If they
/// are neighboring, pick the later one.
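/// For example, if A contains B, B (the inner loop) is returned; if A and B
/// are siblings and A's header dominates B's, B (the later loop) is returned.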
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  switch (S->getSCEVType()) {
  case scConstant:
  case scVScale:
    return nullptr; // A constant has no relevant loops.
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scAddRecExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : S->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[S] = L;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(S);
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (const SCEV *Op : reverse(S->operands()))
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
      continue;
    }

    assert(!Op->getType()->isPointerTy() && "Only first op can be pointer");
    if (isa<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown and not an instruction, peek through
        // it to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(SE.getAddExpr(NewOps), Sum);
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expand(SE.getNegativeSCEV(Op));
      Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
                        /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple add.
      Value *W = expand(Op);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum))
        std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
                        /*IsSafeToHoist*/ true);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = S->getType();

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (const SCEV *Op : reverse(S->operands()))
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
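  // For example, N = 13 = 8 + 4 + 1 gives X^13 = X^8 * X^4 * X^1: the loop
  // below repeatedly squares P (X, X^2, X^4, X^8, ...) and multiplies into
  // Result exactly those powers whose bit is set in N.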
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // in this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop at UINT64_MAX / 2 so that the loop below can exit
    // once the power of 2 exceeds our Exponent, without BinExp itself
    // overflowing when shifted.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8 etc. and include the ones
    // needed in the result.
    Value *P = expand(I->second);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
                      /*IsSafeToHoist*/ true);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P,
                                      SCEV::FlagAnyWrap,
                                      /*IsSafeToHoist*/ true)
                        : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        auto NWFlags = S->getNoWrapFlags();
        // Clear the nsw flag if the shl can produce a poison value.
        if (RHS->logBase2() == RHS->getBitWidth() - 1)
          NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
                           /*IsSafeToHoist*/ true);
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
                           /*IsSafeToHoist*/ true);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Value *LHS = expand(S->getLHS());
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
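    // A udiv by a power of two is emitted as a logical shift right; e.g.
    // (%x /u 8) becomes (%x lshr 3).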
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(SC->getType(), RHS.logBase2()),
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
  }

  Value *RHS = expand(S->getRHS());
  return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
                     /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (Use &Op : llvm::drop_begin(IncV->operands()))
      if (Instruction *OInst = dyn_cast<Instruction>(Op))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Use &U : llvm::drop_begin(IncV->operands())) {
      if (isa<Constant>(U))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(U)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // Allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // GEPs produced by SCEVExpander use i8 element type.
      if (!cast<GEPOperator>(IncV)->getSourceElementType()->isIntegerTy(8))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'.  This is intended to be used when the instruction
/// 'I' is being moved.  If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos,
                              bool RecomputePoisonFlags) {
  auto FixupPoisonFlags = [this](Instruction *I) {
    // Drop flags that are potentially inferred from old context and infer flags
    // in new context.
    I->dropPoisonGeneratingFlags();
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I))
      if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
        auto *BO = cast<BinaryOperator>(I);
        BO->setHasNoUnsignedWrap(
            ScalarEvolution::maskFlags(*Flags, SCEV::FlagNUW) == SCEV::FlagNUW);
        BO->setHasNoSignedWrap(
            ScalarEvolution::maskFlags(*Flags, SCEV::FlagNSW) == SCEV::FlagNSW);
      }
  };

  if (SE.DT.dominates(IncV, InsertPos)) {
    if (RecomputePoisonFlags)
      FixupPoisonFlags(IncV);
    return true;
  }

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (Instruction *I : llvm::reverse(IVIncs)) {
    fixupInsertPoints(I);
    I->moveBefore(InsertPos);
    if (RecomputePoisonFlags)
      FixupPoisonFlags(I);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (PN->getType()->isPointerTy()) {
    IncV = expandAddToGEP(SE.getSCEV(StepV), PN);
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
  }
  return IncV;
}

/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  // We can't transform to match a pointer PHI.
  Type *PhiTy = Phi->getType();
  Type *RequestedTy = Requested->getType();
  if (PhiTy->isPointerTy() || RequestedTy->isPointerTy())
    return false;

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
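  // E.g. an available phi {0,+,1} can serve a request for {R,+,-1}: at every
  // iteration the requested value is R minus the phi's value.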
  if (SE.getMinusSCEV(Requested->getStart(), Requested) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

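/// Check that the AddRec increment does not wrap in the signed sense. The
/// proof is by widening: in a type of twice the bit width, the add cannot
/// wrap, so if sign-extending the operands before adding yields the same
/// SCEV as sign-extending the result of the add, no signed wrap occurs.
/// IsIncrementNUW below applies the same argument with zero-extension.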
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L, Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop || IVIncInsertPos) &&
         "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (PHINode &PN : L->getHeader()->phis()) {
      if (!SE.isSCEVable(PN.getType()))
        continue;

      // We should not look for an incomplete PHI. Getting the SCEV for an
      // incomplete PHI has no meaning at all.
      if (!PN.isComplete()) {
        SCEV_DEBUG_WITH_TYPE(
            DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
        continue;
      }

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      // TODO: this possibly can be reworked to avoid this cast at all.
      Instruction *TempIncV =
          dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
      if (!TempIncV)
        continue;

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = &PN;
        IncV = TempIncV;
        TruncTy = Normalized->getType();
      }
    }

    if (AddRecPhiMatch) {
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      // Those values were not actually inserted but re-used.
      ReusedValues.insert(AddRecPhiMatch);
      ReusedValues.insert(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header).  Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV =
      expand(Normalized->getStart(), L->getLoopPreheader()->getTerminator());

  // StartV must have been inserted into L's preheader to dominate the new
  // phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  Type *ExpandTy = Normalized->getType();
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expand(Step, L->getHeader()->getFirstInsertionPt());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition.  It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode. LSR SCEV-based salvaging is most
  // effective when we are able to use an IV inserted here, so record it.
  InsertedValues.insert(PN);
  InsertedIVs.push_back(PN);
  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(
        normalizeForPostIncUse(S, Loops, SE, /*CheckInvertible=*/false));
  }

  [[maybe_unused]] const SCEV *Start = Normalized->getStart();
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  assert(SE.properlyDominates(Start, L->getHeader()) &&
         "Start does not properly dominate loop header");
  assert(SE.dominates(Step, L->getHeader()) &&
         "Step does not dominate loop header");

  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // We might be introducing a new use of the post-inc IV that is not poison
    // safe, in which case we should drop poison generating flags. Only keep
    // those flags for which SCEV has proven that they always hold.
    if (isa<OverflowingBinaryOperator>(Result)) {
      auto *I = cast<Instruction>(Result);
      if (!S->hasNoUnsignedWrap())
        I->setHasNoUnsignedWrap(false);
      if (!S->hasNoSignedWrap())
        I->setHasNoSignedWrap(false);
    }

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
          !S->getType()->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expand(Step, L->getHeader()->getFirstInsertionPt());
      }
      Result = expandIVInc(PN, StepV, L, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    // Truncate the result.
    if (TruncTy != Result->getType())
      Result = Builder.CreateTrunc(Result, TruncTy);

    // Invert the result.
    if (InvertStep)
      Result = Builder.CreateSub(expand(Normalized->getStart()), Result);
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  // In canonical mode we compute the addrec as an expression of a canonical IV
  // using evaluateAtIteration and expand the resulting SCEV expression. This
  // way we avoid introducing new IVs to carry on the computation of the addrec
  // throughout the loop.
  //
  // For nested addrecs evaluateAtIteration might need a canonical IV of a
  // type wider than the addrec itself. Emitting a canonical IV of the
  // proper type might produce non-legal types, for example expanding an i64
  // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
  // back to non-canonical mode for nested addrecs.
  if (!CanonicalMode || (S->getNumOperands() > 2))
    return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
      !S->getType()->isPointerTy()) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->getOperand(i), CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
        findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
    V = expand(SE.getTruncateExpr(SE.getUnknown(V), Ty), NewInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    if (isa<PointerType>(S->getType())) {
      Value *StartV = expand(SE.getPointerBase(S));
      return expandAddToGEP(SE.removePointerBase(S), StartV);
    }

    SmallVector<const SCEV *, 4> NewOps(S->operands());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Just do a normal add. Pre-expand the operands to suppress folding.
    //
    // The LHS and RHS values are factored out of the expand call to make the
    // output independent of the argument evaluation order.
    const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
    const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
    return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar");
    CanonicalIV->insertBefore(Header->begin());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form.  This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
  Value *V = expand(S->getOperand());
  return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
                           GetOptimalInsertionPointForCastOf(V));
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Value *V = expand(S->getOperand());
  return Builder.CreateTrunc(V, S->getType());
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Value *V = expand(S->getOperand());
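  // If the operand is known non-negative, emit the zext with the nneg flag
  // set so later passes can treat it interchangeably with a sext.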
  return Builder.CreateZExt(V, S->getType(), "",
                            SE.isKnownNonNegative(S->getOperand()));
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Value *V = expand(S->getOperand());
  return Builder.CreateSExt(V, S->getType());
}

1305 Value *SCEVExpander::expandMinMaxExpr(const SCEVNAryExpr *S,
1306                                       Intrinsic::ID IntrinID, Twine Name,
1307                                       bool IsSequential) {
1308   Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1309   Type *Ty = LHS->getType();
1310   if (IsSequential)
1311     LHS = Builder.CreateFreeze(LHS);
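       // The freezes are what make this expansion sequential: umin_seq must not
       // be more poisonous than its first operand, so poison from the later
       // operands is neutralized before it can flow through the select chain.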
1312   for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1313     Value *RHS = expand(S->getOperand(i));
1314     if (IsSequential && i != 0)
1315       RHS = Builder.CreateFreeze(RHS);
1316     Value *Sel;
1317     if (Ty->isIntegerTy())
1318       Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {LHS, RHS},
1319                                     /*FMFSource=*/nullptr, Name);
1320     else {
1321       Value *ICmp =
1322           Builder.CreateICmp(MinMaxIntrinsic::getPredicate(IntrinID), LHS, RHS);
1323       Sel = Builder.CreateSelect(ICmp, LHS, RHS, Name);
1324     }
1325     LHS = Sel;
1326   }
1327   return LHS;
1328 }
1329 
1330 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1331   return expandMinMaxExpr(S, Intrinsic::smax, "smax");
1332 }
1333 
1334 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1335   return expandMinMaxExpr(S, Intrinsic::umax, "umax");
1336 }
1337 
1338 Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
1339   return expandMinMaxExpr(S, Intrinsic::smin, "smin");
1340 }
1341 
1342 Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
1343   return expandMinMaxExpr(S, Intrinsic::umin, "umin");
1344 }
1345 
1346 Value *SCEVExpander::visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S) {
1347   return expandMinMaxExpr(S, Intrinsic::umin, "umin", /*IsSequential*/true);
1348 }
1349 
1350 Value *SCEVExpander::visitVScale(const SCEVVScale *S) {
1351   return Builder.CreateVScale(ConstantInt::get(S->getType(), 1));
1352 }
1353 
1354 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
1355                                    BasicBlock::iterator IP) {
1356   setInsertPoint(IP);
1357   Value *V = expandCodeFor(SH, Ty);
1358   return V;
1359 }
1360 
1361 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
1362   // Expand the code for this SCEV.
1363   Value *V = expand(SH);
1364 
1365   if (Ty) {
1366     assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1367            "non-trivial casts should be done with the SCEVs directly!");
1368     V = InsertNoopCastOfTo(V, Ty);
1369   }
1370   return V;
1371 }
1372 
1373 static bool
1374 canReuseInstruction(ScalarEvolution &SE, const SCEV *S, Instruction *I,
1375                     SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
1376   // If the instruction cannot be poison, it's always safe to reuse.
1377   if (programUndefinedIfPoison(I))
1378     return true;
1379 
1380   // Otherwise, it is possible that I is more poisonous than S. Collect the
1381   // poison-contributors of S, and then check whether I has any additional
1382   // poison-contributors. Poison that is contributed through poison-generating
1383   // flags is handled by dropping those flags instead.
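       // For example (illustrative): if S is (a + b) and I is "%x = add nuw a, b",
       // %x may be reused once 'nuw' is dropped, since SCEV's (a + b) makes no
       // no-wrap promise; such instructions are collected in
       // DropPoisonGeneratingInsts so the caller can strip the flags.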
1384   SmallPtrSet<const Value *, 8> PoisonVals;
1385   SE.getPoisonGeneratingValues(PoisonVals, S);
1386 
1387   SmallVector<Value *> Worklist;
1388   SmallPtrSet<Value *, 8> Visited;
1389   Worklist.push_back(I);
1390   while (!Worklist.empty()) {
1391     Value *V = Worklist.pop_back_val();
1392     if (!Visited.insert(V).second)
1393       continue;
1394 
1395     // Avoid walking large instruction graphs.
1396     if (Visited.size() > 16)
1397       return false;
1398 
1399     // Either the value can't be poison, or S would also be poison if it
1400     // is.
1401     if (PoisonVals.contains(V) || isGuaranteedNotToBePoison(V))
1402       continue;
1403 
1404     auto *I = dyn_cast<Instruction>(V);
1405     if (!I)
1406       return false;
1407 
1408     // FIXME: Ignore vscale, even though it technically could be poison. Do this
1409     // because SCEV currently assumes it can't be poison. Remove this special
1410     // case once we properly model when vscale can be poison.
1411     if (auto *II = dyn_cast<IntrinsicInst>(I);
1412         II && II->getIntrinsicID() == Intrinsic::vscale)
1413       continue;
1414 
1415     if (canCreatePoison(cast<Operator>(I), /*ConsiderFlagsAndMetadata*/ false))
1416       return false;
1417 
1418     // If the instruction can't create poison, we can recurse to its operands.
1419     if (I->hasPoisonGeneratingFlagsOrMetadata())
1420       DropPoisonGeneratingInsts.push_back(I);
1421 
1422     for (Value *Op : I->operands())
1423       Worklist.push_back(Op);
1424   }
1425   return true;
1426 }
1427 
1428 Value *SCEVExpander::FindValueInExprValueMap(
1429     const SCEV *S, const Instruction *InsertPt,
1430     SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
1431   // If the expansion is not in CanonicalMode and the SCEV contains any
1432   // scAddRecExpr sub-expression, the SCEV must be expanded literally.
1433   if (!CanonicalMode && SE.containsAddRecurrence(S))
1434     return nullptr;
1435 
1436   // If S is a constant, it may be worse to reuse an existing Value.
1437   if (isa<SCEVConstant>(S))
1438     return nullptr;
1439 
1440   for (Value *V : SE.getSCEVValues(S)) {
1441     Instruction *EntInst = dyn_cast<Instruction>(V);
1442     if (!EntInst)
1443       continue;
1444 
1445     // Choose a Value from the set which dominates the InsertPt.
1446     // InsertPt should be inside the Value's parent loop so as not to break
1447     // the LCSSA form.
1448     assert(EntInst->getFunction() == InsertPt->getFunction());
1449     if (S->getType() != V->getType() || !SE.DT.dominates(EntInst, InsertPt) ||
1450         !(SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1451           SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1452       continue;
1453 
1454     // Make sure reusing the instruction is poison-safe.
1455     if (canReuseInstruction(SE, S, EntInst, DropPoisonGeneratingInsts))
1456       return V;
1457     DropPoisonGeneratingInsts.clear();
1458   }
1459   return nullptr;
1460 }
1461 
1462 // The expansion of a SCEV will either reuse a previous Value in ExprValueMap
1463 // or expand the SCEV literally. Specifically, if the expansion is in LSRMode
1464 // and the SCEV contains any scAddRecExpr sub-expression, it will be expanded
1465 // literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1466 // the expansion first tries to reuse a Value from ExprValueMap and, only when
1467 // that fails, expands the SCEV literally.
1468 Value *SCEVExpander::expand(const SCEV *S) {
1469   // Compute an insertion point for this SCEV object. Hoist the instructions
1470   // as far out in the loop nest as possible.
1471   BasicBlock::iterator InsertPt = Builder.GetInsertPoint();
1472 
1473   // We can move the insertion point only if there are no div or rem operations;
1474   // otherwise we risk moving it over the check for a zero denominator.
1475   auto SafeToHoist = [](const SCEV *S) {
1476     return !SCEVExprContains(S, [](const SCEV *S) {
1477               if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1478                 if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1479                   // Division by non-zero constants can be hoisted.
1480                   return SC->getValue()->isZero();
1481                 // All other divisions should not be moved as they may be
1482                 // divisions by zero and should be kept within the
1483                 // conditions of the surrounding loops that guard their
1484                 // execution (see PR35406).
1485                 return true;
1486               }
1487               return false;
1488             });
1489   };
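       // E.g. a (x /u n) with non-constant n must stay below the n != 0 check
       // that guards the loop; hoisting it into a preheader could introduce a
       // division by zero that the original program never executed.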
1490   if (SafeToHoist(S)) {
1491     for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1492          L = L->getParentLoop()) {
1493       if (SE.isLoopInvariant(S, L)) {
1494         if (!L) break;
1495         if (BasicBlock *Preheader = L->getLoopPreheader()) {
1496           InsertPt = Preheader->getTerminator()->getIterator();
1497         } else {
1498           // LSR sets the insertion point for AddRec start/step values to the
1499           // block start to simplify value reuse, even though it's an invalid
1500           // position. SCEVExpander must correct for this in all cases.
1501           InsertPt = L->getHeader()->getFirstInsertionPt();
1502         }
1503       } else {
1504         // If the SCEV is computable at this level, insert it into the header
1505         // after the PHIs (and after any other instructions that we've inserted
1506         // there) so that it is guaranteed to dominate any user inside the loop.
1507         if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1508           InsertPt = L->getHeader()->getFirstInsertionPt();
1509 
1510         while (InsertPt != Builder.GetInsertPoint() &&
1511                (isInsertedInstruction(&*InsertPt) ||
1512                 isa<DbgInfoIntrinsic>(&*InsertPt))) {
1513           InsertPt = std::next(InsertPt);
1514         }
1515         break;
1516       }
1517     }
1518   }
1519 
1520   // Check to see if we already expanded this here.
1521   auto I = InsertedExpressions.find(std::make_pair(S, &*InsertPt));
1522   if (I != InsertedExpressions.end())
1523     return I->second;
1524 
1525   SCEVInsertPointGuard Guard(Builder, this);
1526   Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1527 
1528   // Expand the expression into instructions.
1529   SmallVector<Instruction *> DropPoisonGeneratingInsts;
1530   Value *V = FindValueInExprValueMap(S, &*InsertPt, DropPoisonGeneratingInsts);
1531   if (!V) {
1532     V = visit(S);
1533     V = fixupLCSSAFormFor(V);
1534   } else {
1535     for (Instruction *I : DropPoisonGeneratingInsts) {
1536       I->dropPoisonGeneratingFlagsAndMetadata();
1537       // See if we can re-infer from first principles any of the flags we just
1538       // dropped.
1539       if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I))
1540         if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
1541           auto *BO = cast<BinaryOperator>(I);
1542           BO->setHasNoUnsignedWrap(
1543             ScalarEvolution::maskFlags(*Flags, SCEV::FlagNUW) == SCEV::FlagNUW);
1544           BO->setHasNoSignedWrap(
1545             ScalarEvolution::maskFlags(*Flags, SCEV::FlagNSW) == SCEV::FlagNSW);
1546         }
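           // A zext's 'nneg' flag can likewise be restored when a dominating
           // condition already proves the source operand is non-negative.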
1547       if (auto *NNI = dyn_cast<PossiblyNonNegInst>(I)) {
1548         auto *Src = NNI->getOperand(0);
1549         if (isImpliedByDomCondition(ICmpInst::ICMP_SGE, Src,
1550                                     Constant::getNullValue(Src->getType()), I,
1551                                     DL).value_or(false))
1552           NNI->setNonNeg(true);
1553       }
1554     }
1555   }
1556   // Remember the expanded value for this SCEV at this location.
1557   //
1558   // This is independent of PostIncLoops. The mapped value simply materializes
1559   // the expression at this insertion point. If the mapped value happened to be
1560   // a postinc expansion, it could be reused by a non-postinc user, but only if
1561   // its insertion point was already at the head of the loop.
1562   InsertedExpressions[std::make_pair(S, &*InsertPt)] = V;
1563   return V;
1564 }
1565 
1566 void SCEVExpander::rememberInstruction(Value *I) {
1567   auto DoInsert = [this](Value *V) {
1568     if (!PostIncLoops.empty())
1569       InsertedPostIncValues.insert(V);
1570     else
1571       InsertedValues.insert(V);
1572   };
1573   DoInsert(I);
1574 }
1575 
1576 /// replaceCongruentIVs - Check for congruent phis in this loop header and
1577 /// replace them with their most canonical representative. Return the number of
1578 /// phis eliminated.
1579 ///
1580 /// This does not depend on any SCEVExpander state but should be used in
1581 /// the same context that SCEVExpander is used.
1582 unsigned
1583 SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
1584                                   SmallVectorImpl<WeakTrackingVH> &DeadInsts,
1585                                   const TargetTransformInfo *TTI) {
1586   // Find integer phis in order of increasing width.
1587   SmallVector<PHINode*, 8> Phis;
1588   for (PHINode &PN : L->getHeader()->phis())
1589     Phis.push_back(&PN);
1590 
1591   if (TTI)
1592     // Use stable_sort to preserve order of equivalent PHIs, so the order
1593     // of the sorted Phis is the same from run to run on the same loop.
1594     llvm::stable_sort(Phis, [](Value *LHS, Value *RHS) {
1595       // Put pointers at the back and make sure pointer < pointer = false.
1596       if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1597         return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1598       return RHS->getType()->getPrimitiveSizeInBits().getFixedValue() <
1599              LHS->getType()->getPrimitiveSizeInBits().getFixedValue();
1600     });
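       // After sorting, integer phis appear widest-first and any pointer phis
       // come last, e.g. [i64 %a, i32 %b, ptr %c].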
1601 
1602   unsigned NumElim = 0;
1603   DenseMap<const SCEV *, PHINode *> ExprToIVMap;
1604   // Process phis from wide to narrow. Map wide phis to their truncation
1605   // so narrow phis can reuse them.
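       // E.g. if an i64 phi and an i32 phi both compute {0,+,1}, the i32 phi
       // is congruent to the truncation of the i64 phi and can be replaced by
       // a trunc of it.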
1606   for (PHINode *Phi : Phis) {
1607     auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
1608       if (Value *V = simplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
1609         return V;
1610       if (!SE.isSCEVable(PN->getType()))
1611         return nullptr;
1612       auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
1613       if (!Const)
1614         return nullptr;
1615       return Const->getValue();
1616     };
1617 
1618     // Fold constant phis. They may be congruent to other constant phis and
1619     // would confuse the logic below that expects proper IVs.
1620     if (Value *V = SimplifyPHINode(Phi)) {
1621       if (V->getType() != Phi->getType())
1622         continue;
1623       SE.forgetValue(Phi);
1624       Phi->replaceAllUsesWith(V);
1625       DeadInsts.emplace_back(Phi);
1626       ++NumElim;
1627       SCEV_DEBUG_WITH_TYPE(DebugType,
1628                            dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
1629                                   << '\n');
1630       continue;
1631     }
1632 
1633     if (!SE.isSCEVable(Phi->getType()))
1634       continue;
1635 
1636     PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1637     if (!OrigPhiRef) {
1638       OrigPhiRef = Phi;
1639       if (Phi->getType()->isIntegerTy() && TTI &&
1640           TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1641         // Make sure we only rewrite using simple induction variables;
1642         // otherwise, we can make the trip count of a loop unanalyzable
1643         // to SCEV.
1644         const SCEV *PhiExpr = SE.getSCEV(Phi);
1645         if (isa<SCEVAddRecExpr>(PhiExpr)) {
1646           // This phi can be freely truncated to the narrowest phi type. Map the
1647           // truncated expression to it so it will be reused for narrow types.
1648           const SCEV *TruncExpr =
1649               SE.getTruncateExpr(PhiExpr, Phis.back()->getType());
1650           ExprToIVMap[TruncExpr] = Phi;
1651         }
1652       }
1653       continue;
1654     }
1655 
1656     // Replacing a pointer phi with an integer phi or vice-versa doesn't make
1657     // sense.
1658     if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1659       continue;
1660 
1661     if (BasicBlock *LatchBlock = L->getLoopLatch()) {
1662       Instruction *OrigInc = dyn_cast<Instruction>(
1663           OrigPhiRef->getIncomingValueForBlock(LatchBlock));
1664       Instruction *IsomorphicInc =
1665           dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1666 
1667       if (OrigInc && IsomorphicInc) {
1668         // If this phi has the same width but is more canonical, replace the
1669         // original with it. As part of the "more canonical" determination,
1670         // respect a prior decision to use an IV chain.
1671         if (OrigPhiRef->getType() == Phi->getType() &&
1672             !(ChainedPhis.count(Phi) ||
1673               isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
1674             (ChainedPhis.count(Phi) ||
1675              isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1676           std::swap(OrigPhiRef, Phi);
1677           std::swap(OrigInc, IsomorphicInc);
1678         }
1679         // Replacing the congruent phi is sufficient because acyclic
1680         // redundancy elimination, CSE/GVN, should handle the
1681         // rest. However, once SCEV proves that a phi is congruent,
1682         // it's often the head of an IV user cycle that is isomorphic
1683         // with the original phi. It's worth eagerly cleaning up the
1684         // common case of a single IV increment so that DeleteDeadPHIs
1685         // can remove cycles that had postinc uses.
1686         // Because we may potentially introduce a new use of OrigIV that didn't
1687         // exist before at this point, its poison flags need readjustment.
1688         const SCEV *TruncExpr =
1689             SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
1690         if (OrigInc != IsomorphicInc &&
1691             TruncExpr == SE.getSCEV(IsomorphicInc) &&
1692             SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
1693             hoistIVInc(OrigInc, IsomorphicInc, /*RecomputePoisonFlags*/ true)) {
1694           SCEV_DEBUG_WITH_TYPE(
1695               DebugType, dbgs() << "INDVARS: Eliminated congruent iv.inc: "
1696                                 << *IsomorphicInc << '\n');
1697           Value *NewInc = OrigInc;
1698           if (OrigInc->getType() != IsomorphicInc->getType()) {
1699             BasicBlock::iterator IP;
1700             if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
1701               IP = PN->getParent()->getFirstInsertionPt();
1702             else
1703               IP = OrigInc->getNextNonDebugInstruction()->getIterator();
1704 
1705             IRBuilder<> Builder(IP->getParent(), IP);
1706             Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
1707             NewInc = Builder.CreateTruncOrBitCast(
1708                 OrigInc, IsomorphicInc->getType(), IVName);
1709           }
1710           IsomorphicInc->replaceAllUsesWith(NewInc);
1711           DeadInsts.emplace_back(IsomorphicInc);
1712         }
1713       }
1714     }
1715     SCEV_DEBUG_WITH_TYPE(DebugType,
1716                          dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
1717                                 << '\n');
1718     SCEV_DEBUG_WITH_TYPE(
1719         DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
1720     ++NumElim;
1721     Value *NewIV = OrigPhiRef;
1722     if (OrigPhiRef->getType() != Phi->getType()) {
1723       IRBuilder<> Builder(L->getHeader(),
1724                           L->getHeader()->getFirstInsertionPt());
1725       Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1726       NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1727     }
1728     Phi->replaceAllUsesWith(NewIV);
1729     DeadInsts.emplace_back(Phi);
1730   }
1731   return NumElim;
1732 }
1733 
1734 bool SCEVExpander::hasRelatedExistingExpansion(const SCEV *S,
1735                                                const Instruction *At,
1736                                                Loop *L) {
1737   using namespace llvm::PatternMatch;
1738 
1739   SmallVector<BasicBlock *, 4> ExitingBlocks;
1740   L->getExitingBlocks(ExitingBlocks);
1741 
1742   // Look for a suitable value in simple conditions at the loop exits.
1743   for (BasicBlock *BB : ExitingBlocks) {
1744     ICmpInst::Predicate Pred;
1745     Instruction *LHS, *RHS;
1746 
1747     if (!match(BB->getTerminator(),
1748                m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
1749                     m_BasicBlock(), m_BasicBlock())))
1750       continue;
1751 
1752     if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1753       return true;
1754 
1755     if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1756       return true;
1757   }
1758 
1759   // Use expand's logic which is used for reusing a previous Value in
1760   // ExprValueMap.  Note that we don't currently model the cost of
1761   // needing to drop poison generating flags on the instruction if we
1762   // want to reuse it.  We effectively assume that has zero cost.
1763   SmallVector<Instruction *> DropPoisonGeneratingInsts;
1764   return FindValueInExprValueMap(S, At, DropPoisonGeneratingInsts) != nullptr;
1765 }
1766 
1767 template<typename T> static InstructionCost costAndCollectOperands(
1768   const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
1769   TargetTransformInfo::TargetCostKind CostKind,
1770   SmallVectorImpl<SCEVOperand> &Worklist) {
1771 
1772   const T *S = cast<T>(WorkItem.S);
1773   InstructionCost Cost = 0;
1774   // Object to help map SCEV operands to expanded IR instructions.
1775   struct OperationIndices {
1776     OperationIndices(unsigned Opc, size_t min, size_t max) :
1777       Opcode(Opc), MinIdx(min), MaxIdx(max) { }
1778     unsigned Opcode;
1779     size_t MinIdx;
1780     size_t MaxIdx;
1781   };
1782 
1783   // Collect the operations of all the instructions that will be needed to
1784   // expand the SCEVExpr. This is so that when we come to cost the operands,
1785   // we know what the generated user(s) will be.
1786   SmallVector<OperationIndices, 2> Operations;
1787 
1788   auto CastCost = [&](unsigned Opcode) -> InstructionCost {
1789     Operations.emplace_back(Opcode, 0, 0);
1790     return TTI.getCastInstrCost(Opcode, S->getType(),
1791                                 S->getOperand(0)->getType(),
1792                                 TTI::CastContextHint::None, CostKind);
1793   };
1794 
1795   auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
1796                        unsigned MinIdx = 0,
1797                        unsigned MaxIdx = 1) -> InstructionCost {
1798     Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1799     return NumRequired *
1800       TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
1801   };
1802 
1803   auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
1804                         unsigned MaxIdx) -> InstructionCost {
1805     Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1806     Type *OpType = S->getType();
1807     return NumRequired * TTI.getCmpSelInstrCost(
1808                              Opcode, OpType, CmpInst::makeCmpResultType(OpType),
1809                              CmpInst::BAD_ICMP_PREDICATE, CostKind);
1810   };
1811 
1812   switch (S->getSCEVType()) {
1813   case scCouldNotCompute:
1814     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
1815   case scUnknown:
1816   case scConstant:
1817   case scVScale:
1818     return 0;
1819   case scPtrToInt:
1820     Cost = CastCost(Instruction::PtrToInt);
1821     break;
1822   case scTruncate:
1823     Cost = CastCost(Instruction::Trunc);
1824     break;
1825   case scZeroExtend:
1826     Cost = CastCost(Instruction::ZExt);
1827     break;
1828   case scSignExtend:
1829     Cost = CastCost(Instruction::SExt);
1830     break;
1831   case scUDivExpr: {
1832     unsigned Opcode = Instruction::UDiv;
1833     if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
1834       if (SC->getAPInt().isPowerOf2())
1835         Opcode = Instruction::LShr;
1836     Cost = ArithCost(Opcode, 1);
1837     break;
1838   }
1839   case scAddExpr:
1840     Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
1841     break;
1842   case scMulExpr:
1843     // TODO: this is a very pessimistic cost model for Mul, given the
1844     // binary-power (Bin Pow) algorithm actually used by the expander;
1845     // see SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
1846     Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
1847     break;
1848   case scSMaxExpr:
1849   case scUMaxExpr:
1850   case scSMinExpr:
1851   case scUMinExpr:
1852   case scSequentialUMinExpr: {
1853     // FIXME: should this ask for the cost of intrinsics?
1854     // The reduction tree.
1855     Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
1856     Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
1857     switch (S->getSCEVType()) {
1858     case scSequentialUMinExpr: {
1859       // The safety net against poison.
1860       // FIXME: this is broken.
1861       Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
1862       Cost += ArithCost(Instruction::Or,
1863                         S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
1864       Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
1865       break;
1866     }
1867     default:
1868       assert(!isa<SCEVSequentialMinMaxExpr>(S) &&
1869              "Unhandled SCEV expression type?");
1870       break;
1871     }
1872     break;
1873   }
1874   case scAddRecExpr: {
1875     // In this polynomial, we may have some zero operands, and we shouldn't
1876     // really charge for those. So how many non-zero coefficients are there?
1877     int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
1878                                     return !Op->isZero();
1879                                   });
1880 
1881     assert(NumTerms >= 1 && "Polynomial should have at least one term.");
1882     assert(!(*std::prev(S->operands().end()))->isZero() &&
1883            "Last operand should not be zero");
1884 
1885     // Ignoring the constant term (operand 0), how many coefficients are u> 1?
1886     int NumNonZeroDegreeNonOneTerms =
1887       llvm::count_if(S->operands(), [](const SCEV *Op) {
1888                       auto *SConst = dyn_cast<SCEVConstant>(Op);
1889                       return !SConst || SConst->getAPInt().ugt(1);
1890                     });
1891 
1892     // Much like with a normal add expr, the polynomial will require
1893     // one less addition than the number of its terms.
1894     InstructionCost AddCost = ArithCost(Instruction::Add, NumTerms - 1,
1895                                         /*MinIdx*/ 1, /*MaxIdx*/ 1);
1896     // Here, *each* one of those will require a multiplication.
1897     InstructionCost MulCost =
1898         ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
1899     Cost = AddCost + MulCost;
1900 
1901     // What is the degree of this polynomial?
1902     int PolyDegree = S->getNumOperands() - 1;
1903     assert(PolyDegree >= 1 && "Should be at least affine.");
1904 
1905     // The final term will be:
1906     //   Op_{PolyDegree} * x ^ {PolyDegree}
1907     // Where  x ^ {PolyDegree}  will again require PolyDegree-1 mul operations.
1908     // Note that  x ^ {PolyDegree} = x * x ^ {PolyDegree-1}  so charging for
1909     // x ^ {PolyDegree}  will give us  x ^ {2} .. x ^ {PolyDegree-1}  for free.
1910     // FIXME: this is conservatively correct, but might be overly pessimistic.
1911     Cost += MulCost * (PolyDegree - 1);
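         // Illustrative tally: a degree-2 addrec {A,+,B,+,C} with all terms
         // non-zero and non-unit is charged 2 adds plus 3 muls for the terms,
         // and the 3-mul MulCost once more for x^2 (the pessimism noted above).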
1912     break;
1913   }
1914   }
1915 
1916   for (auto &CostOp : Operations) {
1917     for (auto SCEVOp : enumerate(S->operands())) {
1918       // Clamp the index to account for multiple IR operations being chained.
1919       size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
1920       size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
1921       Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
1922     }
1923   }
1924   return Cost;
1925 }
1926 
1927 bool SCEVExpander::isHighCostExpansionHelper(
1928     const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
1929     InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
1930     SmallPtrSetImpl<const SCEV *> &Processed,
1931     SmallVectorImpl<SCEVOperand> &Worklist) {
1932   if (Cost > Budget)
1933     return true; // Already run out of budget, give up.
1934 
1935   const SCEV *S = WorkItem.S;
1936   // Was the cost of expansion of this expression already accounted for?
1937   if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
1938     return false; // We have already accounted for this expression.
1939 
1940   // If we can find an existing value for this SCEV available at the point "At"
1941   // then consider the expression cheap.
1942   if (hasRelatedExistingExpansion(S, &At, L))
1943     return false; // Consider the expression to be free.
1944 
1945   TargetTransformInfo::TargetCostKind CostKind =
1946       L->getHeader()->getParent()->hasMinSize()
1947           ? TargetTransformInfo::TCK_CodeSize
1948           : TargetTransformInfo::TCK_RecipThroughput;
1949 
1950   switch (S->getSCEVType()) {
1951   case scCouldNotCompute:
1952     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
1953   case scUnknown:
1954   case scVScale:
1955     // Assume to be zero-cost.
1956     return false;
1957   case scConstant: {
1958     // Only evaluate the costs of constants when optimizing for size.
1959     if (CostKind != TargetTransformInfo::TCK_CodeSize)
1960       return false;
1961     const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
1962     Type *Ty = S->getType();
1963     Cost += TTI.getIntImmCostInst(
1964         WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
1965     return Cost > Budget;
1966   }
1967   case scTruncate:
1968   case scPtrToInt:
1969   case scZeroExtend:
1970   case scSignExtend: {
1971     Cost +=
1972         costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
1973     return false; // Will answer upon next entry into this function.
1974   }
1975   case scUDivExpr: {
1976     // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
1977     // HowManyLessThans produced to compute a precise expression, rather than a
1978     // UDiv from the user's code. If we can't find a UDiv in the code with some
1979     // simple searching, we need to account for its cost.
1980 
1981     // At the beginning of this function we already tried to find an existing
1982     // value for plain 'S'. Now try to look up 'S + 1', since it is a common
1983     // pattern involving division. This is just a simple search heuristic.
1984     if (hasRelatedExistingExpansion(
1985             SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
1986       return false; // Consider it to be free.
1987 
1988     Cost +=
1989         costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
1990     return false; // Will answer upon next entry into this function.
1991   }
1992   case scAddExpr:
1993   case scMulExpr:
1994   case scUMaxExpr:
1995   case scSMaxExpr:
1996   case scUMinExpr:
1997   case scSMinExpr:
1998   case scSequentialUMinExpr: {
1999     assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2000            "Nary expr should have more than 1 operand.");
2001     // The simple nary expr will require one less op (or pair of ops)
2002     // than the number of its terms.
2003     Cost +=
2004         costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
2005     return Cost > Budget;
2006   }
2007   case scAddRecExpr: {
2008     assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2009            "Polynomial should be at least linear");
2010     Cost += costAndCollectOperands<SCEVAddRecExpr>(
2011         WorkItem, TTI, CostKind, Worklist);
2012     return Cost > Budget;
2013   }
2014   }
2015   llvm_unreachable("Unknown SCEV kind!");
2016 }
2017 
2018 Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2019                                             Instruction *IP) {
2020   assert(IP);
2021   switch (Pred->getKind()) {
2022   case SCEVPredicate::P_Union:
2023     return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2024   case SCEVPredicate::P_Compare:
2025     return expandComparePredicate(cast<SCEVComparePredicate>(Pred), IP);
2026   case SCEVPredicate::P_Wrap: {
2027     auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2028     return expandWrapPredicate(AddRecPred, IP);
2029   }
2030   }
2031   llvm_unreachable("Unknown SCEV predicate type");
2032 }
2033 
2034 Value *SCEVExpander::expandComparePredicate(const SCEVComparePredicate *Pred,
2035                                             Instruction *IP) {
2036   Value *Expr0 = expand(Pred->getLHS(), IP);
2037   Value *Expr1 = expand(Pred->getRHS(), IP);
2038 
2039   Builder.SetInsertPoint(IP);
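       // The check is built with the *inverse* predicate, so the expanded value
       // is true at runtime exactly when the assumed predicate does not hold.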
2040   auto InvPred = ICmpInst::getInversePredicate(Pred->getPredicate());
2041   auto *I = Builder.CreateICmp(InvPred, Expr0, Expr1, "ident.check");
2042   return I;
2043 }
2044 
2045 Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2046                                            Instruction *Loc, bool Signed) {
2047   assert(AR->isAffine() && "Cannot generate RT check for "
2048                            "non-affine expression");
2049 
2050   // FIXME: It is highly suspicious that we're ignoring the predicates here.
2051   SmallVector<const SCEVPredicate *, 4> Pred;
2052   const SCEV *ExitCount =
2053       SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
2054 
2055   assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2056 
2057   const SCEV *Step = AR->getStepRecurrence(SE);
2058   const SCEV *Start = AR->getStart();
2059 
2060   Type *ARTy = AR->getType();
2061   unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2062   unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2063 
2064   // The expression {Start,+,Step} has nusw/nssw if
2065   //   Step < 0, Start - |Step| * Backedge <= Start
2066   //   Step >= 0, Start + |Step| * Backedge > Start
2067   // and |Step| * Backedge doesn't unsigned overflow.
2068 
2069   Builder.SetInsertPoint(Loc);
2070   Value *TripCountVal = expand(ExitCount, Loc);
2071 
2072   IntegerType *Ty =
2073       IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2074 
2075   Value *StepValue = expand(Step, Loc);
2076   Value *NegStepValue = expand(SE.getNegativeSCEV(Step), Loc);
2077   Value *StartValue = expand(Start, Loc);
2078 
2079   ConstantInt *Zero =
2080       ConstantInt::get(Loc->getContext(), APInt::getZero(DstBits));
2081 
2082   Builder.SetInsertPoint(Loc);
2083   // Compute |Step|
2084   Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2085   Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2086 
2087   // Compute |Step| * Backedge
2088   // Compute:
2089   //   1. Start + |Step| * Backedge < Start
2090   //   2. Start - |Step| * Backedge > Start
2091   //
2092   // And select either 1. or 2. depending on whether step is positive or
2093   // negative. If Step is known to be positive or negative, only create
2094   // either 1. or 2.
2095   auto ComputeEndCheck = [&]() -> Value * {
2096     // Checking <u 0 is always false.
2097     if (!Signed && Start->isZero() && SE.isKnownPositive(Step))
2098       return ConstantInt::getFalse(Loc->getContext());
2099 
2100     // Get the backedge taken count and truncate or extend it to the AR type.
2101     Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2102 
2103     Value *MulV, *OfMul;
2104     if (Step->isOne()) {
2105       // Special-case a Step of one: the potentially costly `umul_with_overflow`
2106       // isn't needed and there is never an overflow, so to avoid artificially
2107       // inflating the cost of the check, directly emit the optimized IR.
2108       MulV = TruncTripCount;
2109       OfMul = ConstantInt::getFalse(MulV->getContext());
2110     } else {
2111       auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
2112                                              Intrinsic::umul_with_overflow, Ty);
2113       CallInst *Mul =
2114           Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2115       MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2116       OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2117     }
2118 
2119     Value *Add = nullptr, *Sub = nullptr;
2120     bool NeedPosCheck = !SE.isKnownNegative(Step);
2121     bool NeedNegCheck = !SE.isKnownPositive(Step);
2122 
2123     if (isa<PointerType>(ARTy)) {
2124       Value *NegMulV = Builder.CreateNeg(MulV);
2125       if (NeedPosCheck)
2126         Add = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, MulV);
2127       if (NeedNegCheck)
2128         Sub = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, NegMulV);
2129     } else {
2130       if (NeedPosCheck)
2131         Add = Builder.CreateAdd(StartValue, MulV);
2132       if (NeedNegCheck)
2133         Sub = Builder.CreateSub(StartValue, MulV);
2134     }
2135 
2136     Value *EndCompareLT = nullptr;
2137     Value *EndCompareGT = nullptr;
2138     Value *EndCheck = nullptr;
2139     if (NeedPosCheck)
2140       EndCheck = EndCompareLT = Builder.CreateICmp(
2141           Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2142     if (NeedNegCheck)
2143       EndCheck = EndCompareGT = Builder.CreateICmp(
2144           Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2145     if (NeedPosCheck && NeedNegCheck) {
2146       // Select the answer based on the sign of Step.
2147       EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2148     }
2149     return Builder.CreateOr(EndCheck, OfMul);
2150   };
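       // Sketch of the emitted IR when Step may be positive (illustrative):
       //   %mul = call { iN, i1 } @llvm.umul.with.overflow.iN(%abs.step, %btc)
       //   %add = add iN %start, %mul.result
       //   %cmp = icmp ult iN %add, %start     ; slt when Signed
       //   %chk = or i1 %cmp, %mul.overflow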
2151   Value *EndCheck = ComputeEndCheck();
2152 
2153   // If the backedge taken count type is larger than the AR type,
2154   // check that we don't drop any bits by truncating it. If we are
2155   // dropping bits, then we have overflow (unless the step is zero).
2156   if (SrcBits > DstBits) {
2157     auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2158     auto *BackedgeCheck =
2159         Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2160                            ConstantInt::get(Loc->getContext(), MaxVal));
2161     BackedgeCheck = Builder.CreateAnd(
2162         BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2163 
2164     EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2165   }
2166 
2167   return EndCheck;
2168 }
2169 
2170 Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
2171                                          Instruction *IP) {
2172   const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2173   Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2174 
2175   // Add a check for NUSW
2176   if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2177     NUSWCheck = generateOverflowCheck(A, IP, false);
2178 
2179   // Add a check for NSSW
2180   if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2181     NSSWCheck = generateOverflowCheck(A, IP, true);
2182 
2183   if (NUSWCheck && NSSWCheck)
2184     return Builder.CreateOr(NUSWCheck, NSSWCheck);
2185 
2186   if (NUSWCheck)
2187     return NUSWCheck;
2188 
2189   if (NSSWCheck)
2190     return NSSWCheck;
2191 
2192   return ConstantInt::getFalse(IP->getContext());
2193 }
2194 
2195 Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
2196                                           Instruction *IP) {
2197   // Loop over all checks in this set.
2198   SmallVector<Value *> Checks;
2199   for (const auto *Pred : Union->getPredicates()) {
2200     Checks.push_back(expandCodeForPredicate(Pred, IP));
2201     Builder.SetInsertPoint(IP);
2202   }
2203 
2204   if (Checks.empty())
2205     return ConstantInt::getFalse(IP->getContext());
2206   return Builder.CreateOr(Checks);
2207 }
2208 
2209 Value *SCEVExpander::fixupLCSSAFormFor(Value *V) {
2210   auto *DefI = dyn_cast<Instruction>(V);
2211   if (!PreserveLCSSA || !DefI)
2212     return V;
2213 
2214   Instruction *InsertPt = &*Builder.GetInsertPoint();
2215   Loop *DefLoop = SE.LI.getLoopFor(DefI->getParent());
2216   Loop *UseLoop = SE.LI.getLoopFor(InsertPt->getParent());
2217   if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2218     return V;
2219 
2220   // Create a temporary instruction at the current insertion point, so we
2221   // can hand it off to the helper to create LCSSA PHIs if required for the
2222   // new use.
2223   // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
2224   // would accept an insertion point and return an LCSSA phi for that
2225   // insertion point, so there is no need to insert & remove the temporary
2226   // instruction.
2227   Type *ToTy;
2228   if (DefI->getType()->isIntegerTy())
2229     ToTy = PointerType::get(DefI->getContext(), 0);
2230   else
2231     ToTy = Type::getInt32Ty(DefI->getContext());
2232   Instruction *User =
2233       CastInst::CreateBitOrPointerCast(DefI, ToTy, "tmp.lcssa.user", InsertPt);
2234   auto RemoveUserOnExit =
2235       make_scope_exit([User]() { User->eraseFromParent(); });
2236 
2237   SmallVector<Instruction *, 1> ToUpdate;
2238   ToUpdate.push_back(DefI);
2239   SmallVector<PHINode *, 16> PHIsToRemove;
2240   SmallVector<PHINode *, 16> InsertedPHIs;
2241   formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, &PHIsToRemove,
2242                            &InsertedPHIs);
2243   for (PHINode *PN : InsertedPHIs)
2244     rememberInstruction(PN);
2245   for (PHINode *PN : PHIsToRemove) {
2246     if (!PN->use_empty())
2247       continue;
2248     InsertedValues.erase(PN);
2249     InsertedPostIncValues.erase(PN);
2250     PN->eraseFromParent();
2251   }
2252 
2253   return User->getOperand(0);
2254 }
2255 
2256 namespace {
2257 // Search for a SCEV subexpression that is not safe to expand.  Any expression
2258 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2259 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
2260 // instruction, but the important thing is that we prove the denominator is
2261 // nonzero before expansion.
2262 //
2263 // IVUsers already checks that IV-derived expressions are safe. So this check is
2264 // only needed when the expression includes some subexpression that is not IV
2265 // derived.
2266 //
2267 // Currently, we only allow division by a value provably non-zero here.
2268 //
2269 // We cannot generally expand recurrences unless the step dominates the loop
2270 // header. The expander handles the special case of affine recurrences by
2271 // scaling the recurrence outside the loop, but this technique isn't generally
2272 // applicable. Expanding a nested recurrence outside a loop requires computing
2273 // binomial coefficients. This could be done, but the recurrence has to be in a
2274 // perfectly reduced form, which can't be guaranteed.
2275 struct SCEVFindUnsafe {
2276   ScalarEvolution &SE;
2277   bool CanonicalMode;
2278   bool IsUnsafe = false;
2279 
2280   SCEVFindUnsafe(ScalarEvolution &SE, bool CanonicalMode)
2281       : SE(SE), CanonicalMode(CanonicalMode) {}
2282 
2283   bool follow(const SCEV *S) {
2284     if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2285       if (!SE.isKnownNonZero(D->getRHS())) {
2286         IsUnsafe = true;
2287         return false;
2288       }
2289     }
2290     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2291       // For non-affine addrecs or in non-canonical mode we need a preheader
2292       // to insert into.
2293       if (!AR->getLoop()->getLoopPreheader() &&
2294           (!CanonicalMode || !AR->isAffine())) {
2295         IsUnsafe = true;
2296         return false;
2297       }
2298     }
2299     return true;
2300   }
2301   bool isDone() const { return IsUnsafe; }
2302 };
2303 } // namespace
2304 
2305 bool SCEVExpander::isSafeToExpand(const SCEV *S) const {
2306   SCEVFindUnsafe Search(SE, CanonicalMode);
2307   visitAll(S, Search);
2308   return !Search.IsUnsafe;
2309 }
2310 
2311 bool SCEVExpander::isSafeToExpandAt(const SCEV *S,
2312                                     const Instruction *InsertionPoint) const {
2313   if (!isSafeToExpand(S))
2314     return false;
2315   // We have to prove that the expanded site of S dominates InsertionPoint.
2316   // This is easy when not in the same block, but hard when S is an instruction
2317   // to be expanded somewhere inside the same block as our insertion point.
2318   // What we really need here is something analogous to an OrderedBasicBlock,
2319   // but for the moment, we paper over the problem by handling two common and
2320   // cheap to check cases.
2321   if (SE.properlyDominates(S, InsertionPoint->getParent()))
2322     return true;
2323   if (SE.dominates(S, InsertionPoint->getParent())) {
2324     if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
2325       return true;
2326     if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2327       if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue()))
2328         return true;
2329   }
2330   return false;
2331 }
2332 
2333 void SCEVExpanderCleaner::cleanup() {
2334   // Result is used, nothing to remove.
2335   if (ResultUsed)
2336     return;
2337 
2338   auto InsertedInstructions = Expander.getAllInsertedInstructions();
2339 #ifndef NDEBUG
2340   SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(),
2341                                             InsertedInstructions.end());
2342   (void)InsertedSet;
2343 #endif
2344   // Remove sets with value handles.
2345   Expander.clear();
2346 
2347   // Remove all inserted instructions.
2348   for (Instruction *I : reverse(InsertedInstructions)) {
2349 #ifndef NDEBUG
2350     assert(all_of(I->users(),
2351                   [&InsertedSet](Value *U) {
2352                     return InsertedSet.contains(cast<Instruction>(U));
2353                   }) &&
2354            "removed instruction should only be used by instructions inserted "
2355            "during expansion");
2356 #endif
2357     assert(!I->getType()->isVoidTy() &&
2358            "inserted instruction should have non-void types");
2359     I->replaceAllUsesWith(PoisonValue::get(I->getType()));
2360     I->eraseFromParent();
2361   }
2362 }
2363