//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
#else
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
#endif

using namespace llvm;

cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
    "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
    cl::desc("When performing SCEV expansion only if it is cheap to do, this "
             "controls the budget that is considered cheap (default = 4)"));
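// Although hidden, the budget can still be overridden on the command line,
// e.g. "opt -scev-cheap-expansion-budget=8" (illustrative value).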

using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one (= dominating IP) exists, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Value *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users()) {
    if (U->getType() != Ty)
      continue;
    CastInst *CI = dyn_cast<CastInst>(U);
    if (!CI || CI->getOpcode() != Op)
      continue;

    // Found a suitable cast that is at IP or comes before IP. Use it. Note that
    // the cast must also properly dominate the Builder's insertion point.
    if (IP->getParent() == CI->getParent() && &*BIP != CI &&
        (&*IP == CI || CI->comesBefore(&*IP))) {
      Ret = CI;
      break;
    }
  }

  // Create a new cast.
  if (!Ret) {
    SCEVInsertPointGuard Guard(Builder, this);
    Builder.SetInsertPoint(&*IP);
    Ret = Builder.CreateCast(Op, V, Ty, V->getName());
  }

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(!isa<Instruction>(Ret) ||
         SE.DT.dominates(cast<Instruction>(Ret), &*BIP));

  return Ret;
}

BasicBlock::iterator
SCEVExpander::findInsertPointAfter(Instruction *I,
                                   Instruction *MustDominate) const {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getParent()->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  // Adjust insert point to be after instructions inserted by the expander, so
  // we can re-use already inserted instructions. Avoid skipping past the
  // original \p MustDominate, in case it is an inserted instruction.
  while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
    ++IP;

  return IP;
}

BasicBlock::iterator
SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return IP;
  }

  // For an instruction, insert the cast immediately after the instruction.
  if (Instruction *I = dyn_cast<Instruction>(V))
    return findInsertPointAfter(I, &*Builder.GetInsertPoint());

  // Otherwise, this must be some kind of a constant,
  // so let's plop this cast into the function's entry block.
  assert(isa<Constant>(V) &&
         "Expected the cast argument to be a global/constant");
  return Builder.GetInsertBlock()
      ->getParent()
      ->getEntryBlock()
      .getFirstInsertionPt();
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // inttoptr only works for integral pointers. For non-integral pointers, we
  // can create a GEP on i8* null with the integral value as index. Note that
  // it is safe to use a GEP of null instead of inttoptr here, because only
  // expressions already based on a GEP of null should be converted to pointers
  // during expansion.
  if (Op == Instruction::IntToPtr) {
    auto *PtrTy = cast<PointerType>(Ty);
    if (DL.isNonIntegralPointerType(PtrTy)) {
      auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
      assert(DL.getTypeAllocSize(Int8PtrTy->getElementType()) == 1 &&
             "alloc size of i8 must be 1 byte for the GEP to be correct");
      auto *GEP = Builder.CreateGEP(
          Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "uglygep");
      return Builder.CreateBitCast(GEP, Ty);
    }
  }
  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Try to reuse existing cast, or insert one.
  return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation, and hoisting
/// to an outer loop when the opportunity is there and it is safe.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS,
                                 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
        // Ensure that no-wrap flags match.
        if (isa<OverflowingBinaryOperator>(I)) {
          if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
            return true;
          if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
            return true;
        }
        // Conservatively, do not reuse any instruction that has the exact
        // flag set.
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  if (IsSafeToHoist) {
    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  if (Flags & SCEV::FlagNUW)
    BO->setHasNoUnsignedWrap();
  if (Flags & SCEV::FlagNSW)
    BO->setHasNoSignedWrap();

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
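/// For example, factoring 4 out of the constant 10 yields 2 with remainder 2,
/// and factoring 4 out of the addrec {8,+,12} yields {2,+,3} with remainder 0.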
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor))
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getAPInt().srem(FC->getAPInt())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->operands());
          NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
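/// For example, a pointer-typed SCEV such as (A + 4*i), where A is of type
/// i32*, would preferably expand to "getelementptr i32, i32* A, i64 i" rather
/// than to an equivalent ptrtoint/add/inttoptr sequence.
///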
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntIdxTy = DL.getIndexType(PTy);

  // For opaque pointers, always generate i8 GEP.
  if (!PTy->isOpaque()) {
    // Descend down the pointer's type and attempt to convert the other
    // operands into GEP indices, at each level. The first index in a GEP
    // indexes into the array implied by the pointer operand; the rest of
    // the indices index into the element or field type selected by the
    // preceding index.
    Type *ElTy = PTy->getElementType();
    for (;;) {
      // If the scale size is not 0, attempt to factor out a scale for
      // array indexing.
      SmallVector<const SCEV *, 8> ScaledOps;
      if (ElTy->isSized()) {
        const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
        if (!ElSize->isZero()) {
          SmallVector<const SCEV *, 8> NewOps;
          for (const SCEV *Op : Ops) {
            const SCEV *Remainder = SE.getConstant(Ty, 0);
            if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
              // Op now has ElSize factored out.
              ScaledOps.push_back(Op);
              if (!Remainder->isZero())
                NewOps.push_back(Remainder);
              AnyNonZeroIndices = true;
            } else {
              // The operand was not divisible, so add it to the list of
              // operands we'll scan next iteration.
              NewOps.push_back(Op);
            }
          }
          // If we made any changes, update Ops.
          if (!ScaledOps.empty()) {
            Ops = NewOps;
            SimplifyAddOperands(Ops, Ty, SE);
          }
        }
      }

      // Record the scaled array index for this level of the type. If
      // we didn't find any operands that could be factored, tentatively
      // assume that element zero was selected (since the zero offset
      // would obviously be folded away).
      Value *Scaled =
          ScaledOps.empty()
              ? Constant::getNullValue(Ty)
              : expandCodeForImpl(SE.getAddExpr(ScaledOps), Ty, false);
      GepIndices.push_back(Scaled);

      // Collect struct field index operands.
      while (StructType *STy = dyn_cast<StructType>(ElTy)) {
        bool FoundFieldNo = false;
        // An empty struct has no fields.
        if (STy->getNumElements() == 0) break;
        // Field offsets are known. See if a constant offset falls within any of
        // the struct fields.
        if (Ops.empty())
          break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *DL.getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                  SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
        // If no struct field offsets were found, tentatively assume that
        // field zero was selected (since the zero offset would obviously
        // be folded away).
        if (!FoundFieldNo) {
          ElTy = STy->getTypeAtIndex(0u);
          GepIndices.push_back(
            Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
        }
      }

      if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
        ElTy = ATy->getElementType();
      else
        // FIXME: Handle VectorType.
        // E.g., if ElTy is a scalable vector, then ElSize is not a
        // compile-time constant and therefore cannot be factored out. The
        // generated IR is less ideal: the base 'V' is cast to i8* and an
        // ugly getelementptr is done over that.
        break;
    }
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    if (!PTy->isOpaque())
      V = InsertNoopCastOfTo(V,
         Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeForImpl(SE.getAddExpr(Ops), Ty, false);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant = any_of(
          GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(PTy->getElementType(), Casted, GepIndices,
                                   "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
  }

  return expand(SE.getAddExpr(Ops));
}

Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
                                    Value *V) {
  const SCEV *const Ops[1] = {Op};
  return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
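/// For example, for two sibling loops where A's header dominates B's header,
/// B is the later loop and is chosen; for nested loops, the inner one wins.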
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
      continue;
    }

    assert(!Op->getType()->isPointerTy() && "Only first op can be pointer");
    if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown that is not an instruction, peek
        // through it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeForImpl(SE.getNegativeSCEV(Op), Ty, false);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
                        /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeForImpl(Op, Ty, false);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
                        /*IsSafeToHoist*/ true);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
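  // For example, X pow 13 = (X pow 8) * (X pow 4) * (X pow 1), since
  // 13 = 8 + 4 + 1; the repeated squarings of P below provide each
  // power-of-two factor.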
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // into this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1u << 31 at most to not deal with unsigned overflow.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8, etc., and multiply those
    // that are needed into the result.
    Value *P = expandCodeForImpl(I->second, Ty, false);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
                      /*IsSafeToHoist*/ true);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P,
                                      SCEV::FlagAnyWrap,
                                      /*IsSafeToHoist*/ true)
                        : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        auto NWFlags = S->getNoWrapFlags();
        // Clear the nsw flag if the shl would produce a poison value.
        if (RHS->logBase2() == RHS->getBitWidth() - 1)
          NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
                           /*IsSafeToHoist*/ true);
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
                           /*IsSafeToHoist*/ true);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeForImpl(S->getLHS(), Ty, false);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()),
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
  }

  Value *RHS = expandCodeForImpl(S->getRHS(), Ty, false);
  return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
                     /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
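/// For example, with Base = {A,+,S}, the addrec is peeled so that Base
/// becomes A and {0,+,S} is folded into Rest.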
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->operands());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (Use &Op : llvm::drop_begin(IncV->operands()))
      if (Instruction *OInst = dyn_cast<Instruction>(Op))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Use &U : llvm::drop_begin(IncV->operands())) {
      if (isa<Constant>(U))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(U)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'.  This is intended to be used when the instruction
/// 'I' is being moved.  If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType())
      IncV = Builder.CreateBitCast(IncV, PN->getType());
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
  }
  return IncV;
}

/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  // We can't transform to match a pointer PHI.
  if (Phi->getType()->isPointerTy())
    return false;

  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getMinusSCEV(Requested->getStart(), Requested) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

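/// Return true if the increment of this AddRec can be marked nsw: in a type
/// twice as wide, adding the sign-extended step to the sign-extended AddRec
/// must give the same result as sign-extending their sum.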
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

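/// Return true if the increment of this AddRec can be marked nuw; the
/// analogous check to IsIncrementNSW, using zero-extension instead.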
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (PHINode &PN : L->getHeader()->phis()) {
      if (!SE.isSCEVable(PN.getType()))
        continue;

      // We should not look for an incomplete PHI. Getting SCEV for an
      // incomplete PHI has no meaning at all.
      if (!PN.isComplete()) {
        SCEV_DEBUG_WITH_TYPE(
            DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
        continue;
      }

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      // TODO: this possibly can be reworked to avoid this cast at all.
      Instruction *TempIncV =
          dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
      if (!TempIncV)
        continue;

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
1265         AddRecPhiMatch = &PN;
1266         IncV = TempIncV;
1267         TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
1268       }
1269     }
1270 
1271     if (AddRecPhiMatch) {
1272       // Ok, the add recurrence looks usable.
1273       // Remember this PHI, even in post-inc mode.
1274       InsertedValues.insert(AddRecPhiMatch);
1275       // Remember the increment.
1276       rememberInstruction(IncV);
1277       // Those values were not actually inserted but re-used.
1278       ReusedValues.insert(AddRecPhiMatch);
1279       ReusedValues.insert(IncV);
1280       return AddRecPhiMatch;
1281     }
1282   }
1283 
1284   // Save the original insertion point so we can restore it when we're done.
1285   SCEVInsertPointGuard Guard(Builder, this);
1286 
1287   // Another AddRec may need to be recursively expanded below. For example, if
1288   // this AddRec is quadratic, the StepV may itself be an AddRec in this
1289   // loop. Remove this loop from the PostIncLoops set before expanding such
1290   // AddRecs. Otherwise, we cannot find a valid position for the step
1291   // (i.e. StepV can never dominate its loop header).  Ideally, we could do
1292   // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
1293   // so it's not worth implementing SmallPtrSet::swap.
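  // Illustrative example (not taken from this code): for the quadratic addrec
  // {0,+,1,+,1}<%L>, the step recurrence {1,+,1}<%L> is itself an addrec in
  // %L, so it must be expanded with %L removed from PostIncLoops first.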
1294   PostIncLoopSet SavedPostIncLoops = PostIncLoops;
1295   PostIncLoops.clear();
1296 
1297   // Expand code for the start value into the loop preheader.
1298   assert(L->getLoopPreheader() &&
1299          "Can't expand add recurrences without a loop preheader!");
1300   Value *StartV =
1301       expandCodeForImpl(Normalized->getStart(), ExpandTy,
1302                         L->getLoopPreheader()->getTerminator(), false);
1303 
1304   // StartV must have been inserted into L's preheader to dominate the new
1305   // phi.
1306   assert(!isa<Instruction>(StartV) ||
1307          SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
1308                                  L->getHeader()));
1309 
1310   // Expand code for the step value. Do this before creating the PHI so that PHI
1311   // reuse code doesn't see an incomplete PHI.
1312   const SCEV *Step = Normalized->getStepRecurrence(SE);
1313   // If the stride is negative, insert a sub instead of an add for the increment
1314   // (unless it's a constant, because subtracts of constants are canonicalized
1315   // to adds).
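  // For illustration (value names hypothetical): with a step of (-1 * %n),
  // the step is negated and the increment is emitted as
  //   %iv.next = sub i64 %iv, %n
  // rather than as an add of the negative step.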
1316   bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1317   if (useSubtract)
1318     Step = SE.getNegativeSCEV(Step);
1319   // Expand the step somewhere that dominates the loop header.
1320   Value *StepV = expandCodeForImpl(
1321       Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
1322 
1323   // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
1324   // we actually do emit an addition.  It does not apply if we emit a
1325   // subtraction.
1326   bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1327   bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1328 
1329   // Create the PHI.
1330   BasicBlock *Header = L->getHeader();
1331   Builder.SetInsertPoint(Header, Header->begin());
1332   pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1333   PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
1334                                   Twine(IVName) + ".iv");
1335 
1336   // Create the step instructions and populate the PHI.
1337   for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1338     BasicBlock *Pred = *HPI;
1339 
1340     // Add a start value.
1341     if (!L->contains(Pred)) {
1342       PN->addIncoming(StartV, Pred);
1343       continue;
1344     }
1345 
1346     // Create a step value and add it to the PHI.
1347     // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
1348     // instructions at IVIncInsertPos.
1349     Instruction *InsertPos = L == IVIncInsertLoop ?
1350       IVIncInsertPos : Pred->getTerminator();
1351     Builder.SetInsertPoint(InsertPos);
1352     Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1353 
1354     if (isa<OverflowingBinaryOperator>(IncV)) {
1355       if (IncrementIsNUW)
1356         cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1357       if (IncrementIsNSW)
1358         cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1359     }
1360     PN->addIncoming(IncV, Pred);
1361   }
1362 
1363   // After expanding subexpressions, restore the PostIncLoops set so the caller
1364   // can ensure that IVIncrement dominates the current uses.
1365   PostIncLoops = SavedPostIncLoops;
1366 
1367   // Remember this PHI, even in post-inc mode. LSR SCEV-based salvaging is most
1368   // effective when we are able to use an IV inserted here, so record it.
1369   InsertedValues.insert(PN);
1370   InsertedIVs.push_back(PN);
1371   return PN;
1372 }
1373 
1374 Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
1375   Type *STy = S->getType();
1376   Type *IntTy = SE.getEffectiveSCEVType(STy);
1377   const Loop *L = S->getLoop();
1378 
1379   // Determine a normalized form of this expression, which is the expression
1380   // before any post-inc adjustment is made.
1381   const SCEVAddRecExpr *Normalized = S;
1382   if (PostIncLoops.count(L)) {
1383     PostIncLoopSet Loops;
1384     Loops.insert(L);
1385     Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
1386   }
1387 
1388   // Strip off any non-loop-dominating component from the addrec start.
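  // For example (illustrative): if Start is defined in a block that does not
  // dominate %L's header, the addrec is rewritten below with a zero start and
  // the original start is re-applied later as PostLoopOffset.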
1389   const SCEV *Start = Normalized->getStart();
1390   const SCEV *PostLoopOffset = nullptr;
1391   if (!SE.properlyDominates(Start, L->getHeader())) {
1392     PostLoopOffset = Start;
1393     Start = SE.getConstant(Normalized->getType(), 0);
1394     Normalized = cast<SCEVAddRecExpr>(
1395       SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
1396                        Normalized->getLoop(),
1397                        Normalized->getNoWrapFlags(SCEV::FlagNW)));
1398   }
1399 
1400   // Strip off any non-loop-dominating component from the addrec step.
1401   const SCEV *Step = Normalized->getStepRecurrence(SE);
1402   const SCEV *PostLoopScale = nullptr;
1403   if (!SE.dominates(Step, L->getHeader())) {
1404     PostLoopScale = Step;
1405     Step = SE.getConstant(Normalized->getType(), 1);
1406     if (!Start->isZero()) {
1407       // The normalization below assumes that Start is constant zero, so if
1408       // it isn't, re-associate Start to PostLoopOffset.
1409       assert(!PostLoopOffset && "Start non-zero but PostLoopOffset set?");
1410       PostLoopOffset = Start;
1411       Start = SE.getConstant(Normalized->getType(), 0);
1412     }
1413     Normalized =
1414       cast<SCEVAddRecExpr>(SE.getAddRecExpr(
1415                              Start, Step, Normalized->getLoop(),
1416                              Normalized->getNoWrapFlags(SCEV::FlagNW)));
1417   }
1418 
1419   // Expand the core addrec. If we need post-loop scaling, force it to
1420   // expand to an integer type to avoid the need for additional casting.
1421   Type *ExpandTy = PostLoopScale ? IntTy : STy;
1422   // We can't use a pointer type for the addrec if the pointer type is
1423   // non-integral.
1424   Type *AddRecPHIExpandTy =
1425       DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;
1426 
1427   // In some cases, we decide to reuse an existing phi node but need to truncate
1428   // it and/or invert the step.
1429   Type *TruncTy = nullptr;
1430   bool InvertStep = false;
1431   PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
1432                                           IntTy, TruncTy, InvertStep);
1433 
1434   // Accommodate post-inc mode, if necessary.
1435   Value *Result;
1436   if (!PostIncLoops.count(L))
1437     Result = PN;
1438   else {
1439     // In PostInc mode, use the post-incremented value.
1440     BasicBlock *LatchBlock = L->getLoopLatch();
1441     assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1442     Result = PN->getIncomingValueForBlock(LatchBlock);
1443 
1444     // We might be introducing a new use of the post-inc IV that is not poison
1445     // safe, in which case we should drop poison generating flags. Only keep
1446     // those flags for which SCEV has proven that they always hold.
1447     if (isa<OverflowingBinaryOperator>(Result)) {
1448       auto *I = cast<Instruction>(Result);
1449       if (!S->hasNoUnsignedWrap())
1450         I->setHasNoUnsignedWrap(false);
1451       if (!S->hasNoSignedWrap())
1452         I->setHasNoSignedWrap(false);
1453     }
1454 
1455     // For an expansion to use the postinc form, the client must call
1456     // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
1457     // or dominated by IVIncInsertPos.
1458     if (isa<Instruction>(Result) &&
1459         !SE.DT.dominates(cast<Instruction>(Result),
1460                          &*Builder.GetInsertPoint())) {
1461       // The induction variable's postinc expansion does not dominate this use.
1462       // IVUsers tries to prevent this case, so it is rare. However, it can
1463       // happen when an IVUser outside the loop is not dominated by the latch
1464       // block. Adjusting IVIncInsertPos before expansion begins cannot handle
1465       // all cases. Consider a phi outside the loop whose operand is replaced
1466       // during expansion with the value of the postinc user. Without fundamentally
1467       // changing the way postinc users are tracked, the only remedy is
1468       // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1469       // but hopefully expandCodeFor handles that.
1470       bool useSubtract =
1471         !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1472       if (useSubtract)
1473         Step = SE.getNegativeSCEV(Step);
1474       Value *StepV;
1475       {
1476         // Expand the step somewhere that dominates the loop header.
1477         SCEVInsertPointGuard Guard(Builder, this);
1478         StepV = expandCodeForImpl(
1479             Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
1480       }
1481       Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1482     }
1483   }
1484 
1485   // We have decided to reuse an induction variable of a dominating loop. Apply
1486   // truncation and/or inversion of the step.
1487   if (TruncTy) {
1488     Type *ResTy = Result->getType();
1489     // Normalize the result type.
1490     if (ResTy != SE.getEffectiveSCEVType(ResTy))
1491       Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
1492     // Truncate the result.
1493     if (TruncTy != Result->getType())
1494       Result = Builder.CreateTrunc(Result, TruncTy);
1495 
1496     // Invert the result.
1497     if (InvertStep)
1498       Result = Builder.CreateSub(
1499           expandCodeForImpl(Normalized->getStart(), TruncTy, false), Result);
1500   }
1501 
1502   // Re-apply any non-loop-dominating scale.
1503   if (PostLoopScale) {
1504     assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
1505     Result = InsertNoopCastOfTo(Result, IntTy);
1506     Result = Builder.CreateMul(Result,
1507                                expandCodeForImpl(PostLoopScale, IntTy, false));
1508   }
1509 
1510   // Re-apply any non-loop-dominating offset.
1511   if (PostLoopOffset) {
1512     if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1513       if (Result->getType()->isIntegerTy()) {
1514         Value *Base = expandCodeForImpl(PostLoopOffset, ExpandTy, false);
1515         Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
1516       } else {
1517         Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
1518       }
1519     } else {
1520       Result = InsertNoopCastOfTo(Result, IntTy);
1521       Result = Builder.CreateAdd(
1522           Result, expandCodeForImpl(PostLoopOffset, IntTy, false));
1523     }
1524   }
1525 
1526   return Result;
1527 }
1528 
1529 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1530   // In canonical mode we compute the addrec as an expression of a canonical IV
1531   // using evaluateAtIteration and expand the resulting SCEV expression. This
1532   // way we avoid introducing new IVs to carry on the comutation of the addrec
1533   // way we avoid introducing new IVs to carry on the computation of the addrec
1534   //
1535   // For nested addrecs evaluateAtIteration might need a canonical IV of a
1536   // type wider than the addrec itself. Emitting a canonical IV of the
1537   // proper type might produce illegal types, for example expanding an i64
1538   // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
1539   // back to non-canonical mode for nested addrecs.
1540   if (!CanonicalMode || (S->getNumOperands() > 2))
1541     return expandAddRecExprLiterally(S);
1542 
1543   Type *Ty = SE.getEffectiveSCEVType(S->getType());
1544   const Loop *L = S->getLoop();
1545 
1546   // First check for an existing canonical IV in a suitable type.
1547   PHINode *CanonicalIV = nullptr;
1548   if (PHINode *PN = L->getCanonicalInductionVariable())
1549     if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1550       CanonicalIV = PN;
1551 
1552   // Rewrite an AddRec in terms of the canonical induction variable, if
1553   // its type is narrower.
1554   if (CanonicalIV &&
1555       SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
1556       !S->getType()->isPointerTy()) {
1557     SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1558     for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1559       NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
1560     Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1561                                        S->getNoWrapFlags(SCEV::FlagNW)));
1562     BasicBlock::iterator NewInsertPt =
1563         findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
1564     V = expandCodeForImpl(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
1565                           &*NewInsertPt, false);
1566     return V;
1567   }
1568 
1569   // {X,+,F} --> X + {0,+,F}
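  // e.g. (illustrative) {4,+,2}<%L> is split into 4 + {0,+,2}<%L>, so the
  // zero-start addrec below can be expressed via the canonical IV.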
1570   if (!S->getStart()->isZero()) {
1571     SmallVector<const SCEV *, 4> NewOps(S->operands());
1572     NewOps[0] = SE.getConstant(Ty, 0);
1573     const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1574                                         S->getNoWrapFlags(SCEV::FlagNW));
1575 
1576     // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
1577     // comments on expandAddToGEP for details.
1578     const SCEV *Base = S->getStart();
1579     // Dig into the expression to find the pointer base for a GEP.
1580     const SCEV *ExposedRest = Rest;
1581     ExposePointerBase(Base, ExposedRest, SE);
1582     // If we found a pointer, expand the AddRec with a GEP.
1583     if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
1584       Value *StartV = expand(Base);
1585       assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1586       return expandAddToGEP(ExposedRest, PTy, Ty, StartV);
1587     }
1588 
1589     // Just do a normal add. Pre-expand the operands to suppress folding.
1590     //
1591     // The LHS and RHS values are factored out of the expand call to make the
1592     // output independent of the argument evaluation order.
1593     const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1594     const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1595     return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1596   }
1597 
1598   // If we don't yet have a canonical IV, create one.
1599   if (!CanonicalIV) {
1600     // Create and insert the PHI node for the induction variable in the
1601     // specified loop.
1602     BasicBlock *Header = L->getHeader();
1603     pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1604     CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1605                                   &Header->front());
1606     rememberInstruction(CanonicalIV);
1607 
1608     SmallSet<BasicBlock *, 4> PredSeen;
1609     Constant *One = ConstantInt::get(Ty, 1);
1610     for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1611       BasicBlock *HP = *HPI;
1612       if (!PredSeen.insert(HP).second) {
1613         // There must be an incoming value for each predecessor, even the
1614         // duplicates!
1615         CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1616         continue;
1617       }
1618 
1619       if (L->contains(HP)) {
1620         // Insert a unit add instruction right before the terminator
1621         // corresponding to the back-edge.
1622         Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1623                                                      "indvar.next",
1624                                                      HP->getTerminator());
1625         Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1626         rememberInstruction(Add);
1627         CanonicalIV->addIncoming(Add, HP);
1628       } else {
1629         CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1630       }
1631     }
1632   }
1633 
1634   // {0,+,1} --> Insert a canonical induction variable into the loop!
1635   if (S->isAffine() && S->getOperand(1)->isOne()) {
1636     assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1637            "IVs with types different from the canonical IV should "
1638            "already have been handled!");
1639     return CanonicalIV;
1640   }
1641 
1642   // {0,+,F} --> {0,+,1} * F
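  // e.g. (illustrative, names hypothetical) {0,+,3}<%L> becomes
  // {0,+,1}<%L> * 3, i.e. a single multiply of the canonical IV:
  //   %r = mul i64 %indvar, 3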
1643 
1644   // If this is a simple linear addrec, emit it now as a special case.
1645   if (S->isAffine())    // {0,+,F} --> i*F
1646     return
1647       expand(SE.getTruncateOrNoop(
1648         SE.getMulExpr(SE.getUnknown(CanonicalIV),
1649                       SE.getNoopOrAnyExtend(S->getOperand(1),
1650                                             CanonicalIV->getType())),
1651         Ty));
1652 
1653   // If this is a chain of recurrences, turn it into a closed form, using the
1654   // folders, then expandCodeFor the closed form.  This allows the folders to
1655   // simplify the expression without having to build a bunch of special code
1656   // into this folder.
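  // Illustrative example: the quadratic addrec {0,+,1,+,1}<%L> evaluated at
  // the symbolic iteration i folds, by binomial expansion, to the closed form
  // i + i*(i-1)/2, which is then expanded as ordinary arithmetic SCEVs.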
1657   const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.
1658 
1659   // Promote S up to the canonical IV type, if the cast is foldable.
1660   const SCEV *NewS = S;
1661   const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1662   if (isa<SCEVAddRecExpr>(Ext))
1663     NewS = Ext;
1664 
1665   const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1666   //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";
1667 
1668   // Truncate the result down to the original type, if needed.
1669   const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1670   return expand(T);
1671 }
1672 
1673 Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
1674   Value *V =
1675       expandCodeForImpl(S->getOperand(), S->getOperand()->getType(), false);
1676   return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1677                            GetOptimalInsertionPointForCastOf(V));
1678 }
1679 
1680 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1681   Type *Ty = SE.getEffectiveSCEVType(S->getType());
1682   Value *V = expandCodeForImpl(
1683       S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1684       false);
1685   return Builder.CreateTrunc(V, Ty);
1686 }
1687 
1688 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1689   Type *Ty = SE.getEffectiveSCEVType(S->getType());
1690   Value *V = expandCodeForImpl(
1691       S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1692       false);
1693   return Builder.CreateZExt(V, Ty);
1694 }
1695 
1696 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1697   Type *Ty = SE.getEffectiveSCEVType(S->getType());
1698   Value *V = expandCodeForImpl(
1699       S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1700       false);
1701   return Builder.CreateSExt(V, Ty);
1702 }
1703 
1704 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1705   Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1706   Type *Ty = LHS->getType();
1707   for (int i = S->getNumOperands()-2; i >= 0; --i) {
1708     // In the case of mixed integer and pointer types, do the
1709     // rest of the comparisons as integer.
1710     Type *OpTy = S->getOperand(i)->getType();
1711     if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1712       Ty = SE.getEffectiveSCEVType(Ty);
1713       LHS = InsertNoopCastOfTo(LHS, Ty);
1714     }
1715     Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1716     Value *Sel;
1717     if (Ty->isIntegerTy())
1718       Sel = Builder.CreateIntrinsic(Intrinsic::smax, {Ty}, {LHS, RHS},
1719                                     /*FMFSource=*/nullptr, "smax");
1720     else {
1721       Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1722       Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
1723     }
1724     LHS = Sel;
1725   }
1726   // In the case of mixed integer and pointer types, cast the
1727   // final result back to the pointer type.
1728   if (LHS->getType() != S->getType())
1729     LHS = InsertNoopCastOfTo(LHS, S->getType());
1730   return LHS;
1731 }
1732 
1733 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1734   Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1735   Type *Ty = LHS->getType();
1736   for (int i = S->getNumOperands()-2; i >= 0; --i) {
1737     // In the case of mixed integer and pointer types, do the
1738     // rest of the comparisons as integer.
1739     Type *OpTy = S->getOperand(i)->getType();
1740     if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1741       Ty = SE.getEffectiveSCEVType(Ty);
1742       LHS = InsertNoopCastOfTo(LHS, Ty);
1743     }
1744     Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1745     Value *Sel;
1746     if (Ty->isIntegerTy())
1747       Sel = Builder.CreateIntrinsic(Intrinsic::umax, {Ty}, {LHS, RHS},
1748                                     /*FMFSource=*/nullptr, "umax");
1749     else {
1750       Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1751       Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
1752     }
1753     LHS = Sel;
1754   }
1755   // In the case of mixed integer and pointer types, cast the
1756   // final result back to the pointer type.
1757   if (LHS->getType() != S->getType())
1758     LHS = InsertNoopCastOfTo(LHS, S->getType());
1759   return LHS;
1760 }
1761 
1762 Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
1763   Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1764   Type *Ty = LHS->getType();
1765   for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1766     // In the case of mixed integer and pointer types, do the
1767     // rest of the comparisons as integer.
1768     Type *OpTy = S->getOperand(i)->getType();
1769     if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1770       Ty = SE.getEffectiveSCEVType(Ty);
1771       LHS = InsertNoopCastOfTo(LHS, Ty);
1772     }
1773     Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1774     Value *Sel;
1775     if (Ty->isIntegerTy())
1776       Sel = Builder.CreateIntrinsic(Intrinsic::smin, {Ty}, {LHS, RHS},
1777                                     /*FMFSource=*/nullptr, "smin");
1778     else {
1779       Value *ICmp = Builder.CreateICmpSLT(LHS, RHS);
1780       Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smin");
1781     }
1782     LHS = Sel;
1783   }
1784   // In the case of mixed integer and pointer types, cast the
1785   // final result back to the pointer type.
1786   if (LHS->getType() != S->getType())
1787     LHS = InsertNoopCastOfTo(LHS, S->getType());
1788   return LHS;
1789 }
1790 
1791 Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
1792   Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1793   Type *Ty = LHS->getType();
1794   for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1795     // In the case of mixed integer and pointer types, do the
1796     // rest of the comparisons as integer.
1797     Type *OpTy = S->getOperand(i)->getType();
1798     if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1799       Ty = SE.getEffectiveSCEVType(Ty);
1800       LHS = InsertNoopCastOfTo(LHS, Ty);
1801     }
1802     Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1803     Value *Sel;
1804     if (Ty->isIntegerTy())
1805       Sel = Builder.CreateIntrinsic(Intrinsic::umin, {Ty}, {LHS, RHS},
1806                                     /*FMFSource=*/nullptr, "umin");
1807     else {
1808       Value *ICmp = Builder.CreateICmpULT(LHS, RHS);
1809       Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umin");
1810     }
1811     LHS = Sel;
1812   }
1813   // In the case of mixed integer and pointer types, cast the
1814   // final result back to the pointer type.
1815   if (LHS->getType() != S->getType())
1816     LHS = InsertNoopCastOfTo(LHS, S->getType());
1817   return LHS;
1818 }
1819 
1820 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
1821                                        Instruction *IP, bool Root) {
1822   setInsertPoint(IP);
1823   Value *V = expandCodeForImpl(SH, Ty, Root);
1824   return V;
1825 }
1826 
1827 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root) {
1828   // Expand the code for this SCEV.
1829   Value *V = expand(SH);
1830 
1831   if (PreserveLCSSA) {
1832     if (auto *Inst = dyn_cast<Instruction>(V)) {
1833       // Create a temporary instruction at the current insertion point, so we
1834       // can hand it off to the helper to create LCSSA PHIs if required for the
1835       // new use.
1836       // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
1837       // would accept an insertion point and return an LCSSA phi for that
1838       // insertion point, so there is no need to insert & remove the temporary
1839       // instruction.
1840       Instruction *Tmp;
1841       if (Inst->getType()->isIntegerTy())
1842         Tmp =
1843             cast<Instruction>(Builder.CreateAdd(Inst, Inst, "tmp.lcssa.user"));
1844       else {
1845         assert(Inst->getType()->isPointerTy());
1846         Tmp = cast<Instruction>(Builder.CreatePtrToInt(
1847             Inst, Type::getInt32Ty(Inst->getContext()), "tmp.lcssa.user"));
1848       }
1849       V = fixupLCSSAFormFor(Tmp, 0);
1850 
1851       // Clean up temporary instruction.
1852       InsertedValues.erase(Tmp);
1853       InsertedPostIncValues.erase(Tmp);
1854       Tmp->eraseFromParent();
1855     }
1856   }
1857 
1858   InsertedExpressions[std::make_pair(SH, &*Builder.GetInsertPoint())] = V;
1859   if (Ty) {
1860     assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1861            "non-trivial casts should be done with the SCEVs directly!");
1862     V = InsertNoopCastOfTo(V, Ty);
1863   }
1864   return V;
1865 }
1866 
1867 ScalarEvolution::ValueOffsetPair
1868 SCEVExpander::FindValueInExprValueMap(const SCEV *S,
1869                                       const Instruction *InsertPt) {
1870   auto *Set = SE.getSCEVValues(S);
1871   // If the expansion is not in CanonicalMode and the SCEV contains any
1872   // scAddRecExpr subexpression, the SCEV must be expanded literally.
1873   if (CanonicalMode || !SE.containsAddRecurrence(S)) {
1874     // If S is scConstant, it may be worse to reuse an existing Value.
1875     if (S->getSCEVType() != scConstant && Set) {
1876       // Choose a Value from the set which dominates the insertPt.
1877       // insertPt should be inside the Value's parent loop so as not to break
1878       // the LCSSA form.
1879       for (auto const &VOPair : *Set) {
1880         Value *V = VOPair.first;
1881         ConstantInt *Offset = VOPair.second;
1882         Instruction *EntInst = nullptr;
1883         if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
1884             S->getType() == V->getType() &&
1885             EntInst->getFunction() == InsertPt->getFunction() &&
1886             SE.DT.dominates(EntInst, InsertPt) &&
1887             (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1888              SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1889           return {V, Offset};
1890       }
1891     }
1892   }
1893   return {nullptr, nullptr};
1894 }
1895 
1896 // The expansion of SCEV will either reuse a previous Value in ExprValueMap,
1897 // or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
1898 // and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
1899 // literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1900 // the expansion will try to reuse Value from ExprValueMap, and only when it
1901 // fails, expand the SCEV literally.
1902 Value *SCEVExpander::expand(const SCEV *S) {
1903   // Compute an insertion point for this SCEV object. Hoist the instructions
1904   // as far out in the loop nest as possible.
1905   Instruction *InsertPt = &*Builder.GetInsertPoint();
1906 
1907   // We can move the insertion point only if there are no div or rem operations;
1908   // otherwise we risk moving it across a check for a zero denominator.
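  // e.g. (illustrative) a udiv whose divisor is only known to be non-zero
  // because of a dominating "if (%d != 0)" guard must stay below that guard;
  // see the PR35406 reference below.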
1909   auto SafeToHoist = [](const SCEV *S) {
1910     return !SCEVExprContains(S, [](const SCEV *S) {
1911               if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1912                 if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1913                   // Division by non-zero constants can be hoisted.
1914                   return SC->getValue()->isZero();
1915                 // All other divisions should not be moved as they may be
1916                 // divisions by zero and should be kept within the
1917                 // conditions of the surrounding loops that guard their
1918                 // execution (see PR35406).
1919                 return true;
1920               }
1921               return false;
1922             });
1923   };
1924   if (SafeToHoist(S)) {
1925     for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1926          L = L->getParentLoop()) {
1927       if (SE.isLoopInvariant(S, L)) {
1928         if (!L) break;
1929         if (BasicBlock *Preheader = L->getLoopPreheader())
1930           InsertPt = Preheader->getTerminator();
1931         else
1932           // LSR sets the insertion point for AddRec start/step values to the
1933           // block start to simplify value reuse, even though it's an invalid
1934           // position. SCEVExpander must correct for this in all cases.
1935           InsertPt = &*L->getHeader()->getFirstInsertionPt();
1936       } else {
1937         // If the SCEV is computable at this level, insert it into the header
1938         // after the PHIs (and after any other instructions that we've inserted
1939         // there) so that it is guaranteed to dominate any user inside the loop.
1940         if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1941           InsertPt = &*L->getHeader()->getFirstInsertionPt();
1942 
1943         while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
1944                (isInsertedInstruction(InsertPt) ||
1945                 isa<DbgInfoIntrinsic>(InsertPt))) {
1946           InsertPt = &*std::next(InsertPt->getIterator());
1947         }
1948         break;
1949       }
1950     }
1951   }
1952 
1953   // Check to see if we already expanded this here.
1954   auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1955   if (I != InsertedExpressions.end())
1956     return I->second;
1957 
1958   SCEVInsertPointGuard Guard(Builder, this);
1959   Builder.SetInsertPoint(InsertPt);
1960 
1961   // Expand the expression into instructions.
1962   ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
1963   Value *V = VO.first;
1964 
1965   if (!V)
1966     V = visit(S);
1967   else if (VO.second) {
1968     if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
1969       Type *Ety = Vty->getPointerElementType();
1970       int64_t Offset = VO.second->getSExtValue();
1971       int64_t ESize = SE.getTypeSizeInBits(Ety);
1972       if ((Offset * 8) % ESize == 0) {
1973         ConstantInt *Idx =
1974             ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
1975         V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
1976       } else {
1977         ConstantInt *Idx =
1978             ConstantInt::getSigned(VO.second->getType(), -Offset);
1979         unsigned AS = Vty->getAddressSpace();
1980         V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
1981         V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
1982                               "uglygep");
1983         V = Builder.CreateBitCast(V, Vty);
1984       }
1985     } else {
1986       V = Builder.CreateSub(V, VO.second);
1987     }
1988   }
1989   // Remember the expanded value for this SCEV at this location.
1990   //
1991   // This is independent of PostIncLoops. The mapped value simply materializes
1992   // the expression at this insertion point. If the mapped value happened to be
1993   // a postinc expansion, it could be reused by a non-postinc user, but only if
1994   // its insertion point was already at the head of the loop.
1995   InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1996   return V;
1997 }
1998 
1999 void SCEVExpander::rememberInstruction(Value *I) {
2000   auto DoInsert = [this](Value *V) {
2001     if (!PostIncLoops.empty())
2002       InsertedPostIncValues.insert(V);
2003     else
2004       InsertedValues.insert(V);
2005   };
2006   DoInsert(I);
2007 
2008   if (!PreserveLCSSA)
2009     return;
2010 
2011   if (auto *Inst = dyn_cast<Instruction>(I)) {
2012     // A new instruction has been added, which might introduce new uses outside
2013     // a defining loop. Fix LCSSA form for each operand of the new instruction,
2014     // if required.
2015     for (unsigned OpIdx = 0, OpEnd = Inst->getNumOperands(); OpIdx != OpEnd;
2016          OpIdx++)
2017       fixupLCSSAFormFor(Inst, OpIdx);
2018   }
2019 }
2020 
2021 /// replaceCongruentIVs - Check for congruent phis in this loop header and
2022 /// replace them with their most canonical representative. Return the number of
2023 /// phis eliminated.
2024 ///
2025 /// This does not depend on any SCEVExpander state but should be used in
2026 /// the same context that SCEVExpander is used.
2027 unsigned
2028 SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
2029                                   SmallVectorImpl<WeakTrackingVH> &DeadInsts,
2030                                   const TargetTransformInfo *TTI) {
2031   // Find integer phis in order of decreasing width.
2032   SmallVector<PHINode*, 8> Phis;
2033   for (PHINode &PN : L->getHeader()->phis())
2034     Phis.push_back(&PN);
2035 
2036   if (TTI)
2037     llvm::sort(Phis, [](Value *LHS, Value *RHS) {
2038       // Put pointers at the back and make sure pointer < pointer = false.
2039       if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
2040         return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
2041       return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() <
2042              LHS->getType()->getPrimitiveSizeInBits().getFixedSize();
2043     });
2044 
2045   unsigned NumElim = 0;
2046   DenseMap<const SCEV *, PHINode *> ExprToIVMap;
2047   // Process phis from wide to narrow. Map wide phis to their truncation
2048   // so narrow phis can reuse them.
2049   for (PHINode *Phi : Phis) {
2050     auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
2051       if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
2052         return V;
2053       if (!SE.isSCEVable(PN->getType()))
2054         return nullptr;
2055       auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
2056       if (!Const)
2057         return nullptr;
2058       return Const->getValue();
2059     };
2060 
2061     // Fold constant phis. They may be congruent to other constant phis and
2062     // would confuse the logic below that expects proper IVs.
2063     if (Value *V = SimplifyPHINode(Phi)) {
2064       if (V->getType() != Phi->getType())
2065         continue;
2066       Phi->replaceAllUsesWith(V);
2067       DeadInsts.emplace_back(Phi);
2068       ++NumElim;
2069       SCEV_DEBUG_WITH_TYPE(DebugType,
2070                            dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
2071                                   << '\n');
2072       continue;
2073     }
2074 
2075     if (!SE.isSCEVable(Phi->getType()))
2076       continue;
2077 
2078     PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
2079     if (!OrigPhiRef) {
2080       OrigPhiRef = Phi;
2081       if (Phi->getType()->isIntegerTy() && TTI &&
2082           TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
2083         // This phi can be freely truncated to the narrowest phi type. Map the
2084         // truncated expression to it so it will be reused for narrow types.
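        // For example (illustrative): an i64 phi for {0,+,1}<%L> is also
        // recorded under (trunc {0,+,1}<%L> to i32), so a congruent i32 phi
        // seen later can reuse the wide phi through a trunc.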
2085         const SCEV *TruncExpr =
2086           SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
2087         ExprToIVMap[TruncExpr] = Phi;
2088       }
2089       continue;
2090     }
2091 
2092     // Replacing a pointer phi with an integer phi or vice-versa doesn't make
2093     // sense.
2094     if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
2095       continue;
2096 
2097     if (BasicBlock *LatchBlock = L->getLoopLatch()) {
2098       Instruction *OrigInc = dyn_cast<Instruction>(
2099           OrigPhiRef->getIncomingValueForBlock(LatchBlock));
2100       Instruction *IsomorphicInc =
2101           dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
2102 
2103       if (OrigInc && IsomorphicInc) {
2104         // If this phi has the same width but is more canonical, replace the
2105         // original with it. As part of the "more canonical" determination,
2106         // respect a prior decision to use an IV chain.
2107         if (OrigPhiRef->getType() == Phi->getType() &&
2108             !(ChainedPhis.count(Phi) ||
2109               isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
2110             (ChainedPhis.count(Phi) ||
2111              isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
2112           std::swap(OrigPhiRef, Phi);
2113           std::swap(OrigInc, IsomorphicInc);
2114         }
2115         // Replacing the congruent phi is sufficient because acyclic
2116         // redundancy elimination, CSE/GVN, should handle the
2117         // rest. However, once SCEV proves that a phi is congruent,
2118         // it's often the head of an IV user cycle that is isomorphic
2119         // with the original phi. It's worth eagerly cleaning up the
2120         // common case of a single IV increment so that DeleteDeadPHIs
2121         // can remove cycles that had postinc uses.
2122         const SCEV *TruncExpr =
2123             SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
2124         if (OrigInc != IsomorphicInc &&
2125             TruncExpr == SE.getSCEV(IsomorphicInc) &&
2126             SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
2127             hoistIVInc(OrigInc, IsomorphicInc)) {
2128           SCEV_DEBUG_WITH_TYPE(
2129               DebugType, dbgs() << "INDVARS: Eliminated congruent iv.inc: "
2130                                 << *IsomorphicInc << '\n');
2131           Value *NewInc = OrigInc;
2132           if (OrigInc->getType() != IsomorphicInc->getType()) {
2133             Instruction *IP = nullptr;
2134             if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
2135               IP = &*PN->getParent()->getFirstInsertionPt();
2136             else
2137               IP = OrigInc->getNextNode();
2138 
2139             IRBuilder<> Builder(IP);
2140             Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
2141             NewInc = Builder.CreateTruncOrBitCast(
2142                 OrigInc, IsomorphicInc->getType(), IVName);
2143           }
2144           IsomorphicInc->replaceAllUsesWith(NewInc);
2145           DeadInsts.emplace_back(IsomorphicInc);
2146         }
2147       }
2148     }
2149     SCEV_DEBUG_WITH_TYPE(DebugType,
2150                          dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
2151                                 << '\n');
2152     SCEV_DEBUG_WITH_TYPE(
2153         DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
2154     ++NumElim;
2155     Value *NewIV = OrigPhiRef;
2156     if (OrigPhiRef->getType() != Phi->getType()) {
2157       IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
2158       Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
2159       NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
2160     }
2161     Phi->replaceAllUsesWith(NewIV);
2162     DeadInsts.emplace_back(Phi);
2163   }
2164   return NumElim;
2165 }
2166 
2167 Optional<ScalarEvolution::ValueOffsetPair>
2168 SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
2169                                           Loop *L) {
2170   using namespace llvm::PatternMatch;
2171 
2172   SmallVector<BasicBlock *, 4> ExitingBlocks;
2173   L->getExitingBlocks(ExitingBlocks);
2174 
2175   // Look for a suitable value in simple conditions at the loop exits.
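  // For illustration (hedged, names hypothetical): an exiting block ending in
  //   br i1 (icmp ult %iv.next, %len), label %body, label %exit
  // lets us reuse %iv.next or %len when it is an instruction whose SCEV
  // equals S and which dominates At.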
2176   for (BasicBlock *BB : ExitingBlocks) {
2177     ICmpInst::Predicate Pred;
2178     Instruction *LHS, *RHS;
2179 
2180     if (!match(BB->getTerminator(),
2181                m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
2182                     m_BasicBlock(), m_BasicBlock())))
2183       continue;
2184 
2185     if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
2186       return ScalarEvolution::ValueOffsetPair(LHS, nullptr);
2187 
2188     if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
2189       return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
2190   }
2191 
2192   // Use expand's logic which is used for reusing a previous Value in
2193   // ExprValueMap.
2194   ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
2195   if (VO.first)
2196     return VO;
2197 
2198   // There is potential to make this significantly smarter, but this simple
2199   // heuristic already gets some interesting cases.
2200 
2201   // Cannot find a suitable value.
2202   return None;
2203 }
2204 
2205 template<typename T> static InstructionCost costAndCollectOperands(
2206   const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
2207   TargetTransformInfo::TargetCostKind CostKind,
2208   SmallVectorImpl<SCEVOperand> &Worklist) {
2209 
2210   const T *S = cast<T>(WorkItem.S);
2211   InstructionCost Cost = 0;
2212   // Object to help map SCEV operands to expanded IR instructions.
2213   struct OperationIndices {
2214     OperationIndices(unsigned Opc, size_t min, size_t max) :
2215       Opcode(Opc), MinIdx(min), MaxIdx(max) { }
2216     unsigned Opcode;
2217     size_t MinIdx;
2218     size_t MaxIdx;
2219   };
2220 
2221   // Collect the operations of all the instructions that will be needed to
2222   // expand the SCEVExpr. This is so that when we come to cost the operands,
2223   // we know what the generated user(s) will be.
2224   SmallVector<OperationIndices, 2> Operations;
2225 
2226   auto CastCost = [&](unsigned Opcode) -> InstructionCost {
2227     Operations.emplace_back(Opcode, 0, 0);
2228     return TTI.getCastInstrCost(Opcode, S->getType(),
2229                                 S->getOperand(0)->getType(),
2230                                 TTI::CastContextHint::None, CostKind);
2231   };
2232 
2233   auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
2234                        unsigned MinIdx = 0,
2235                        unsigned MaxIdx = 1) -> InstructionCost {
2236     Operations.emplace_back(Opcode, MinIdx, MaxIdx);
2237     return NumRequired *
2238       TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
2239   };
2240 
2241   auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
2242                         unsigned MaxIdx) -> InstructionCost {
2243     Operations.emplace_back(Opcode, MinIdx, MaxIdx);
2244     Type *OpType = S->getOperand(0)->getType();
2245     return NumRequired * TTI.getCmpSelInstrCost(
2246                              Opcode, OpType, CmpInst::makeCmpResultType(OpType),
2247                              CmpInst::BAD_ICMP_PREDICATE, CostKind);
2248   };
2249 
2250   switch (S->getSCEVType()) {
2251   case scCouldNotCompute:
2252     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2253   case scUnknown:
2254   case scConstant:
2255     return 0;
2256   case scPtrToInt:
2257     Cost = CastCost(Instruction::PtrToInt);
2258     break;
2259   case scTruncate:
2260     Cost = CastCost(Instruction::Trunc);
2261     break;
2262   case scZeroExtend:
2263     Cost = CastCost(Instruction::ZExt);
2264     break;
2265   case scSignExtend:
2266     Cost = CastCost(Instruction::SExt);
2267     break;
2268   case scUDivExpr: {
2269     unsigned Opcode = Instruction::UDiv;
2270     if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
2271       if (SC->getAPInt().isPowerOf2())
2272         Opcode = Instruction::LShr;
2273     Cost = ArithCost(Opcode, 1);
2274     break;
2275   }
2276   case scAddExpr:
2277     Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
2278     break;
2279   case scMulExpr:
2280     // TODO: this is a very pessimistic cost modelling for Mul,
2281     // because of the binary exponentiation (Bin Pow) algorithm actually used
2282     // by the expander; see SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
2283     Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
2284     break;
2285   case scSMaxExpr:
2286   case scUMaxExpr:
2287   case scSMinExpr:
2288   case scUMinExpr: {
2289     // FIXME: should this ask the cost for intrinsics?
2290     Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2291     Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2292     break;
2293   }
2294   case scAddRecExpr: {
2295     // In this polynomial, we may have some zero operands, and we shouldn't
2296     // really charge for those. So how many non-zero coefficients are there?
2297     int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
2298                                     return !Op->isZero();
2299                                   });
2300 
2301     assert(NumTerms >= 1 && "Polynomial should have at least one term.");
2302     assert(!(*std::prev(S->operands().end()))->isZero() &&
2303            "Last operand should not be zero");
2304 
2305     // Ignoring the constant term (operand 0), how many coefficients are u> 1?
2306     int NumNonZeroDegreeNonOneTerms =
2307       llvm::count_if(S->operands(), [](const SCEV *Op) {
2308                       auto *SConst = dyn_cast<SCEVConstant>(Op);
2309                       return !SConst || SConst->getAPInt().ugt(1);
2310                     });
2311 
2312     // Much like with a normal add expr, the polynomial will require
2313     // one less addition than the number of its terms.
2314     InstructionCost AddCost = ArithCost(Instruction::Add, NumTerms - 1,
2315                                         /*MinIdx*/ 1, /*MaxIdx*/ 1);
2316     // Here, *each* one of those will require a multiplication.
2317     InstructionCost MulCost =
2318         ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
2319     Cost = AddCost + MulCost;
2320 
2321     // What is the degree of this polynomial?
2322     int PolyDegree = S->getNumOperands() - 1;
2323     assert(PolyDegree >= 1 && "Should be at least affine.");
2324 
2325     // The final term will be:
2326     //   Op_{PolyDegree} * x ^ {PolyDegree}
2327     // Where  x ^ {PolyDegree}  will again require PolyDegree-1 mul operations.
2328     // Note that  x ^ {PolyDegree} = x * x ^ {PolyDegree-1}  so charging for
2329     // x ^ {PolyDegree}  will give us  x ^ {2} .. x ^ {PolyDegree-1}  for free.
2330     // FIXME: this is conservatively correct, but might be overly pessimistic.
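    // Worked example (illustrative arithmetic): for {6,+,3,+,2}, NumTerms is
    // 3, so 2 adds are charged; all three coefficients are u> 1, so MulCost
    // covers 3 muls; and PolyDegree is 2, so one extra MulCost is charged for
    // computing x^2.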
2331     Cost += MulCost * (PolyDegree - 1);
2332     break;
2333   }
2334   }
2335 
2336   for (auto &CostOp : Operations) {
2337     for (auto SCEVOp : enumerate(S->operands())) {
2338       // Clamp the index to account for multiple IR operations being chained.
2339       size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2340       size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2341       Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
2342     }
2343   }
2344   return Cost;
2345 }
2346 
2347 bool SCEVExpander::isHighCostExpansionHelper(
2348     const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
2349     InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
2350     SmallPtrSetImpl<const SCEV *> &Processed,
2351     SmallVectorImpl<SCEVOperand> &Worklist) {
2352   if (Cost > Budget)
2353     return true; // Already run out of budget, give up.
2354 
2355   const SCEV *S = WorkItem.S;
2356   // Was the cost of expansion of this expression already accounted for?
2357   if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
2358     return false; // We have already accounted for this expression.
2359 
2360   // If we can find an existing value for this scev available at the point "At"
2361   // then consider the expression cheap.
2362   if (getRelatedExistingExpansion(S, &At, L))
2363     return false; // Consider the expression to be free.
2364 
2365   TargetTransformInfo::TargetCostKind CostKind =
2366       L->getHeader()->getParent()->hasMinSize()
2367           ? TargetTransformInfo::TCK_CodeSize
2368           : TargetTransformInfo::TCK_RecipThroughput;
2369 
2370   switch (S->getSCEVType()) {
2371   case scCouldNotCompute:
2372     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2373   case scUnknown:
2374     // Assume to be zero-cost.
2375     return false;
2376   case scConstant: {
2377     // Only evaluate the costs of constants when optimizing for size.
2378     if (CostKind != TargetTransformInfo::TCK_CodeSize)
2379       return false;
2380     const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
2381     Type *Ty = S->getType();
2382     Cost += TTI.getIntImmCostInst(
2383         WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
2384     return Cost > Budget;
2385   }
2386   case scTruncate:
2387   case scPtrToInt:
2388   case scZeroExtend:
2389   case scSignExtend: {
2390     Cost +=
2391         costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
2392     return false; // Will answer upon next entry into this function.
2393   }
2394   case scUDivExpr: {
2395     // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
2396     // HowManyLessThans produced to compute a precise expression, rather than a
2397     // UDiv from the user's code. If we can't find a UDiv in the code with some
2398     // simple searching, we need to account for its cost.
2399 
2400     // At the beginning of this function we already tried to find an existing
2401     // value for plain 'S'. Now try to look up 'S + 1', since it is a common
2402     // pattern involving division. This is just a simple search heuristic.
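    // e.g. (illustrative) if S is the backedge-taken count (%n /u 4), the
    // trip count (%n /u 4) + 1 may already exist in the IR, so looking up
    // 'S + 1' can find it.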
2403     if (getRelatedExistingExpansion(
2404             SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
2405       return false; // Consider it to be free.
2406 
2407     Cost +=
2408         costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
2409     return false; // Will answer upon next entry into this function.
2410   }
2411   case scAddExpr:
2412   case scMulExpr:
2413   case scUMaxExpr:
2414   case scSMaxExpr:
2415   case scUMinExpr:
2416   case scSMinExpr: {
2417     assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2418            "Nary expr should have more than 1 operand.");
2419     // The simple nary expr will require one less op (or pair of ops)
2420     // than the number of its terms.
2421     Cost +=
2422         costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
2423     return Cost > Budget;
2424   }
2425   case scAddRecExpr: {
2426     assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2427            "Polynomial should be at least linear");
2428     Cost += costAndCollectOperands<SCEVAddRecExpr>(
2429         WorkItem, TTI, CostKind, Worklist);
2430     return Cost > Budget;
2431   }
2432   }
2433   llvm_unreachable("Unknown SCEV kind!");
2434 }
2435 
2436 Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2437                                             Instruction *IP) {
2438   assert(IP);
2439   switch (Pred->getKind()) {
2440   case SCEVPredicate::P_Union:
2441     return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2442   case SCEVPredicate::P_Equal:
2443     return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
2444   case SCEVPredicate::P_Wrap: {
2445     auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2446     return expandWrapPredicate(AddRecPred, IP);
2447   }
2448   }
2449   llvm_unreachable("Unknown SCEV predicate type");
2450 }
2451 
2452 Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
2453                                           Instruction *IP) {
2454   Value *Expr0 =
2455       expandCodeForImpl(Pred->getLHS(), Pred->getLHS()->getType(), IP, false);
2456   Value *Expr1 =
2457       expandCodeForImpl(Pred->getRHS(), Pred->getRHS()->getType(), IP, false);
2458 
2459   Builder.SetInsertPoint(IP);
2460   auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
2461   return I;
2462 }
2463 
2464 Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2465                                            Instruction *Loc, bool Signed) {
2466   assert(AR->isAffine() && "Cannot generate RT check for "
2467                            "non-affine expression");
2468 
2469   SCEVUnionPredicate Pred;
2470   const SCEV *ExitCount =
2471       SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
2472 
2473   assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2474 
2475   const SCEV *Step = AR->getStepRecurrence(SE);
2476   const SCEV *Start = AR->getStart();
2477 
2478   Type *ARTy = AR->getType();
2479   unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2480   unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2481 
2482   // The expression {Start,+,Step} has nusw/nssw if
2483   //   Step < 0, Start - |Step| * Backedge <= Start
2484   //   Step >= 0, Start + |Step| * Backedge > Start
2485   // and |Step| * Backedge doesn't unsigned overflow.
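  // Worked example (illustrative, names hypothetical): for {%start,+,-4} with
  // backedge-taken count %n, the code below computes |Step| * %n = 4 * %n via
  // umul.with.overflow and reports wrap when %start - 4*%n > %start (in the
  // requested signedness) or when the multiplication itself overflows.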
2486 
2487   IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
2488   Builder.SetInsertPoint(Loc);
2489   Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc, false);
2490 
2491   IntegerType *Ty =
2492       IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2493   Type *ARExpandTy = DL.isNonIntegralPointerType(ARTy) ? ARTy : Ty;
2494 
2495   Value *StepValue = expandCodeForImpl(Step, Ty, Loc, false);
2496   Value *NegStepValue =
2497       expandCodeForImpl(SE.getNegativeSCEV(Step), Ty, Loc, false);
2498   Value *StartValue = expandCodeForImpl(
2499       isa<PointerType>(ARExpandTy) ? Start
2500                                    : SE.getPtrToIntExpr(Start, ARExpandTy),
2501       ARExpandTy, Loc, false);
2502 
2503   ConstantInt *Zero =
2504       ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));
2505 
2506   Builder.SetInsertPoint(Loc);
2507   // Compute |Step|
2508   Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2509   Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2510 
2511   // Get the backedge taken count and truncate or extend it to the AR type.
2512   Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2513   auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
2514                                          Intrinsic::umul_with_overflow, Ty);
2515 
2516   // Compute |Step| * Backedge
2517   CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2518   Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2519   Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2520 
2521   // Compute:
2522   //   Start + |Step| * Backedge < Start
2523   //   Start - |Step| * Backedge > Start
2524   Value *Add = nullptr, *Sub = nullptr;
2525   if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARExpandTy)) {
2526     const SCEV *MulS = SE.getSCEV(MulV);
2527     const SCEV *NegMulS = SE.getNegativeSCEV(MulS);
2528     Add = Builder.CreateBitCast(expandAddToGEP(MulS, ARPtrTy, Ty, StartValue),
2529                                 ARPtrTy);
2530     Sub = Builder.CreateBitCast(
2531         expandAddToGEP(NegMulS, ARPtrTy, Ty, StartValue), ARPtrTy);
2532   } else {
2533     Add = Builder.CreateAdd(StartValue, MulV);
2534     Sub = Builder.CreateSub(StartValue, MulV);
2535   }
2536 
2537   Value *EndCompareGT = Builder.CreateICmp(
2538       Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2539 
2540   Value *EndCompareLT = Builder.CreateICmp(
2541       Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2542 
2543   // Select the answer based on the sign of Step.
2544   Value *EndCheck =
2545       Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2546 
2547   // If the backedge taken count type is larger than the AR type,
2548   // check that we don't drop any bits by truncating it. If we are
2549   // dropping bits, then we have overflow (unless the step is zero).
2550   if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
2551     auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2552     auto *BackedgeCheck =
2553         Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2554                            ConstantInt::get(Loc->getContext(), MaxVal));
2555     BackedgeCheck = Builder.CreateAnd(
2556         BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2557 
2558     EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2559   }
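  // For instance, with a 64-bit backedge-taken count and an i8 AR, any count
  // above 255 together with a nonzero step already implies a wrap, regardless
  // of the multiply's overflow bit computed above.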
2560 
2561   return Builder.CreateOr(EndCheck, OfMul);
2562 }
2563 
2564 Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
2565                                          Instruction *IP) {
2566   const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2567   Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2568 
2569   // Add a check for NUSW
2570   if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2571     NUSWCheck = generateOverflowCheck(A, IP, false);
2572 
2573   // Add a check for NSSW
2574   if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2575     NSSWCheck = generateOverflowCheck(A, IP, true);
2576 
2577   if (NUSWCheck && NSSWCheck)
2578     return Builder.CreateOr(NUSWCheck, NSSWCheck);
2579 
2580   if (NUSWCheck)
2581     return NUSWCheck;
2582 
2583   if (NSSWCheck)
2584     return NSSWCheck;
2585 
2586   return ConstantInt::getFalse(IP->getContext());
2587 }
2588 
2589 Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
2590                                           Instruction *IP) {
2591   auto *BoolType = IntegerType::get(IP->getContext(), 1);
2592   Value *Check = ConstantInt::getNullValue(BoolType);
2593 
2594   // Loop over all checks in this set.
2595   for (const auto *Pred : Union->getPredicates()) {
2596     auto *NextCheck = expandCodeForPredicate(Pred, IP);
2597     Builder.SetInsertPoint(IP);
2598     Check = Builder.CreateOr(Check, NextCheck);
2599   }
2600 
2601   return Check;
2602 }
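// Illustrative driver for the predicate expanders above (a sketch only; the
// names and setup are invented, but real clients such as LoopVersioning
// follow this pattern):
//
//   SCEVExpander Exp(SE, DL, "scev.check");
//   Value *Failed =
//       Exp.expandCodeForPredicate(&Union, Preheader->getTerminator());
//   // Branch on 'Failed' to choose between the guarded and fallback loops.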
2603 
2604 Value *SCEVExpander::fixupLCSSAFormFor(Instruction *User, unsigned OpIdx) {
2605   assert(PreserveLCSSA);
2606   SmallVector<Instruction *, 1> ToUpdate;
2607 
2608   auto *OpV = User->getOperand(OpIdx);
2609   auto *OpI = dyn_cast<Instruction>(OpV);
2610   if (!OpI)
2611     return OpV;
2612 
2613   Loop *DefLoop = SE.LI.getLoopFor(OpI->getParent());
2614   Loop *UseLoop = SE.LI.getLoopFor(User->getParent());
2615   if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2616     return OpV;
2617 
2618   ToUpdate.push_back(OpI);
2619   SmallVector<PHINode *, 16> PHIsToRemove;
2620   formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, Builder, &PHIsToRemove);
2621   for (PHINode *PN : PHIsToRemove) {
2622     if (!PN->use_empty())
2623       continue;
2624     InsertedValues.erase(PN);
2625     InsertedPostIncValues.erase(PN);
2626     PN->eraseFromParent();
2627   }
2628 
2629   return User->getOperand(OpIdx);
2630 }
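// Illustrative IR for the rewrite performed by formLCSSAForInstructions above
// (block and value names invented):
//
//   loop:
//     %def = ...
//     br i1 %cond, label %exit, label %loop
//   exit:
//     %def.lcssa = phi i32 [ %def, %loop ]
//     ; out-of-loop users of %def are rewritten to use %def.lcssa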
2631 
2632 namespace {
2633 // Search for a SCEV subexpression that is not safe to expand.  Any expression
2634 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2635 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
2636 // instruction, but the important thing is that we prove the denominator is
2637 // nonzero before expansion.
2638 //
2639 // IVUsers already checks that IV-derived expressions are safe. So this check is
2640 // IVUsers already checks that IV-derived expressions are safe. So this check
2641 // is only needed when the expression includes some subexpression that is not
2642 // IV-derived.
2643 // Currently, we only allow division by a nonzero constant here. If this is
2644 // inadequate, we could easily allow division by SCEVUnknown by using
2645 // ValueTracking to check isKnownNonZero().
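// For example, {0,+,1}<%L> /u 4 is safe to expand, while %a /u %b with an
// arbitrary SCEVUnknown divisor %b is rejected, since %b might be zero at
// run time.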
2646 //
2647 // We cannot generally expand recurrences unless the step dominates the loop
2648 // header. The expander handles the special case of affine recurrences by
2649 // scaling the recurrence outside the loop, but this technique isn't generally
2650 // applicable. Expanding a nested recurrence outside a loop requires computing
2651 // binomial coefficients. This could be done, but the recurrence has to be in a
2652 // perfectly reduced form, which can't be guaranteed.
2653 struct SCEVFindUnsafe {
2654   ScalarEvolution &SE;
2655   bool IsUnsafe;
2656 
2657   SCEVFindUnsafe(ScalarEvolution &se) : SE(se), IsUnsafe(false) {}
2658 
2659   bool follow(const SCEV *S) {
2660     if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2661       const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
2662       if (!SC || SC->getValue()->isZero()) {
2663         IsUnsafe = true;
2664         return false;
2665       }
2666     }
2667     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2668       const SCEV *Step = AR->getStepRecurrence(SE);
2669       if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
2670         IsUnsafe = true;
2671         return false;
2672       }
2673     }
2674     return true;
2675   }
2676   bool isDone() const { return IsUnsafe; }
2677 };
2678 } // namespace
2679 
2680 namespace llvm {
2681 bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
2682   SCEVFindUnsafe Search(SE);
2683   visitAll(S, Search);
2684   return !Search.IsUnsafe;
2685 }
2686 
2687 bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
2688                       ScalarEvolution &SE) {
2689   if (!isSafeToExpand(S, SE))
2690     return false;
2691   // We have to prove that the expansion point of S dominates InsertionPoint.
2692   // This is easy when they are in different blocks, but hard when S must be
2693   // expanded somewhere inside the same block as our insertion point. What we
2694   // really need here is something analogous to an OrderedBasicBlock, but for
2695   // the moment, we paper over the problem by handling two common and
2696   // cheap-to-check cases.
2697   if (SE.properlyDominates(S, InsertionPoint->getParent()))
2698     return true;
2699   if (SE.dominates(S, InsertionPoint->getParent())) {
2700     if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
2701       return true;
2702     if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2703       if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue()))
2704         return true;
2705   }
2706   return false;
2707 }
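// Illustrative guarded expansion using the helpers above (a sketch; 'Exp' is
// an already-constructed SCEVExpander and 'IP' the intended insertion point):
//
//   Value *V = nullptr;
//   if (isSafeToExpandAt(S, IP, SE))
//     V = Exp.expandCodeFor(S, S->getType(), IP);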
2708 
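// SCEVExpanderCleaner undoes an expansion whose result the caller decided not
// to keep.  Typical use (a sketch; 'Profitable' is a made-up flag):
//
//   SCEVExpander Expander(SE, DL, "expander");
//   SCEVExpanderCleaner Cleaner(Expander, DT);
//   Value *V = Expander.expandCodeFor(S, Ty, IP);
//   if (Profitable)
//     Cleaner.markResultUsed(); // otherwise the destructor erases V again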
2709 void SCEVExpanderCleaner::cleanup() {
2710   // Result is used, nothing to remove.
2711   if (ResultUsed)
2712     return;
2713 
2714   auto InsertedInstructions = Expander.getAllInsertedInstructions();
2715 #ifndef NDEBUG
2716   SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(),
2717                                             InsertedInstructions.end());
2718   (void)InsertedSet;
2719 #endif
2720   // Clear the expander's internal sets first; they hold value handles into
       // the IR that we are about to erase.
2721   Expander.clear();
2722 
2723   // Sort so that earlier instructions do not dominate later instructions;
       // each user is then erased before the instructions it uses.
2724   stable_sort(InsertedInstructions, [this](Instruction *A, Instruction *B) {
2725     return DT.dominates(B, A);
2726   });
2727   // Remove all inserted instructions.
2728   for (Instruction *I : InsertedInstructions) {
2729 
2730 #ifndef NDEBUG
2731     assert(all_of(I->users(),
2732                   [&InsertedSet](Value *U) {
2733                     return InsertedSet.contains(cast<Instruction>(U));
2734                   }) &&
2735            "removed instruction should only be used by instructions inserted "
2736            "during expansion");
2737 #endif
2738     assert(!I->getType()->isVoidTy() &&
2739            "inserted instruction should have a non-void type");
2740     I->replaceAllUsesWith(UndefValue::get(I->getType()));
2741     I->eraseFromParent();
2742   }
2743 }
2744 } // namespace llvm
2745