//===- IndVarSimplify.cpp - Induction Variable Elimination ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into simpler forms suitable for subsequent
// analysis and transformation.
//
// If the trip count of a loop is computable, this pass also makes the following
// changes:
//   1. The exit condition for the loop is canonicalized to compare the
//      induction value against the exit value.  This turns loops like:
//        'for (i = 7; i*i < 1000; ++i)' into 'for (i = 0; i != 25; ++i)'
//   2. Any use outside of the loop of an expression derived from the indvar
//      is changed to compute the derived value outside of the loop, eliminating
//      the dependence on the exit value of the induction variable.  If the only
//      purpose of the loop is to compute the exit value of some derived
//      expression, this transformation will make the loop dead.
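//
//      As an illustrative sketch of change (2): in
//        'for (i = 0, s = 0; i != 10; ++i) s += 4;  use(s);'
//      the exit value of 's' is the computable constant 40, so 'use(s)'
//      becomes 'use(40)' and the loop itself becomes dead.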
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/IndVarSimplify.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "indvars"

STATISTIC(NumWidened     , "Number of indvars widened");
STATISTIC(NumReplaced    , "Number of exit values replaced");
STATISTIC(NumLFTR        , "Number of loop exit tests replaced");
STATISTIC(NumElimExt     , "Number of IV sign/zero extends eliminated");
STATISTIC(NumElimIV      , "Number of congruent IVs eliminated");

// Trip count verification can be enabled by default under NDEBUG if we
// implement a strong expression equivalence checker in SCEV. Until then, we
// use the verify-indvars flag, which may assert in some cases.
static cl::opt<bool> VerifyIndvars(
  "verify-indvars", cl::Hidden,
  cl::desc("Verify the ScalarEvolution result after running indvars"));

enum ReplaceExitVal { NeverRepl, OnlyCheapRepl, AlwaysRepl };

static cl::opt<ReplaceExitVal> ReplaceExitValue(
    "replexitval", cl::Hidden, cl::init(OnlyCheapRepl),
    cl::desc("Choose the strategy to replace exit value in IndVarSimplify"),
    cl::values(clEnumValN(NeverRepl, "never", "never replace exit value"),
               clEnumValN(OnlyCheapRepl, "cheap",
                          "only replace exit value when the cost is cheap"),
               clEnumValN(AlwaysRepl, "always",
                          "always replace exit value whenever possible")));

static cl::opt<bool> UsePostIncrementRanges(
  "indvars-post-increment-ranges", cl::Hidden,
  cl::desc("Use post increment control-dependent ranges in IndVarSimplify"),
  cl::init(true));

static cl::opt<bool>
DisableLFTR("disable-lftr", cl::Hidden, cl::init(false),
            cl::desc("Disable Linear Function Test Replace optimization"));

namespace {

struct RewritePhi;

class IndVarSimplify {
  LoopInfo *LI;
  ScalarEvolution *SE;
  DominatorTree *DT;
  const DataLayout &DL;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;

  SmallVector<WeakTrackingVH, 16> DeadInsts;
  bool Changed = false;

  bool isValidRewrite(Value *FromVal, Value *ToVal);

  void handleFloatingPointIV(Loop *L, PHINode *PH);
  void rewriteNonIntegerIVs(Loop *L);

  void simplifyAndExtend(Loop *L, SCEVExpander &Rewriter, LoopInfo *LI);

  bool canLoopBeDeleted(Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet);
  void rewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter);
  void rewriteFirstIterationLoopExitValues(Loop *L);

  Value *linearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount,
                                   PHINode *IndVar, SCEVExpander &Rewriter);

  void sinkUnusedInvariants(Loop *L);

public:
  IndVarSimplify(LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
                 const DataLayout &DL, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI)
      : LI(LI), SE(SE), DT(DT), DL(DL), TLI(TLI), TTI(TTI) {}

  bool run(Loop *L);
};

} // end anonymous namespace

/// Return true if the SCEV expansion generated by the rewriter can replace the
/// original value. SCEV guarantees that it produces the same value, but the way
/// it is produced may be illegal IR.  Ideally, this function will only be
/// called for verification.
bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) {
  // If an SCEV expression subsumed multiple pointers, its expansion could
  // reassociate the GEP changing the base pointer. This is illegal because the
  // final address produced by a GEP chain must be inbounds relative to its
  // underlying object. Otherwise basic alias analysis, among other things,
  // could fail in a dangerous way. Ultimately, SCEV will be improved to avoid
  // producing an expression involving multiple pointers. Until then, we must
  // bail out here.
  //
  // Retrieve the pointer operand of the GEP. Don't use GetUnderlyingObject
  // because it understands lcssa phis while SCEV does not.
  Value *FromPtr = FromVal;
  Value *ToPtr = ToVal;
  if (auto *GEP = dyn_cast<GEPOperator>(FromVal)) {
    FromPtr = GEP->getPointerOperand();
  }
  if (auto *GEP = dyn_cast<GEPOperator>(ToVal)) {
    ToPtr = GEP->getPointerOperand();
  }
  if (FromPtr != FromVal || ToPtr != ToVal) {
    // Quickly check the common case
    if (FromPtr == ToPtr)
      return true;

    // SCEV may have rewritten an expression that produces the GEP's pointer
    // operand. That's ok as long as the pointer operand has the same base
    // pointer. Unlike GetUnderlyingObject(), getPointerBase() will find the
    // base of a recurrence. This handles the case in which SCEV expansion
    // converts a pointer type recurrence into a nonrecurrent pointer base
    // indexed by an integer recurrence.
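    //
    // An illustrative IR sketch: a pointer recurrence such as
    //   %p = phi i8* [ %base, %ph ], [ %p.inc, %loop ]
    //   %p.inc = getelementptr i8, i8* %p, i64 1
    // may be re-expanded as 'getelementptr i8, i8* %base, i64 %iv'; both
    // forms share the pointer base %base, so such a rewrite is accepted.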

    // If the GEP base pointer is a vector of pointers, abort.
    if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy())
      return false;

    const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr));
    const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr));
    if (FromBase == ToBase)
      return true;

    LLVM_DEBUG(dbgs() << "INDVARS: GEP rewrite bail out " << *FromBase
                      << " != " << *ToBase << "\n");

    return false;
  }
  return true;
}

/// Determine the insertion point for this user. By default, insert immediately
/// before the user. SCEVExpander or LICM will hoist loop invariants out of the
/// loop. For PHI nodes, there may be multiple uses, so compute the nearest
/// common dominator for the incoming blocks.
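///
/// For instance (a sketch): if Def reaches a phi from incoming blocks B1 and
/// B2, the insertion point is the terminator of
/// findNearestCommonDominator(B1, B2), not a point next to the phi itself.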
static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
                                          DominatorTree *DT, LoopInfo *LI) {
  PHINode *PHI = dyn_cast<PHINode>(User);
  if (!PHI)
    return User;

  Instruction *InsertPt = nullptr;
  for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
    if (PHI->getIncomingValue(i) != Def)
      continue;

    BasicBlock *InsertBB = PHI->getIncomingBlock(i);
    if (!InsertPt) {
      InsertPt = InsertBB->getTerminator();
      continue;
    }
    InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
    InsertPt = InsertBB->getTerminator();
  }
  assert(InsertPt && "Missing phi operand");

  auto *DefI = dyn_cast<Instruction>(Def);
  if (!DefI)
    return InsertPt;

  assert(DT->dominates(DefI, InsertPt) && "def does not dominate all uses");

  auto *L = LI->getLoopFor(DefI->getParent());
  assert(!L || L->contains(LI->getLoopFor(InsertPt->getParent())));

  for (auto *DTN = (*DT)[InsertPt->getParent()]; DTN; DTN = DTN->getIDom())
    if (LI->getLoopFor(DTN->getBlock()) == L)
      return DTN->getBlock()->getTerminator();

  llvm_unreachable("DefI dominates InsertPt!");
}

//===----------------------------------------------------------------------===//
// rewriteNonIntegerIVs and helpers. Prefer integer IVs.
//===----------------------------------------------------------------------===//

/// Convert APF to an integer, if possible.
static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) {
  bool isExact = false;
  // See if we can convert this to an int64_t
  uint64_t UIntVal;
  if (APF.convertToInteger(makeMutableArrayRef(UIntVal), 64, true,
                           APFloat::rmTowardZero, &isExact) != APFloat::opOK ||
      !isExact)
    return false;
  IntVal = UIntVal;
  return true;
}

/// If the loop has a floating-point induction variable, insert a corresponding
/// integer induction variable if possible.
/// For example,
/// for(double i = 0; i < 10000; ++i)
///   bar(i)
/// is converted into
/// for(int i = 0; i < 10000; ++i)
///   bar((double)i);
void IndVarSimplify::handleFloatingPointIV(Loop *L, PHINode *PN) {
  unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
  unsigned BackEdge     = IncomingEdge^1;

  // Check incoming value.
  auto *InitValueVal = dyn_cast<ConstantFP>(PN->getIncomingValue(IncomingEdge));

  int64_t InitValue;
  if (!InitValueVal || !ConvertToSInt(InitValueVal->getValueAPF(), InitValue))
    return;

  // Check the IV increment. Reject this PN if the increment operation is not
  // an add or the increment value cannot be represented by an integer.
  auto *Incr = dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge));
  if (Incr == nullptr || Incr->getOpcode() != Instruction::FAdd) return;

  // If this is not an add of the PHI with a constantfp, or if the constant fp
  // is not an integer, bail out.
  ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1));
  int64_t IncValue;
  if (IncValueVal == nullptr || Incr->getOperand(0) != PN ||
      !ConvertToSInt(IncValueVal->getValueAPF(), IncValue))
    return;

  // Check Incr uses. One user is PN and the other user is an exit condition
  // used by the conditional terminator.
  Value::user_iterator IncrUse = Incr->user_begin();
  Instruction *U1 = cast<Instruction>(*IncrUse++);
  if (IncrUse == Incr->user_end()) return;
  Instruction *U2 = cast<Instruction>(*IncrUse++);
  if (IncrUse != Incr->user_end()) return;

  // Find exit condition, which is an fcmp.  If it doesn't exist, or if it isn't
  // only used by a branch, we can't transform it.
  FCmpInst *Compare = dyn_cast<FCmpInst>(U1);
  if (!Compare)
    Compare = dyn_cast<FCmpInst>(U2);
  if (!Compare || !Compare->hasOneUse() ||
      !isa<BranchInst>(Compare->user_back()))
    return;

  BranchInst *TheBr = cast<BranchInst>(Compare->user_back());

  // We need to verify that the branch actually controls the iteration count
  // of the loop.  If not, the new IV can overflow and no one will notice.
  // The branch block must be in the loop and one of the successors must be out
  // of the loop.
  assert(TheBr->isConditional() && "Can't use fcmp if not conditional");
  if (!L->contains(TheBr->getParent()) ||
      (L->contains(TheBr->getSuccessor(0)) &&
       L->contains(TheBr->getSuccessor(1))))
    return;

  // If it isn't a comparison with an integer-as-fp (the exit value), we can't
  // transform it.
  ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1));
  int64_t ExitValue;
  if (ExitValueVal == nullptr ||
      !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue))
    return;

  // Find new predicate for integer comparison.
  CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE;
  switch (Compare->getPredicate()) {
  default: return;  // Unknown comparison.
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UEQ: NewPred = CmpInst::ICMP_EQ; break;
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UNE: NewPred = CmpInst::ICMP_NE; break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_UGT: NewPred = CmpInst::ICMP_SGT; break;
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGE: NewPred = CmpInst::ICMP_SGE; break;
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_ULT: NewPred = CmpInst::ICMP_SLT; break;
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULE: NewPred = CmpInst::ICMP_SLE; break;
  }

  // We convert the floating point induction variable to a signed i32 value if
  // we can.  This is only safe if the comparison will not overflow in a way
  // that won't be trapped by the integer equivalent operations.  Check for this
  // now.
  // TODO: We could use i64 if it is native and the range requires it.

  // The start/stride/exit values must all fit in signed i32.
  if (!isInt<32>(InitValue) || !isInt<32>(IncValue) || !isInt<32>(ExitValue))
    return;

  // If not actually striding (add x, 0.0), avoid touching the code.
  if (IncValue == 0)
    return;

  // Positive and negative strides have different safety conditions.
  if (IncValue > 0) {
    // If we have a positive stride, we require the init to be less than the
    // exit value.
    if (InitValue >= ExitValue)
      return;

    uint32_t Range = uint32_t(ExitValue-InitValue);
    // Check for infinite loop, either:
    // while (i <= Exit) or until (i > Exit)
    if (NewPred == CmpInst::ICMP_SLE || NewPred == CmpInst::ICMP_SGT) {
      if (++Range == 0) return;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return;
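    // An illustrative case: init=0, inc=3, exit=10 with 'i != 10' gives
    // Leftover = 10 % 3 = 1; the IV takes 0, 3, 6, 9, 12, ... and never
    // equals 10, so the i32 IV would eventually wrap while the fp IV just
    // keeps growing, and the two loops would diverge.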

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue+IncValue) < ExitValue)
      return;
  } else {
    // If we have a negative stride, we require the init to be greater than the
    // exit value.
    if (InitValue <= ExitValue)
      return;

    uint32_t Range = uint32_t(InitValue-ExitValue);
    // Check for infinite loop, either:
    // while (i >= Exit) or until (i < Exit)
    if (NewPred == CmpInst::ICMP_SGE || NewPred == CmpInst::ICMP_SLT) {
      if (++Range == 0) return;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(-IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue+IncValue) > ExitValue)
      return;
  }

  IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext());

  // Insert new integer induction variable.
  PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName()+".int", PN);
  NewPHI->addIncoming(ConstantInt::get(Int32Ty, InitValue),
                      PN->getIncomingBlock(IncomingEdge));

  Value *NewAdd =
    BinaryOperator::CreateAdd(NewPHI, ConstantInt::get(Int32Ty, IncValue),
                              Incr->getName()+".int", Incr);
  NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge));

  ICmpInst *NewCompare = new ICmpInst(TheBr, NewPred, NewAdd,
                                      ConstantInt::get(Int32Ty, ExitValue),
                                      Compare->getName());

  // In the following deletions, PN may become dead and may be deleted.
  // Use a WeakTrackingVH to observe whether this happens.
  WeakTrackingVH WeakPH = PN;

  // Delete the old floating point exit comparison.  The branch starts using the
  // new comparison.
  NewCompare->takeName(Compare);
  Compare->replaceAllUsesWith(NewCompare);
  RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI);

  // Delete the old floating point increment.
  Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
  RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI);

  // If the FP induction variable still has uses, this is because something else
  // in the loop uses its value.  In order to canonicalize the induction
  // variable, we chose to eliminate the IV and rewrite it in terms of an
  // int->fp cast.
  //
  // We give preference to sitofp over uitofp because it is faster on most
  // platforms.
  if (WeakPH) {
    Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv",
                                 &*PN->getParent()->getFirstInsertionPt());
    PN->replaceAllUsesWith(Conv);
    RecursivelyDeleteTriviallyDeadInstructions(PN, TLI);
  }
  Changed = true;
}

void IndVarSimplify::rewriteNonIntegerIVs(Loop *L) {
  // First step.  Check to see if there are any floating-point recurrences.
  // If there are, change them into integer recurrences, permitting analysis by
  // the SCEV routines.
  BasicBlock *Header = L->getHeader();

  SmallVector<WeakTrackingVH, 8> PHIs;
  for (PHINode &PN : Header->phis())
    PHIs.push_back(&PN);

  for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
    if (PHINode *PN = dyn_cast_or_null<PHINode>(&*PHIs[i]))
      handleFloatingPointIV(L, PN);

  // If the loop previously had a floating-point IV, ScalarEvolution
  // may not have been able to compute a trip count. Now that we've done some
  // rewriting, the trip count may be computable.
  if (Changed)
    SE->forgetLoop(L);
}

namespace {

// Collect information about PHI nodes which can be transformed in
// rewriteLoopExitValues.
struct RewritePhi {
  PHINode *PN;

  // Ith incoming value.
  unsigned Ith;

  // Exit value after expansion.
  Value *Val;

  // True if expansion of the exit value is high cost.
  bool HighCost;

  RewritePhi(PHINode *P, unsigned I, Value *V, bool H)
      : PN(P), Ith(I), Val(V), HighCost(H) {}
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// rewriteLoopExitValues - Optimize IV users outside the loop.
// As a side effect, reduces the amount of IV processing within the loop.
//===----------------------------------------------------------------------===//

/// Check to see if this loop has a computable loop-invariant execution count.
/// If so, this means that we can compute the final value of any expressions
/// that are recurrent in the loop, and substitute the exit values from the loop
/// into any instructions outside of the loop that use the final values of the
/// current expressions.
///
/// This is mostly redundant with the regular IndVarSimplify activities that
/// happen later, except that it's more powerful in some cases, because it's
/// able to brute-force evaluate arbitrary instructions as long as they have
/// constant operands at the beginning of the loop.
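///
/// An illustrative sketch: with a computable trip count n, a use of 'sum'
/// after the loop
///   for (i = 0; i != n; ++i) sum += 4;
/// can be rewritten to use the expanded exit value '4*n' directly, leaving
/// the in-loop computation of 'sum' potentially dead.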
void IndVarSimplify::rewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
  // Check a pre-condition.
  assert(L->isRecursivelyLCSSAForm(*DT, *LI) &&
         "Indvars did not preserve LCSSA!");

  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);

  SmallVector<RewritePhi, 8> RewritePhiSet;
  // Find all values that are computed inside the loop, but used outside of it.
  // Because of LCSSA, these values will only occur in LCSSA PHI Nodes.  Scan
  // the exit blocks of the loop to find them.
  for (BasicBlock *ExitBB : ExitBlocks) {
    // If there are no PHI nodes in this exit block, then no values defined
    // inside the loop are used on this path; skip it.
    PHINode *PN = dyn_cast<PHINode>(ExitBB->begin());
    if (!PN) continue;

    unsigned NumPreds = PN->getNumIncomingValues();

    // Iterate over all of the PHI nodes.
    BasicBlock::iterator BBI = ExitBB->begin();
    while ((PN = dyn_cast<PHINode>(BBI++))) {
      if (PN->use_empty())
        continue; // dead use, don't replace it

      if (!SE->isSCEVable(PN->getType()))
        continue;

      // It's necessary to tell ScalarEvolution about this explicitly so that
      // it can walk the def-use list and forget all SCEVs, as it may not be
      // watching the PHI itself. Once the new exit value is in place, there
      // may not be a def-use connection between the loop and every instruction
      // which got a SCEVAddRecExpr for that loop.
      SE->forgetValue(PN);

      // Iterate over all of the values in all the PHI nodes.
      for (unsigned i = 0; i != NumPreds; ++i) {
        // If the value being merged in is not an instruction defined
        // in the loop, skip it.
        Value *InVal = PN->getIncomingValue(i);
        if (!isa<Instruction>(InVal))
          continue;

        // If this pred is for a subloop, not L itself, skip it.
        if (LI->getLoopFor(PN->getIncomingBlock(i)) != L)
          continue; // The Block is in a subloop, skip it.

        // Check that InVal is defined in the loop.
        Instruction *Inst = cast<Instruction>(InVal);
        if (!L->contains(Inst))
          continue;

        // Okay, this instruction has a user outside of the current loop
        // and varies predictably *inside* the loop.  Evaluate the value it
        // contains when the loop exits, if possible.
        const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop());
        if (!SE->isLoopInvariant(ExitValue, L) ||
            !isSafeToExpand(ExitValue, *SE))
          continue;

        // Computing the value outside of the loop brings no benefit if:
        //  - it is definitely used inside the loop in a way which cannot be
        //    optimized away.
        //  - no use outside of the loop can take advantage of hoisting the
        //    computation out of the loop.
        if (ExitValue->getSCEVType() >= scMulExpr) {
          unsigned NumHardInternalUses = 0;
          unsigned NumSoftExternalUses = 0;
          unsigned NumUses = 0;
          for (auto IB = Inst->user_begin(), IE = Inst->user_end();
               IB != IE && NumUses <= 6; ++IB) {
            Instruction *UseInstr = cast<Instruction>(*IB);
            unsigned Opc = UseInstr->getOpcode();
            NumUses++;
            if (L->contains(UseInstr)) {
              if (Opc == Instruction::Call)
                NumHardInternalUses++;
            } else {
              if (Opc == Instruction::PHI) {
                // Do not count the Phi as a use. LCSSA may have inserted
                // plenty of trivial ones.
                NumUses--;
                for (auto PB = UseInstr->user_begin(),
                          PE = UseInstr->user_end();
                     PB != PE && NumUses <= 6; ++PB, ++NumUses) {
                  unsigned PhiOpc = cast<Instruction>(*PB)->getOpcode();
                  if (PhiOpc != Instruction::Call && PhiOpc != Instruction::Ret)
                    NumSoftExternalUses++;
                }
                continue;
              }
              if (Opc != Instruction::Call && Opc != Instruction::Ret)
                NumSoftExternalUses++;
            }
          }
          if (NumUses <= 6 && NumHardInternalUses && !NumSoftExternalUses)
            continue;
        }

        bool HighCost = Rewriter.isHighCostExpansion(ExitValue, L, Inst);
        Value *ExitVal = Rewriter.expandCodeFor(ExitValue, PN->getType(), Inst);

        LLVM_DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal
                          << '\n'
                          << "  LoopVal = " << *Inst << "\n");

        if (!isValidRewrite(Inst, ExitVal)) {
          DeadInsts.push_back(ExitVal);
          continue;
        }

#ifndef NDEBUG
        // If we reuse an instruction from a loop which is neither L nor one of
        // its containing loops, we end up breaking LCSSA form for this loop by
        // creating a new use of its instruction.
        if (auto *ExitInsn = dyn_cast<Instruction>(ExitVal))
          if (auto *EVL = LI->getLoopFor(ExitInsn->getParent()))
            if (EVL != L)
              assert(EVL->contains(L) && "LCSSA breach detected!");
#endif

        // Collect all the candidate PHINodes to be rewritten.
        RewritePhiSet.emplace_back(PN, i, ExitVal, HighCost);
      }
    }
  }

  bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet);

  // Transformation.
  for (const RewritePhi &Phi : RewritePhiSet) {
    PHINode *PN = Phi.PN;
    Value *ExitVal = Phi.Val;

    // Only do the rewrite when the ExitValue can be expanded cheaply.
    // If LoopCanBeDel is true, rewrite exit value aggressively.
    if (ReplaceExitValue == OnlyCheapRepl && !LoopCanBeDel && Phi.HighCost) {
      DeadInsts.push_back(ExitVal);
      continue;
    }

    Changed = true;
    ++NumReplaced;
    Instruction *Inst = cast<Instruction>(PN->getIncomingValue(Phi.Ith));
    PN->setIncomingValue(Phi.Ith, ExitVal);

    // If this instruction is now dead, schedule it for deletion rather than
    // deleting it immediately, to avoid invalidating iterators.
    if (isInstructionTriviallyDead(Inst, TLI))
      DeadInsts.push_back(Inst);

    // Replace PN with ExitVal if that is legal and does not break LCSSA.
    if (PN->getNumIncomingValues() == 1 &&
        LI->replacementPreservesLCSSAForm(PN, ExitVal)) {
      PN->replaceAllUsesWith(ExitVal);
      PN->eraseFromParent();
    }
  }

  // The insertion point instruction may have been deleted; clear it out
  // so that the rewriter doesn't trip over it later.
  Rewriter.clearInsertPoint();
}

//===---------------------------------------------------------------------===//
// rewriteFirstIterationLoopExitValues: Rewrite loop exit values if we know
// they will exit at the first iteration.
//===---------------------------------------------------------------------===//

/// Check to see if this loop has loop-invariant conditions which lead to loop
/// exits. If so, we know that if the exit path is taken, it is at the first
/// loop iteration. This lets us predict exit values of PHI nodes that live in
/// the loop header.
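///
/// For instance (a sketch): if the header's terminator exits on a
/// loop-invariant condition and the exit block has an LCSSA phi fed by a
/// header phi, the value along that exit edge must be the header phi's
/// incoming value from the preheader, because the exit can only be taken on
/// the first iteration.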
void IndVarSimplify::rewriteFirstIterationLoopExitValues(Loop *L) {
  // Verify the input to the pass is already in LCSSA form.
  assert(L->isLCSSAForm(*DT));

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);
  auto *LoopHeader = L->getHeader();
  assert(LoopHeader && "Invalid loop");

  for (auto *ExitBB : ExitBlocks) {
    // If there are no PHI nodes in this exit block, then no values
    // defined inside the loop are used on this path.
    for (PHINode &PN : ExitBB->phis()) {
      for (unsigned IncomingValIdx = 0, E = PN.getNumIncomingValues();
           IncomingValIdx != E; ++IncomingValIdx) {
        auto *IncomingBB = PN.getIncomingBlock(IncomingValIdx);

        // We currently only support loop exits from the loop header. If the
        // incoming block is not the loop header, we would need to recursively
        // check that all conditions starting from the loop header are loop
        // invariant. Additional support might be added in the future.
        if (IncomingBB != LoopHeader)
          continue;

        // Get the condition that leads to the exit path.
        auto *TermInst = IncomingBB->getTerminator();

        Value *Cond = nullptr;
        if (auto *BI = dyn_cast<BranchInst>(TermInst)) {
          // Must be a conditional branch, otherwise the block
          // should not be in the loop.
          Cond = BI->getCondition();
        } else if (auto *SI = dyn_cast<SwitchInst>(TermInst))
          Cond = SI->getCondition();
        else
          continue;

        if (!L->isLoopInvariant(Cond))
          continue;

        auto *ExitVal = dyn_cast<PHINode>(PN.getIncomingValue(IncomingValIdx));

        // Only deal with PHIs.
        if (!ExitVal)
          continue;

        // If ExitVal is a PHI on the loop header, then we know its
        // value along this exit because the exit can only be taken
        // on the first iteration.
        auto *LoopPreheader = L->getLoopPreheader();
        assert(LoopPreheader && "Invalid loop");
        int PreheaderIdx = ExitVal->getBasicBlockIndex(LoopPreheader);
        if (PreheaderIdx != -1) {
          assert(ExitVal->getParent() == LoopHeader &&
                 "ExitVal must be in loop header");
          PN.setIncomingValue(IncomingValIdx,
                              ExitVal->getIncomingValue(PreheaderIdx));
        }
      }
    }
  }
}

/// Check whether it is possible to delete the loop after rewriting the exit
/// value. If it is possible, ignore ReplaceExitValue and do the rewriting
/// aggressively.
bool IndVarSimplify::canLoopBeDeleted(
    Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet) {
  BasicBlock *Preheader = L->getLoopPreheader();
  // If there is no preheader, the loop will not be deleted.
  if (!Preheader)
    return false;

  // The LoopDeletion pass can delete a loop even when ExitingBlocks.size() > 1,
  // but we forgo the multiple-ExitingBlocks case here for simplicity.
  // TODO: If we see a testcase where a loop with multiple ExitingBlocks can be
  // deleted after exit value rewriting, we can enhance the logic here.
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);
  if (ExitBlocks.size() > 1 || ExitingBlocks.size() > 1)
    return false;

  BasicBlock *ExitBlock = ExitBlocks[0];
  BasicBlock::iterator BI = ExitBlock->begin();
  while (PHINode *P = dyn_cast<PHINode>(BI)) {
    Value *Incoming = P->getIncomingValueForBlock(ExitingBlocks[0]);

    // If the Incoming value of P is found in RewritePhiSet, we know it
    // could be rewritten to use a loop-invariant value in the transformation
    // phase later. Skip it in the loop-invariant check below.
    bool found = false;
    for (const RewritePhi &Phi : RewritePhiSet) {
      unsigned i = Phi.Ith;
      if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) {
        found = true;
        break;
      }
    }

    Instruction *I;
    if (!found && (I = dyn_cast<Instruction>(Incoming)))
      if (!L->hasLoopInvariantOperands(I))
        return false;

    ++BI;
  }

  for (auto *BB : L->blocks())
    if (llvm::any_of(*BB, [](Instruction &I) {
          return I.mayHaveSideEffects();
        }))
      return false;

  return true;
}

//===----------------------------------------------------------------------===//
//  IV Widening - Extend the width of an IV to cover its widest uses.
//===----------------------------------------------------------------------===//

namespace {

// Collect information about induction variables that are used by sign/zero
// extend operations. This information is recorded by visitIVCast and provides
// the input to WidenIV.
struct WideIVInfo {
  PHINode *NarrowIV = nullptr;

  // Widest integer type created by a [sz]ext.
  Type *WidestNativeType = nullptr;

  // Was a sext user seen before a zext?
  bool IsSigned = false;
};

} // end anonymous namespace

/// Update information about the induction variable that is extended by this
/// sign or zero extend operation. This is used to determine the final width of
/// the IV before actually widening it.
static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE,
                        const TargetTransformInfo *TTI) {
  bool IsSigned = Cast->getOpcode() == Instruction::SExt;
  if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
    return;

  Type *Ty = Cast->getType();
  uint64_t Width = SE->getTypeSizeInBits(Ty);
  if (!Cast->getModule()->getDataLayout().isLegalInteger(Width))
    return;

  // Check that `Cast` actually extends the induction variable (we rely on this
  // later).  This takes care of cases where `Cast` is extending a truncation of
  // the narrow induction variable, and thus can end up being narrower than the
  // "narrow" induction variable.
  uint64_t NarrowIVWidth = SE->getTypeSizeInBits(WI.NarrowIV->getType());
  if (NarrowIVWidth >= Width)
    return;

  // Cast is either an sext or zext up to this point.
  // We should not widen an indvar if arithmetic on the wider indvar is more
  // expensive than on the narrower indvar. We check only the cost of ADD
  // because at least an ADD is required to increment the induction variable.
  // We could more comprehensively compute the cost of all instructions on the
  // induction variable when necessary.
  if (TTI &&
      TTI->getArithmeticInstrCost(Instruction::Add, Ty) >
          TTI->getArithmeticInstrCost(Instruction::Add,
                                      Cast->getOperand(0)->getType())) {
    return;
  }

  if (!WI.WidestNativeType) {
    WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
    WI.IsSigned = IsSigned;
    return;
  }

  // We extend the IV to satisfy the sign of its first user, arbitrarily.
  if (WI.IsSigned != IsSigned)
    return;

  if (Width > SE->getTypeSizeInBits(WI.WidestNativeType))
    WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
}

namespace {

/// Record a link in the Narrow IV def-use chain along with the WideIV that
/// computes the same value as the Narrow IV def.  This avoids caching Use*
/// pointers.
struct NarrowIVDefUse {
  Instruction *NarrowDef = nullptr;
  Instruction *NarrowUse = nullptr;
  Instruction *WideDef = nullptr;

  // True if the narrow def is never negative.  Tracking this information lets
  // us use a sign extension instead of a zero extension or vice versa, when
  // profitable and legal.
  bool NeverNegative = false;

  NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD,
                 bool NeverNegative)
      : NarrowDef(ND), NarrowUse(NU), WideDef(WD),
        NeverNegative(NeverNegative) {}
};

/// The goal of this transform is to remove sign and zero extends without
/// creating any new induction variables. To do this, it creates a new phi of
/// the wider type and redirects all users, either removing extends or inserting
/// truncs whenever we stop propagating the type.
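///
/// An illustrative C-level view:
///   for (int i = 0; i < n; ++i)
///     a[(long)i] = 0;   // sext of the i32 IV on every iteration
/// becomes a loop over a single i64 IV, and the per-iteration sext is gone.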
class WidenIV {
  // Parameters
  PHINode *OrigPhi;
  Type *WideType;

  // Context
  LoopInfo        *LI;
  Loop            *L;
  ScalarEvolution *SE;
  DominatorTree   *DT;

  // Does the module have any calls to the llvm.experimental.guard intrinsic
  // at all? If not we can avoid scanning instructions looking for guards.
  bool HasGuards;

  // Result
  PHINode *WidePhi = nullptr;
  Instruction *WideInc = nullptr;
  const SCEV *WideIncExpr = nullptr;
  SmallVectorImpl<WeakTrackingVH> &DeadInsts;

  SmallPtrSet<Instruction *,16> Widened;
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;

  enum ExtendKind { ZeroExtended, SignExtended, Unknown };

  // A map tracking the kind of extension used to widen each narrow IV
  // and narrow IV user.
  // Key: pointer to a narrow IV or IV user.
  // Value: the kind of extension used to widen this Instruction.
  DenseMap<AssertingVH<Instruction>, ExtendKind> ExtendKindMap;

  using DefUserPair = std::pair<AssertingVH<Value>, AssertingVH<Instruction>>;

  // A map with control-dependent ranges for post increment IV uses. The key is
  // a pair of IV def and a use of this def denoting the context. The value is
  // a ConstantRange representing possible values of the def at the given
  // context.
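  //
  // For example (illustrative): for a use guarded by 'if (i < 16)', the def
  // 'i' is known to be less than 16 at that use, even though the IV's
  // overall range may be wider.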
  DenseMap<DefUserPair, ConstantRange> PostIncRangeInfos;

  Optional<ConstantRange> getPostIncRangeInfo(Value *Def,
                                              Instruction *UseI) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    return It == PostIncRangeInfos.end()
               ? Optional<ConstantRange>(None)
               : Optional<ConstantRange>(It->second);
  }

  void calculatePostIncRanges(PHINode *OrigPhi);
  void calculatePostIncRange(Instruction *NarrowDef, Instruction *NarrowUser);

  void updatePostIncRangeInfo(Value *Def, Instruction *UseI, ConstantRange R) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    if (It == PostIncRangeInfos.end())
      PostIncRangeInfos.insert({Key, R});
    else
      It->second = R.intersectWith(It->second);
  }

public:
  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
          DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
          bool HasGuards)
      : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), LI(LInfo),
        L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree),
        HasGuards(HasGuards), DeadInsts(DI) {
    assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
    ExtendKindMap[OrigPhi] = WI.IsSigned ? SignExtended : ZeroExtended;
  }

  PHINode *createWideIV(SCEVExpander &Rewriter);

protected:
  Value *createExtendInst(Value *NarrowOper, Type *WideType, bool IsSigned,
                          Instruction *Use);

  Instruction *cloneIVUser(NarrowIVDefUse DU, const SCEVAddRecExpr *WideAR);
  Instruction *cloneArithmeticIVUser(NarrowIVDefUse DU,
                                     const SCEVAddRecExpr *WideAR);
  Instruction *cloneBitwiseIVUser(NarrowIVDefUse DU);

  ExtendKind getExtendKind(Instruction *I);

  using WidenedRecTy = std::pair<const SCEVAddRecExpr *, ExtendKind>;

  WidenedRecTy getWideRecurrence(NarrowIVDefUse DU);

  WidenedRecTy getExtendedOperandRecurrence(NarrowIVDefUse DU);

  const SCEV *getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                              unsigned OpCode) const;

  Instruction *widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);

  bool widenLoopCompare(NarrowIVDefUse DU);

  void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);
};

} // end anonymous namespace

/// Perform a quick domtree based check for loop invariance assuming that V is
/// used within the loop. LoopInfo::isLoopInvariant() seems gratuitous for this
/// purpose.
static bool isLoopInvariant(Value *V, const Loop *L, const DominatorTree *DT) {
  Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  return DT->properlyDominates(Inst->getParent(), L->getHeader());
}

Value *WidenIV::createExtendInst(Value *NarrowOper, Type *WideType,
                                 bool IsSigned, Instruction *Use) {
  // Set the debug location and conservative insertion point.
  IRBuilder<> Builder(Use);
  // Hoist the insertion point into loop preheaders as far as possible.
  for (const Loop *L = LI->getLoopFor(Use->getParent());
       L && L->getLoopPreheader() && isLoopInvariant(NarrowOper, L, DT);
       L = L->getParentLoop())
    Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());

  return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
                    Builder.CreateZExt(NarrowOper, WideType);
}

/// Instantiate a wide operation to replace a narrow operation. This only needs
/// to handle operations that can evaluate to a SCEVAddRec. It can safely
/// return nullptr for any operation we decide not to clone.
Instruction *WidenIV::cloneIVUser(NarrowIVDefUse DU,
                                  const SCEVAddRecExpr *WideAR) {
  unsigned Opcode = DU.NarrowUse->getOpcode();
  switch (Opcode) {
  default:
    return nullptr;
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::Sub:
    return cloneArithmeticIVUser(DU, WideAR);

  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return cloneBitwiseIVUser(DU);
  }
}

Instruction *WidenIV::cloneBitwiseIVUser(NarrowIVDefUse DU) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  LLVM_DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n");

  // Replace NarrowDef operands with WideDef. Otherwise, we don't know anything
  // about the narrow operand yet so must insert a [sz]ext. It is probably loop
  // invariant and will be folded or hoisted. If it actually comes from a
  // widened IV, it should be removed during a future call to widenIVUse.
  bool IsSigned = getExtendKind(NarrowDef) == SignExtended;
  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      IsSigned, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      IsSigned, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());
  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}

Instruction *WidenIV::cloneArithmeticIVUser(NarrowIVDefUse DU,
                                            const SCEVAddRecExpr *WideAR) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 0 : 1;

  // We're trying to find X such that
  //
  //  Widen(NarrowDef `op` NonIVNarrowDef) == WideAR == WideDef `op.wide` X
  //
  // We guess two solutions to X, sext(NonIVNarrowDef) and zext(NonIVNarrowDef),
  // and check using SCEV if any of them are correct.

  // Returns true if extending NonIVNarrowDef according to `SignExt` is a
  // correct solution to X.
  auto GuessNonIVOperand = [&](bool SignExt) {
    const SCEV *WideLHS;
    const SCEV *WideRHS;

    auto GetExtend = [this, SignExt](const SCEV *S, Type *Ty) {
      if (SignExt)
        return SE->getSignExtendExpr(S, Ty);
      return SE->getZeroExtendExpr(S, Ty);
    };

    if (IVOpIdx == 0) {
      WideLHS = SE->getSCEV(WideDef);
      const SCEV *NarrowRHS = SE->getSCEV(NarrowUse->getOperand(1));
      WideRHS = GetExtend(NarrowRHS, WideType);
    } else {
      const SCEV *NarrowLHS = SE->getSCEV(NarrowUse->getOperand(0));
      WideLHS = GetExtend(NarrowLHS, WideType);
      WideRHS = SE->getSCEV(WideDef);
    }

    // WideUse is "WideDef `op.wide` X" as described in the comment.
    const SCEV *WideUse = nullptr;

    switch (NarrowUse->getOpcode()) {
    default:
      llvm_unreachable("No other possibility!");

    case Instruction::Add:
      WideUse = SE->getAddExpr(WideLHS, WideRHS);
      break;

    case Instruction::Mul:
      WideUse = SE->getMulExpr(WideLHS, WideRHS);
      break;

    case Instruction::UDiv:
      WideUse = SE->getUDivExpr(WideLHS, WideRHS);
      break;

    case Instruction::Sub:
      WideUse = SE->getMinusSCEV(WideLHS, WideRHS);
      break;
    }

    return WideUse == WideAR;
  };

  bool SignExtend = getExtendKind(NarrowDef) == SignExtended;
  if (!GuessNonIVOperand(SignExtend)) {
    SignExtend = !SignExtend;
    if (!GuessNonIVOperand(SignExtend))
      return nullptr;
  }

  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      SignExtend, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      SignExtend, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());

  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}

WidenIV::ExtendKind WidenIV::getExtendKind(Instruction *I) {
  auto It = ExtendKindMap.find(I);
  assert(It != ExtendKindMap.end() && "Instruction not yet extended!");
  return It->second;
}

const SCEV *WidenIV::getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                                     unsigned OpCode) const {
  if (OpCode == Instruction::Add)
    return SE->getAddExpr(LHS, RHS);
  if (OpCode == Instruction::Sub)
    return SE->getMinusSCEV(LHS, RHS);
  if (OpCode == Instruction::Mul)
    return SE->getMulExpr(LHS, RHS);

  llvm_unreachable("Unsupported opcode.");
}

/// No-wrap operations can transfer sign extension of their result to their
/// operands. Generate the SCEV value for the widened operation without
/// actually modifying the IR yet. If the expression after extending the
/// operands is an AddRec for this loop, return the AddRec and the kind of
/// extension used.
WidenIV::WidenedRecTy WidenIV::getExtendedOperandRecurrence(NarrowIVDefUse DU) {
  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = DU.NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions are supported yet.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return {nullptr, Unknown};

  // One operand (NarrowDef) has already been extended to WideDef. Now determine
  // if extending the other will lead to a recurrence.
  const unsigned ExtendOperIdx =
      DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
  assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");

  const SCEV *ExtendOperExpr = nullptr;
  const OverflowingBinaryOperator *OBO =
    cast<OverflowingBinaryOperator>(DU.NarrowUse);
  ExtendKind ExtKind = getExtendKind(DU.NarrowDef);
  if (ExtKind == SignExtended && OBO->hasNoSignedWrap())
    ExtendOperExpr = SE->getSignExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else if (ExtKind == ZeroExtended && OBO->hasNoUnsignedWrap())
    ExtendOperExpr = SE->getZeroExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else
    return {nullptr, Unknown};

  // When creating this SCEV expr, don't apply the current operation's NSW or
  // NUW flags. This instruction may be guarded by control flow that the
  // no-wrap behavior depends on. Non-control-equivalent instructions can be
  // mapped to the same SCEV expression, and it would be incorrect to transfer
  // NSW/NUW semantics to those operations.
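  //
  // An illustrative scenario: an 'add nsw %i, 1' executed only under
  // 'if (%i < 100)' and an unguarded 'add %i, 1' elsewhere both map to the
  // SCEV '%i + 1'; stamping nsw onto that shared expression would wrongly
  // assert no-wrap for the unguarded add as well.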
  const SCEV *lhs = SE->getSCEV(DU.WideDef);
  const SCEV *rhs = ExtendOperExpr;

  // Let's swap operands to the initial order for the case of non-commutative
  // operations, like SUB. See PR21014.
  if (ExtendOperIdx == 0)
    std::swap(lhs, rhs);
  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(getSCEVByOpCode(lhs, rhs, OpCode));

  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, Unknown};

  return {AddRec, ExtKind};
}

/// Is this instruction potentially interesting for further simplification after
/// widening its type? In other words, can the extend be safely hoisted out of
/// the loop with SCEV reducing the value to a recurrence on the same loop? If
/// so, return the extended recurrence and the kind of extension used. Otherwise
/// return {nullptr, Unknown}.
WidenIV::WidenedRecTy WidenIV::getWideRecurrence(NarrowIVDefUse DU) {
  if (!SE->isSCEVable(DU.NarrowUse->getType()))
    return {nullptr, Unknown};

  const SCEV *NarrowExpr = SE->getSCEV(DU.NarrowUse);
  if (SE->getTypeSizeInBits(NarrowExpr->getType()) >=
      SE->getTypeSizeInBits(WideType)) {
    // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
    // index. So don't follow this use.
    return {nullptr, Unknown};
  }

  const SCEV *WideExpr;
  ExtendKind ExtKind;
  if (DU.NeverNegative) {
    WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
    if (isa<SCEVAddRecExpr>(WideExpr))
      ExtKind = SignExtended;
    else {
      WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
      ExtKind = ZeroExtended;
    }
  } else if (getExtendKind(DU.NarrowDef) == SignExtended) {
    WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
    ExtKind = SignExtended;
  } else {
    WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
    ExtKind = ZeroExtended;
  }
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, Unknown};
  return {AddRec, ExtKind};
}

/// This IV user cannot be widened. Replace this use of the original narrow IV
/// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
static void truncateIVUse(NarrowIVDefUse DU, DominatorTree *DT, LoopInfo *LI) {
  LLVM_DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef << " for user "
                    << *DU.NarrowUse << "\n");
  IRBuilder<> Builder(
      getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI));
  Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
}

/// If the narrow use is a compare instruction, then widen the compare
/// (and possibly the other operand).  The extend operation is hoisted into the
/// loop preheader as far as possible.
bool WidenIV::widenLoopCompare(NarrowIVDefUse DU) {
  ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse);
  if (!Cmp)
    return false;

  // We can legally widen the comparison in the following two cases:
  //
  //  - The signedness of the IV extension and comparison match
  //
  //  - The narrow IV is always positive (and thus its sign extension is equal
  //    to its zero extension).  For instance, let's say we're zero extending
  //    %narrow for the following use
  //
  //      icmp slt i32 %narrow, %val   ... (A)
  //
  //    and %narrow is always positive.  Then
  //
  //      (A) == icmp slt i32 sext(%narrow), sext(%val)
  //          == icmp slt i32 zext(%narrow), sext(%val)
  bool IsSigned = getExtendKind(DU.NarrowDef) == SignExtended;
  if (!(DU.NeverNegative || IsSigned == Cmp->isSigned()))
    return false;

  Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 1 : 0);
  unsigned CastWidth = SE->getTypeSizeInBits(Op->getType());
  unsigned IVWidth = SE->getTypeSizeInBits(WideType);
  assert(CastWidth <= IVWidth && "Unexpected width while widening compare.");

  // Widen the compare instruction.
  IRBuilder<> Builder(
      getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI));
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);

  // Widen the other operand of the compare, if necessary.
  if (CastWidth < IVWidth) {
    Value *ExtOp = createExtendInst(Op, WideType, Cmp->isSigned(), Cmp);
    DU.NarrowUse->replaceUsesOfWith(Op, ExtOp);
  }
  return true;
}

/// Determine whether an individual user of the narrow IV can be widened. If so,
/// return the wide clone of the user.
Instruction *WidenIV::widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
  assert(ExtendKindMap.count(DU.NarrowDef) &&
         "Should already know the kind of extension used to widen NarrowDef");

  // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
  if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
    if (LI->getLoopFor(UsePhi->getParent()) != L) {
      // For LCSSA phis, sink the truncate outside the loop.
      // After SimplifyCFG most loop exit targets have a single predecessor.
      // Otherwise fall back to a truncate within the loop.
      if (UsePhi->getNumOperands() != 1)
        truncateIVUse(DU, DT, LI);
      else {
        // Widening the PHI requires us to insert a trunc.  The logical place
        // for this trunc is in the same BB as the PHI.  This is not possible if
        // the BB is terminated by a catchswitch.
        if (isa<CatchSwitchInst>(UsePhi->getParent()->getTerminator()))
          return nullptr;

        PHINode *WidePhi =
          PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide",
                          UsePhi);
        WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
        IRBuilder<> Builder(&*WidePhi->getParent()->getFirstInsertionPt());
        Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
        UsePhi->replaceAllUsesWith(Trunc);
        DeadInsts.emplace_back(UsePhi);
        LLVM_DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi << " to "
                          << *WidePhi << "\n");
      }
      return nullptr;
    }
  }

1400   // This narrow use can be widened by a sext if it's non-negative or its narrow
1401   // def was widened by a sext. Same for zext.
1402   auto canWidenBySExt = [&]() {
1403     return DU.NeverNegative || getExtendKind(DU.NarrowDef) == SignExtended;
1404   };
1405   auto canWidenByZExt = [&]() {
1406     return DU.NeverNegative || getExtendKind(DU.NarrowDef) == ZeroExtended;
1407   };
1408 
1409   // Our raison d'etre! Eliminate sign and zero extension.
1410   if ((isa<SExtInst>(DU.NarrowUse) && canWidenBySExt()) ||
1411       (isa<ZExtInst>(DU.NarrowUse) && canWidenByZExt())) {
1412     Value *NewDef = DU.WideDef;
1413     if (DU.NarrowUse->getType() != WideType) {
1414       unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
1415       unsigned IVWidth = SE->getTypeSizeInBits(WideType);
1416       if (CastWidth < IVWidth) {
1417         // The cast isn't as wide as the IV, so insert a Trunc.
1418         IRBuilder<> Builder(DU.NarrowUse);
1419         NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType());
1420       }
1421       else {
1422         // A wider extend was hidden behind a narrower one. This may induce
1423         // another round of IV widening in which the intermediate IV becomes
1424         // dead. It should be very rare.
1425         LLVM_DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
1426                           << " not wide enough to subsume " << *DU.NarrowUse
1427                           << "\n");
1428         DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
1429         NewDef = DU.NarrowUse;
1430       }
1431     }
1432     if (NewDef != DU.NarrowUse) {
1433       LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
1434                         << " replaced by " << *DU.WideDef << "\n");
1435       ++NumElimExt;
1436       DU.NarrowUse->replaceAllUsesWith(NewDef);
1437       DeadInsts.emplace_back(DU.NarrowUse);
1438     }
1439     // Now that the extend is gone, we want to expose its uses for potential
1440     // further simplification. We don't need to directly inform SimplifyIVUsers
1441     // of the new users, because their parent IV will be processed later as a
1442     // new loop phi. If we preserved IVUsers analysis, we would also want to
1443     // push the uses of WideDef here.
1444 
1445     // No further widening is needed. The deceased [sz]ext had done it for us.
1446     return nullptr;
1447   }
1448 
1449   // Does this user itself evaluate to a recurrence after widening?
1450   WidenedRecTy WideAddRec = getExtendedOperandRecurrence(DU);
1451   if (!WideAddRec.first)
1452     WideAddRec = getWideRecurrence(DU);
1453 
1454   assert((WideAddRec.first == nullptr) == (WideAddRec.second == Unknown));
1455   if (!WideAddRec.first) {
1456     // If use is a loop condition, try to promote the condition instead of
1457     // truncating the IV first.
1458     if (widenLoopCompare(DU))
1459       return nullptr;
1460 
1461     // This user does not evaluate to a recurrence after widening, so don't
1462     // follow it. Instead insert a Trunc to kill off the original use,
1463     // eventually isolating the original narrow IV so it can be removed.
1464     truncateIVUse(DU, DT, LI);
1465     return nullptr;
1466   }
1467   // Assume block terminators cannot evaluate to a recurrence. We can't
1468   // insert a Trunc after a terminator if there happens to be a critical edge.
1469   assert(DU.NarrowUse != DU.NarrowUse->getParent()->getTerminator() &&
1470          "SCEV is not expected to evaluate a block terminator");
1471 
1472   // Reuse the IV increment that SCEVExpander created as long as it dominates
1473   // NarrowUse.
1474   Instruction *WideUse = nullptr;
1475   if (WideAddRec.first == WideIncExpr &&
1476       Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
1477     WideUse = WideInc;
1478   else {
1479     WideUse = cloneIVUser(DU, WideAddRec.first);
1480     if (!WideUse)
1481       return nullptr;
1482   }
1483   // Evaluation of WideAddRec ensured that the narrow expression could be
1484   // extended outside the loop without overflow. This suggests that the wide use
1485   // evaluates to the same expression as the extended narrow use, but doesn't
1486   // absolutely guarantee it. Hence the following failsafe check. In rare cases
1487   // where it fails, we simply throw away the newly created wide use.
1488   if (WideAddRec.first != SE->getSCEV(WideUse)) {
1489     LLVM_DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": "
1490                       << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first
1491                       << "\n");
1492     DeadInsts.emplace_back(WideUse);
1493     return nullptr;
1494   }
1495 
1496   ExtendKindMap[DU.NarrowUse] = WideAddRec.second;
1497   // Returning WideUse pushes it on the worklist.
1498   return WideUse;
1499 }
1500 
1501 /// Add eligible users of NarrowDef to NarrowIVUsers.
1502 void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
1503   const SCEV *NarrowSCEV = SE->getSCEV(NarrowDef);
1504   bool NonNegativeDef =
1505       SE->isKnownPredicate(ICmpInst::ICMP_SGE, NarrowSCEV,
1506                            SE->getConstant(NarrowSCEV->getType(), 0));
1507   for (User *U : NarrowDef->users()) {
1508     Instruction *NarrowUser = cast<Instruction>(U);
1509 
1510     // Handle data flow merges and bizarre phi cycles.
1511     if (!Widened.insert(NarrowUser).second)
1512       continue;
1513 
1514     bool NonNegativeUse = false;
1515     if (!NonNegativeDef) {
1516       // We might have control-dependent range information for this context.
1517       if (auto RangeInfo = getPostIncRangeInfo(NarrowDef, NarrowUser))
1518         NonNegativeUse = RangeInfo->getSignedMin().isNonNegative();
1519     }
1520 
1521     NarrowIVUsers.emplace_back(NarrowDef, NarrowUser, WideDef,
1522                                NonNegativeDef || NonNegativeUse);
1523   }
1524 }
1525 
1526 /// Process a single induction variable. First use the SCEVExpander to create a
1527 /// wide induction variable that evaluates to the same recurrence as the
1528 /// original narrow IV. Then use a worklist to forward traverse the narrow IV's
1529 /// def-use chain. After widenIVUse has processed all interesting IV users, the
1530 /// narrow IV will be isolated for removal by DeleteDeadPHIs.
1531 ///
1532 /// It would be simpler to delete uses as they are processed, but we must avoid
1533 /// invalidating SCEV expressions.
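     ///
     /// A minimal sketch of the effect (illustrative): a narrow recurrence
     /// 'phi i32 [ 0, %preheader ], [ %inc, %latch ]' is complemented by a
     /// parallel 'phi i64 [ 0, %preheader ], [ %inc.wide, %latch ]', after
     /// which the worklist rewrites each interesting i32 use in terms of the
     /// wide IV.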
1534 PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) {
1535   // Is this phi an induction variable?
1536   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
1537   if (!AddRec)
1538     return nullptr;
1539 
1540   // Widen the induction variable expression.
1541   const SCEV *WideIVExpr = getExtendKind(OrigPhi) == SignExtended
1542                                ? SE->getSignExtendExpr(AddRec, WideType)
1543                                : SE->getZeroExtendExpr(AddRec, WideType);
1544 
1545   assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
1546          "Expect the new IV expression to preserve its type");
1547 
1548   // Can the IV be extended outside the loop without overflow?
1549   AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
1550   if (!AddRec || AddRec->getLoop() != L)
1551     return nullptr;
1552 
1553   // An AddRec must have loop-invariant operands. Since this AddRec is
1554   // materialized by a loop header phi, the expression cannot have any post-loop
1555   // operands, so they must dominate the loop header.
1556   assert(
1557       SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
1558       SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader()) &&
1559       "Loop header phi recurrence inputs do not dominate the loop");
1560 
1561   // Iterate over IV uses (including transitive ones) looking for IV increments
1562   // of the form 'add nsw %iv, <const>'. For each increment and each use of
1563   // the increment, calculate control-dependent range information based on
1564   // dominating conditions inside of the loop (e.g. a range check inside of the
1565   // loop). Calculated ranges are stored in PostIncRangeInfos map.
1566   //
1567   // Control-dependent range information is later used to prove that a narrow
1568   // definition is not negative (see pushNarrowIVUsers). It's difficult to do
1569   // this on demand because when pushNarrowIVUsers needs this information some
1570   // of the dominating conditions might be already widened.
1571   if (UsePostIncrementRanges)
1572     calculatePostIncRanges(OrigPhi);
1573 
1574   // The rewriter provides a value for the desired IV expression. This may
1575   // either find an existing phi or materialize a new one. Either way, we
1576   // expect a well-formed cyclic phi-with-increments. i.e. any operand not part
1577   // of the phi-SCC dominates the loop entry.
1578   Instruction *InsertPt = &L->getHeader()->front();
1579   WidePhi = cast<PHINode>(Rewriter.expandCodeFor(AddRec, WideType, InsertPt));
1580 
1581   // Remembering the WideIV increment generated by SCEVExpander allows
1582   // widenIVUse to reuse it when widening the narrow IV's increment. We don't
1583   // employ a general reuse mechanism because the call above is the only call to
1584   // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
1585   if (BasicBlock *LatchBlock = L->getLoopLatch()) {
1586     WideInc =
1587       cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock));
1588     WideIncExpr = SE->getSCEV(WideInc);
1589     // Propagate the debug location associated with the original loop increment
1590     // to the new (widened) increment.
1591     auto *OrigInc =
1592       cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
1593     WideInc->setDebugLoc(OrigInc->getDebugLoc());
1594   }
1595 
1596   LLVM_DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
1597   ++NumWidened;
1598 
1599   // Traverse the def-use chain using a worklist starting at the original IV.
1600   assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state");
1601 
1602   Widened.insert(OrigPhi);
1603   pushNarrowIVUsers(OrigPhi, WidePhi);
1604 
1605   while (!NarrowIVUsers.empty()) {
1606     NarrowIVDefUse DU = NarrowIVUsers.pop_back_val();
1607 
1608     // Process a def-use edge. This may replace the use, so don't hold a
1609     // use_iterator across it.
1610     Instruction *WideUse = widenIVUse(DU, Rewriter);
1611 
1612     // Follow all def-use edges from the previous narrow use.
1613     if (WideUse)
1614       pushNarrowIVUsers(DU.NarrowUse, WideUse);
1615 
1616     // widenIVUse may have removed the def-use edge.
1617     if (DU.NarrowDef->use_empty())
1618       DeadInsts.emplace_back(DU.NarrowDef);
1619   }
1620 
1621   // Attach any debug information to the new PHI. Since OrigPhi and WidePhi
1622   // evaluate the same recurrence, we can just copy the debug info over.
1623   SmallVector<DbgValueInst *, 1> DbgValues;
1624   llvm::findDbgValues(DbgValues, OrigPhi);
1625   auto *MDPhi = MetadataAsValue::get(WidePhi->getContext(),
1626                                      ValueAsMetadata::get(WidePhi));
1627   for (auto &DbgValue : DbgValues)
1628     DbgValue->setOperand(0, MDPhi);
1629   return WidePhi;
1630 }
1631 
1632 /// Calculates the control-dependent range for the given def at the given
1633 /// context by looking at dominating conditions inside of the loop.
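     ///
     /// For example (illustrative): for '%inc = add nsw i32 %i, 1' used under a
     /// dominating 'br (icmp slt i32 %i, %n)', the taken edge constrains %i to
     /// the region allowed by 'slt %n', so at that use %inc lies in the same
     /// region shifted by +1.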
1634 void WidenIV::calculatePostIncRange(Instruction *NarrowDef,
1635                                     Instruction *NarrowUser) {
1636   using namespace llvm::PatternMatch;
1637 
1638   Value *NarrowDefLHS;
1639   const APInt *NarrowDefRHS;
1640   if (!match(NarrowDef, m_NSWAdd(m_Value(NarrowDefLHS),
1641                                  m_APInt(NarrowDefRHS))) ||
1642       !NarrowDefRHS->isNonNegative())
1643     return;
1644 
1645   auto UpdateRangeFromCondition = [&] (Value *Condition,
1646                                        bool TrueDest) {
1647     CmpInst::Predicate Pred;
1648     Value *CmpRHS;
1649     if (!match(Condition, m_ICmp(Pred, m_Specific(NarrowDefLHS),
1650                                  m_Value(CmpRHS))))
1651       return;
1652 
1653     CmpInst::Predicate P =
1654             TrueDest ? Pred : CmpInst::getInversePredicate(Pred);
1655 
1656     auto CmpRHSRange = SE->getSignedRange(SE->getSCEV(CmpRHS));
1657     auto CmpConstrainedLHSRange =
1658             ConstantRange::makeAllowedICmpRegion(P, CmpRHSRange);
1659     auto NarrowDefRange =
1660             CmpConstrainedLHSRange.addWithNoSignedWrap(*NarrowDefRHS);
1661 
1662     updatePostIncRangeInfo(NarrowDef, NarrowUser, NarrowDefRange);
1663   };
1664 
1665   auto UpdateRangeFromGuards = [&](Instruction *Ctx) {
1666     if (!HasGuards)
1667       return;
1668 
1669     for (Instruction &I : make_range(Ctx->getIterator().getReverse(),
1670                                      Ctx->getParent()->rend())) {
1671       Value *C = nullptr;
1672       if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(C))))
1673         UpdateRangeFromCondition(C, /*TrueDest=*/true);
1674     }
1675   };
1676 
1677   UpdateRangeFromGuards(NarrowUser);
1678 
1679   BasicBlock *NarrowUserBB = NarrowUser->getParent();
1680   // If NarrowUserBB is statically unreachable, asking dominator queries may
1681   // yield surprising results (e.g. the block may not have a dom tree node).
1682   if (!DT->isReachableFromEntry(NarrowUserBB))
1683     return;
1684 
1685   for (auto *DTB = (*DT)[NarrowUserBB]->getIDom();
1686        L->contains(DTB->getBlock());
1687        DTB = DTB->getIDom()) {
1688     auto *BB = DTB->getBlock();
1689     auto *TI = BB->getTerminator();
1690     UpdateRangeFromGuards(TI);
1691 
1692     auto *BI = dyn_cast<BranchInst>(TI);
1693     if (!BI || !BI->isConditional())
1694       continue;
1695 
1696     auto *TrueSuccessor = BI->getSuccessor(0);
1697     auto *FalseSuccessor = BI->getSuccessor(1);
1698 
1699     auto DominatesNarrowUser = [this, NarrowUser] (BasicBlockEdge BBE) {
1700       return BBE.isSingleEdge() &&
1701              DT->dominates(BBE, NarrowUser->getParent());
1702     };
1703 
1704     if (DominatesNarrowUser(BasicBlockEdge(BB, TrueSuccessor)))
1705       UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/true);
1706 
1707     if (DominatesNarrowUser(BasicBlockEdge(BB, FalseSuccessor)))
1708       UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/false);
1709   }
1710 }
1711 
1712 /// Calculates the PostIncRangeInfos map for the given IV.
1713 void WidenIV::calculatePostIncRanges(PHINode *OrigPhi) {
1714   SmallPtrSet<Instruction *, 16> Visited;
1715   SmallVector<Instruction *, 6> Worklist;
1716   Worklist.push_back(OrigPhi);
1717   Visited.insert(OrigPhi);
1718 
1719   while (!Worklist.empty()) {
1720     Instruction *NarrowDef = Worklist.pop_back_val();
1721 
1722     for (Use &U : NarrowDef->uses()) {
1723       auto *NarrowUser = cast<Instruction>(U.getUser());
1724 
1725       // Don't go looking outside the current loop.
1726       auto *NarrowUserLoop = (*LI)[NarrowUser->getParent()];
1727       if (!NarrowUserLoop || !L->contains(NarrowUserLoop))
1728         continue;
1729 
1730       if (!Visited.insert(NarrowUser).second)
1731         continue;
1732 
1733       Worklist.push_back(NarrowUser);
1734 
1735       calculatePostIncRange(NarrowDef, NarrowUser);
1736     }
1737   }
1738 }
1739 
1740 //===----------------------------------------------------------------------===//
1741 //  Live IV Reduction - Minimize IVs live across the loop.
1742 //===----------------------------------------------------------------------===//
1743 
1744 //===----------------------------------------------------------------------===//
1745 //  Simplification of IV users based on SCEV evaluation.
1746 //===----------------------------------------------------------------------===//
1747 
1748 namespace {
1749 
1750 class IndVarSimplifyVisitor : public IVVisitor {
1751   ScalarEvolution *SE;
1752   const TargetTransformInfo *TTI;
1753   PHINode *IVPhi;
1754 
1755 public:
1756   WideIVInfo WI;
1757 
1758   IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV,
1759                         const TargetTransformInfo *TTI,
1760                         const DominatorTree *DTree)
1761     : SE(SCEV), TTI(TTI), IVPhi(IV) {
1762     DT = DTree;
1763     WI.NarrowIV = IVPhi;
1764   }
1765 
1766   // Implement the interface used by simplifyUsersOfIV.
1767   void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, TTI); }
1768 };
1769 
1770 } // end anonymous namespace
1771 
1772 /// Iteratively perform simplification on a worklist of IV users. Each
1773 /// successive simplification may push more users which may themselves be
1774 /// candidates for simplification.
1775 ///
1776 /// Sign/Zero extend elimination is interleaved with IV simplification.
1777 void IndVarSimplify::simplifyAndExtend(Loop *L,
1778                                        SCEVExpander &Rewriter,
1779                                        LoopInfo *LI) {
1780   SmallVector<WideIVInfo, 8> WideIVs;
1781 
1782   auto *GuardDecl = L->getBlocks()[0]->getModule()->getFunction(
1783           Intrinsic::getName(Intrinsic::experimental_guard));
1784   bool HasGuards = GuardDecl && !GuardDecl->use_empty();
1785 
1786   SmallVector<PHINode*, 8> LoopPhis;
1787   for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
1788     LoopPhis.push_back(cast<PHINode>(I));
1789   }
1790   // Each round of simplification iterates through the SimplifyIVUsers worklist
1791   // for all current phis, then determines whether any IVs can be
1792   // widened. Widening adds new phis to LoopPhis, inducing another round of
1793   // simplification on the wide IVs.
1794   while (!LoopPhis.empty()) {
1795     // Evaluate as many IV expressions as possible before widening any IVs. This
1796     // forces SCEV to set no-wrap flags before evaluating sign/zero
1797     // extension. The first time SCEV attempts to normalize sign/zero extension,
1798     // the result becomes final. So for the most predictable results, we delay
1799     // evaluation of sign/zero extensions until needed, and avoid running
1800     // other SCEV based analysis prior to simplifyAndExtend.
1801     do {
1802       PHINode *CurrIV = LoopPhis.pop_back_val();
1803 
1804       // Information about sign/zero extensions of CurrIV.
1805       IndVarSimplifyVisitor Visitor(CurrIV, SE, TTI, DT);
1806 
1807       Changed |=
1808           simplifyUsersOfIV(CurrIV, SE, DT, LI, DeadInsts, Rewriter, &Visitor);
1809 
1810       if (Visitor.WI.WidestNativeType) {
1811         WideIVs.push_back(Visitor.WI);
1812       }
1813     } while (!LoopPhis.empty());
1814 
1815     for (; !WideIVs.empty(); WideIVs.pop_back()) {
1816       WidenIV Widener(WideIVs.back(), LI, SE, DT, DeadInsts, HasGuards);
1817       if (PHINode *WidePhi = Widener.createWideIV(Rewriter)) {
1818         Changed = true;
1819         LoopPhis.push_back(WidePhi);
1820       }
1821     }
1822   }
1823 }
1824 
1825 //===----------------------------------------------------------------------===//
1826 //  linearFunctionTestReplace and its kin. Rewrite the loop exit condition.
1827 //===----------------------------------------------------------------------===//
1828 
1829 /// Return true if this loop's backedge taken count expression can be safely and
1830 /// cheaply expanded into an instruction sequence that can be used by
1831 /// linearFunctionTestReplace.
1832 ///
1833 /// TODO: This fails for pointer-type loop counters with greater than one byte
1834 /// strides, consequently preventing LFTR from running. For the purpose of LFTR
1835 /// we could skip this check in the case that the LFTR loop counter (chosen by
1836 /// FindLoopCounter) is also pointer type. Instead, we could directly convert
1837 /// the loop test to an inequality test by checking the target data's alignment
1838 /// of element types (given that the initial pointer value originates from or is
1839 /// used by ABI constrained operation, as opposed to inttoptr/ptrtoint).
1840 /// However, we don't yet have a strong motivation for converting loop tests
1841 /// into inequality tests.
1842 static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE,
1843                                         SCEVExpander &Rewriter) {
1844   const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
1845   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) ||
1846       BackedgeTakenCount->isZero())
1847     return false;
1848 
1849   if (!L->getExitingBlock())
1850     return false;
1851 
1852   // Can't rewrite non-branch yet.
1853   if (!isa<BranchInst>(L->getExitingBlock()->getTerminator()))
1854     return false;
1855 
1856   if (Rewriter.isHighCostExpansion(BackedgeTakenCount, L))
1857     return false;
1858 
1859   return true;
1860 }
1861 
1862 /// Return the loop header phi IFF IncV adds a loop invariant value to the phi.
1863 static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
1864   Instruction *IncI = dyn_cast<Instruction>(IncV);
1865   if (!IncI)
1866     return nullptr;
1867 
1868   switch (IncI->getOpcode()) {
1869   case Instruction::Add:
1870   case Instruction::Sub:
1871     break;
1872   case Instruction::GetElementPtr:
1873     // An IV counter must preserve its type.
1874     if (IncI->getNumOperands() == 2)
1875       break;
1876     LLVM_FALLTHROUGH;
1877   default:
1878     return nullptr;
1879   }
1880 
1881   PHINode *Phi = dyn_cast<PHINode>(IncI->getOperand(0));
1882   if (Phi && Phi->getParent() == L->getHeader()) {
1883     if (isLoopInvariant(IncI->getOperand(1), L, DT))
1884       return Phi;
1885     return nullptr;
1886   }
1887   if (IncI->getOpcode() == Instruction::GetElementPtr)
1888     return nullptr;
1889 
1890   // Allow add/sub to be commuted.
1891   Phi = dyn_cast<PHINode>(IncI->getOperand(1));
1892   if (Phi && Phi->getParent() == L->getHeader()) {
1893     if (isLoopInvariant(IncI->getOperand(0), L, DT))
1894       return Phi;
1895   }
1896   return nullptr;
1897 }
1898 
1899 /// Return the compare guarding the loop latch, or NULL for unrecognized tests.
1900 static ICmpInst *getLoopTest(Loop *L) {
1901   assert(L->getExitingBlock() && "expected loop exit");
1902 
1903   BasicBlock *LatchBlock = L->getLoopLatch();
1904   // Don't bother with LFTR if the loop is not properly simplified.
1905   if (!LatchBlock)
1906     return nullptr;
1907 
1908   BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
1909   assert(BI && "expected exit branch");
1910 
1911   return dyn_cast<ICmpInst>(BI->getCondition());
1912 }
1913 
1914 /// linearFunctionTestReplace policy. Return true unless we can show that the
1915 /// current exit test is already sufficiently canonical.
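     ///
     /// For example (illustrative): 'icmp slt i32 %iv, %n' is not canonical, so
     /// LFTR is requested; 'icmp ne i32 %iv.next, %limit' against a
     /// loop-invariant %limit, where %iv is a simple counter, already is.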
1916 static bool needsLFTR(Loop *L, DominatorTree *DT) {
1917   // Do LFTR to simplify the exit condition to an ICMP.
1918   ICmpInst *Cond = getLoopTest(L);
1919   if (!Cond)
1920     return true;
1921 
1922   // Do LFTR to simplify the exit ICMP to EQ/NE
1923   ICmpInst::Predicate Pred = Cond->getPredicate();
1924   if (Pred != ICmpInst::ICMP_NE && Pred != ICmpInst::ICMP_EQ)
1925     return true;
1926 
1927   // Look for a loop invariant RHS
1928   Value *LHS = Cond->getOperand(0);
1929   Value *RHS = Cond->getOperand(1);
1930   if (!isLoopInvariant(RHS, L, DT)) {
1931     if (!isLoopInvariant(LHS, L, DT))
1932       return true;
1933     std::swap(LHS, RHS);
1934   }
1935   // Look for a simple IV counter LHS
1936   PHINode *Phi = dyn_cast<PHINode>(LHS);
1937   if (!Phi)
1938     Phi = getLoopPhiForCounter(LHS, L, DT);
1939 
1940   if (!Phi)
1941     return true;
1942 
1943   // Do LFTR if PHI node is defined in the loop, but is *not* a counter.
1944   int Idx = Phi->getBasicBlockIndex(L->getLoopLatch());
1945   if (Idx < 0)
1946     return true;
1947 
1948   // Do LFTR if the exit condition's IV is *not* a simple counter.
1949   Value *IncV = Phi->getIncomingValue(Idx);
1950   return Phi != getLoopPhiForCounter(IncV, L, DT);
1951 }
1952 
1953 /// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils
1954 /// down to checking that all operands are constant and listing instructions
1955 /// that may hide undef.
1956 static bool hasConcreteDefImpl(Value *V, SmallPtrSetImpl<Value*> &Visited,
1957                                unsigned Depth) {
1958   if (isa<Constant>(V))
1959     return !isa<UndefValue>(V);
1960 
1961   if (Depth >= 6)
1962     return false;
1963 
1964   // Conservatively handle non-constant non-instructions. For example, Arguments
1965   // may be undef.
1966   Instruction *I = dyn_cast<Instruction>(V);
1967   if (!I)
1968     return false;
1969 
1970   // Load and return values may be undef.
1971   if (I->mayReadFromMemory() || isa<CallInst>(I) || isa<InvokeInst>(I))
1972     return false;
1973 
1974   // Optimistically handle other instructions.
1975   for (Value *Op : I->operands()) {
1976     if (!Visited.insert(Op).second)
1977       continue;
1978     if (!hasConcreteDefImpl(Op, Visited, Depth+1))
1979       return false;
1980   }
1981   return true;
1982 }
1983 
1984 /// Return true if the given value is concrete. We must prove that undef can
1985 /// never reach it.
1986 ///
1987 /// TODO: If we decide that this is a good approach to checking for undef, we
1988 /// may factor it into a common location.
1989 static bool hasConcreteDef(Value *V) {
1990   SmallPtrSet<Value*, 8> Visited;
1991   Visited.insert(V);
1992   return hasConcreteDefImpl(V, Visited, 0);
1993 }
1994 
1995 /// Return true if this IV has any uses other than the (soon to be rewritten)
1996 /// loop exit test.
1997 static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
1998   int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
1999   Value *IncV = Phi->getIncomingValue(LatchIdx);
2000 
2001   for (User *U : Phi->users())
2002     if (U != Cond && U != IncV) return false;
2003 
2004   for (User *U : IncV->users())
2005     if (U != Cond && U != Phi) return false;
2006   return true;
2007 }
2008 
2009 /// Find an affine IV in canonical form.
2010 ///
2011 /// BECount may be an i8* pointer type. The pointer difference is already a
2012 /// valid count without scaling the address stride, so it remains a pointer
2013 /// expression as far as SCEV is concerned.
2014 ///
2015 /// Currently only valid for LFTR. See the comments on hasConcreteDef above.
2016 ///
2017 /// FIXME: Accept -1 stride and set IVLimit = IVInit - BECount
2018 ///
2019 /// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride.
2020 /// This is difficult in general for SCEV because of potential overflow. But we
2021 /// could at least handle constant BECounts.
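     ///
     /// For example (illustrative): among the loop header phis, a unit-stride
     /// affine recurrence such as {0,+,1} that is at least as wide as BECount is
     /// preferred, and count-from-zero IVs win ties over nonzero starts.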
2022 static PHINode *FindLoopCounter(Loop *L, const SCEV *BECount,
2023                                 ScalarEvolution *SE, DominatorTree *DT) {
2024   uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
2025 
2026   Value *Cond =
2027     cast<BranchInst>(L->getExitingBlock()->getTerminator())->getCondition();
2028 
2029   // Loop over all of the PHI nodes, looking for a simple counter.
2030   PHINode *BestPhi = nullptr;
2031   const SCEV *BestInit = nullptr;
2032   BasicBlock *LatchBlock = L->getLoopLatch();
2033   assert(LatchBlock && "needsLFTR should guarantee a loop latch");
2034   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2035 
2036   for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
2037     PHINode *Phi = cast<PHINode>(I);
2038     if (!SE->isSCEVable(Phi->getType()))
2039       continue;
2040 
2041     // Avoid comparing an integer IV against a pointer Limit.
2042     if (BECount->getType()->isPointerTy() && !Phi->getType()->isPointerTy())
2043       continue;
2044 
2045     const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Phi));
2046     if (!AR || AR->getLoop() != L || !AR->isAffine())
2047       continue;
2048 
2049     // AR may be a pointer type, while BECount is an integer type.
2050     // AR may be wider than BECount. With eq/ne tests overflow is immaterial.
2051     // AR may not be a narrower type, or we may never exit.
2052     uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
2053     if (PhiWidth < BCWidth || !DL.isLegalInteger(PhiWidth))
2054       continue;
2055 
2056     const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
2057     if (!Step || !Step->isOne())
2058       continue;
2059 
2060     int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
2061     Value *IncV = Phi->getIncomingValue(LatchIdx);
2062     if (getLoopPhiForCounter(IncV, L, DT) != Phi)
2063       continue;
2064 
2065     // Avoid reusing a potentially undef value to compute other values that may
2066     // have originally had a concrete definition.
2067     if (!hasConcreteDef(Phi)) {
2068       // We explicitly allow unknown phis as long as they are already used by
2069       // the loop test. In this case we assume that performing LFTR could not
2070       // increase the number of undef users.
2071       if (ICmpInst *Cond = getLoopTest(L)) {
2072         if (Phi != getLoopPhiForCounter(Cond->getOperand(0), L, DT) &&
2073             Phi != getLoopPhiForCounter(Cond->getOperand(1), L, DT)) {
2074           continue;
2075         }
2076       }
2077     }
2078     const SCEV *Init = AR->getStart();
2079 
2080     if (BestPhi && !AlmostDeadIV(BestPhi, LatchBlock, Cond)) {
2081       // Don't force a live loop counter if another IV can be used.
2082       if (AlmostDeadIV(Phi, LatchBlock, Cond))
2083         continue;
2084 
2085       // Prefer to count from zero. This is a more "canonical" counter form. It
2086       // also prefers integer to pointer IVs.
2087       if (BestInit->isZero() != Init->isZero()) {
2088         if (BestInit->isZero())
2089           continue;
2090       }
2091       // If two IVs both count from zero or both count from nonzero then the
2092       // narrower is likely a dead phi that has been widened. Use the wider phi
2093       // to allow the other to be eliminated.
2094       else if (PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType()))
2095         continue;
2096     }
2097     BestPhi = Phi;
2098     BestInit = Init;
2099   }
2100   return BestPhi;
2101 }
2102 
2103 /// Help linearFunctionTestReplace by generating a value that holds the RHS of
2104 /// the new loop test.
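     ///
     /// For example (illustrative): for an integer IV {4,+,1} with IVCount %n,
     /// the limit is the expansion of '4 + %n'; for an i8* IV the limit is
     /// instead built as 'gep %base, zext(%n)' to avoid mixing pointer and
     /// integer arithmetic.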
2105 static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
2106                            SCEVExpander &Rewriter, ScalarEvolution *SE) {
2107   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
2108   assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter");
2109   const SCEV *IVInit = AR->getStart();
2110 
2111   // IVInit may be a pointer while IVCount is an integer when FindLoopCounter
2112   // finds a valid pointer IV. Zero extend BECount in order to materialize a
2113   // GEP. Avoid running SCEVExpander on a new pointer value, instead reusing
2114   // the existing GEPs whenever possible.
2115   if (IndVar->getType()->isPointerTy() && !IVCount->getType()->isPointerTy()) {
2116     // IVOffset will be the new GEP offset that is interpreted by GEP as a
2117     // signed value. IVCount on the other hand represents the loop trip count,
2118     // which is an unsigned value. FindLoopCounter only allows induction
2119     // variables that have a positive stride of one. This means we don't
2120     // have to handle the case of negative offsets (yet) and just need to zero
2121     // extend IVCount.
2122     Type *OfsTy = SE->getEffectiveSCEVType(IVInit->getType());
2123     const SCEV *IVOffset = SE->getTruncateOrZeroExtend(IVCount, OfsTy);
2124 
2125     // Expand the code for the iteration count.
2126     assert(SE->isLoopInvariant(IVOffset, L) &&
2127            "Computed iteration count is not loop invariant!");
2128     BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
2129     Value *GEPOffset = Rewriter.expandCodeFor(IVOffset, OfsTy, BI);
2130 
2131     Value *GEPBase = IndVar->getIncomingValueForBlock(L->getLoopPreheader());
2132     assert(AR->getStart() == SE->getSCEV(GEPBase) && "bad loop counter");
2133     // We could handle pointer IVs other than i8*, but we need to compensate for
2134     // gep index scaling. See canExpandBackedgeTakenCount comments.
2135     assert(SE->getSizeOfExpr(IntegerType::getInt64Ty(IndVar->getContext()),
2136                              cast<PointerType>(GEPBase->getType())
2137                                  ->getElementType())->isOne() &&
2138            "unit stride pointer IV must be i8*");
2139 
2140     IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2141     return Builder.CreateGEP(nullptr, GEPBase, GEPOffset, "lftr.limit");
2142   } else {
2143     // In any other case, convert both IVInit and IVCount to integers before
2144     // comparing. This may result in SCEV expansion of pointers, but in practice
2145     // SCEV will fold the pointer arithmetic away as such:
2146     // BECount = (IVEnd - IVInit - 1) => IVLimit = IVInit (postinc).
2147     //
2148     // Valid cases: (1) both integers, which is most common; (2) both may be pointers
2149     // for simple memset-style loops.
2150     //
2151     // IVInit integer and IVCount pointer would only occur if a canonical IV
2152     // were generated on top of case #2, which is not expected.
2153 
2154     const SCEV *IVLimit = nullptr;
2155     // For unit stride, IVCount = Start + BECount with 2's complement overflow.
2156     // For non-zero Start, compute IVLimit here.
2157     if (AR->getStart()->isZero())
2158       IVLimit = IVCount;
2159     else {
2160       assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
2161       const SCEV *IVInit = AR->getStart();
2162 
2163       // For integer IVs, truncate the IV before computing IVInit + BECount.
2164       if (SE->getTypeSizeInBits(IVInit->getType())
2165           > SE->getTypeSizeInBits(IVCount->getType()))
2166         IVInit = SE->getTruncateExpr(IVInit, IVCount->getType());
2167 
2168       IVLimit = SE->getAddExpr(IVInit, IVCount);
2169     }
2170     // Expand the code for the iteration count.
2171     BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
2172     IRBuilder<> Builder(BI);
2173     assert(SE->isLoopInvariant(IVLimit, L) &&
2174            "Computed iteration count is not loop invariant!");
2175     // Ensure that we generate the same type as IndVar, or a smaller integer
2176     // type. In the presence of null pointer values, we have an integer type
2177     // SCEV expression (IVInit) for a pointer type IV value (IndVar).
2178     Type *LimitTy = IVCount->getType()->isPointerTy() ?
2179       IndVar->getType() : IVCount->getType();
2180     return Rewriter.expandCodeFor(IVLimit, LimitTy, BI);
2181   }
2182 }
2183 
2184 /// This method rewrites the exit condition of the loop to be a canonical !=
2185 /// comparison against the incremented loop induction variable.  This pass is
2186 /// able to rewrite the exit tests of any loop where the SCEV analysis can
2187 /// determine a loop-invariant trip count of the loop, which covers a much
2188 /// broader range of loops than just linear tests.
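     ///
     /// For example (illustrative): when the latch is also the exiting block, a
     /// test like 'icmp slt i32 %iv, %n' is rewritten against the incremented
     /// IV as 'icmp ne i32 %iv.next, %limit', with %limit produced by
     /// genLoopLimit.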
2189 Value *IndVarSimplify::
2190 linearFunctionTestReplace(Loop *L,
2191                           const SCEV *BackedgeTakenCount,
2192                           PHINode *IndVar,
2193                           SCEVExpander &Rewriter) {
2194   assert(canExpandBackedgeTakenCount(L, SE, Rewriter) && "precondition");
2195 
2196   // Initialize CmpIndVar and IVCount to their preincremented values.
2197   Value *CmpIndVar = IndVar;
2198   const SCEV *IVCount = BackedgeTakenCount;
2199 
2200   assert(L->getLoopLatch() && "Loop no longer in simplified form?");
2201 
2202   // If the exiting block is the same as the backedge block, we prefer to
2203   // compare against the post-incremented value, otherwise we must compare
2204   // against the preincremented value.
2205   if (L->getExitingBlock() == L->getLoopLatch()) {
2206     // Add one to the "backedge-taken" count to get the trip count.
2207     // This addition may overflow, which is valid as long as the comparison is
2208     // truncated to BackedgeTakenCount->getType().
2209     IVCount = SE->getAddExpr(BackedgeTakenCount,
2210                              SE->getOne(BackedgeTakenCount->getType()));
2211     // The BackedgeTaken expression contains the number of times that the
2212     // backedge branches to the loop header.  This is one less than the
2213     // number of times the loop executes, so use the incremented indvar.
2214     CmpIndVar = IndVar->getIncomingValueForBlock(L->getExitingBlock());
2215   }
2216 
2217   Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
2218   assert(ExitCnt->getType()->isPointerTy() ==
2219              IndVar->getType()->isPointerTy() &&
2220          "genLoopLimit missed a cast");
2221 
2222   // Insert a new icmp_ne or icmp_eq instruction before the branch.
2223   BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
2224   ICmpInst::Predicate P;
2225   if (L->contains(BI->getSuccessor(0)))
2226     P = ICmpInst::ICMP_NE;
2227   else
2228     P = ICmpInst::ICMP_EQ;
2229 
2230   LLVM_DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n"
2231                     << "      LHS:" << *CmpIndVar << '\n'
2232                     << "       op:\t" << (P == ICmpInst::ICMP_NE ? "!=" : "==")
2233                     << "\n"
2234                     << "      RHS:\t" << *ExitCnt << "\n"
2235                     << "  IVCount:\t" << *IVCount << "\n");
2236 
2237   IRBuilder<> Builder(BI);
2238 
2239   // The new loop exit condition should reuse the debug location of the
2240   // original loop exit condition.
2241   if (auto *Cond = dyn_cast<Instruction>(BI->getCondition()))
2242     Builder.SetCurrentDebugLocation(Cond->getDebugLoc());
2243 
2244   // LFTR can ignore IV overflow and truncate to the width of
2245   // BECount. This avoids materializing the add(zext(add)) expression.
2246   unsigned CmpIndVarSize = SE->getTypeSizeInBits(CmpIndVar->getType());
2247   unsigned ExitCntSize = SE->getTypeSizeInBits(ExitCnt->getType());
2248   if (CmpIndVarSize > ExitCntSize) {
2249     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
2250     const SCEV *ARStart = AR->getStart();
2251     const SCEV *ARStep = AR->getStepRecurrence(*SE);
2252     // For constant IVCount, avoid truncation.
2253     if (isa<SCEVConstant>(ARStart) && isa<SCEVConstant>(IVCount)) {
2254       const APInt &Start = cast<SCEVConstant>(ARStart)->getAPInt();
2255       APInt Count = cast<SCEVConstant>(IVCount)->getAPInt();
2256       // Note that the post-inc value of BackedgeTakenCount may have overflowed
2257       // above such that IVCount is now zero.
2258       if (IVCount != BackedgeTakenCount && Count == 0) {
2259         Count = APInt::getMaxValue(Count.getBitWidth()).zext(CmpIndVarSize);
2260         ++Count;
2261       }
2262       else
2263         Count = Count.zext(CmpIndVarSize);
2264       APInt NewLimit;
2265       if (cast<SCEVConstant>(ARStep)->getValue()->isNegative())
2266         NewLimit = Start - Count;
2267       else
2268         NewLimit = Start + Count;
2269       ExitCnt = ConstantInt::get(CmpIndVar->getType(), NewLimit);
2270 
2271       LLVM_DEBUG(dbgs() << "  Widen RHS:\t" << *ExitCnt << "\n");
2272     } else {
2273       // We try to extend the trip count first. If that doesn't work, we
2274       // truncate the IV. Zext(trunc(IV)) == IV implies the equivalence of
2275       // Trunc(IV) == ExitCnt and IV == zext(ExitCnt); similarly for sext. If
2276       // one of the two holds, extend the trip count; otherwise truncate the IV.
2277       bool Extended = false;
2278       const SCEV *IV = SE->getSCEV(CmpIndVar);
2279       const SCEV *ZExtTrunc =
2280            SE->getZeroExtendExpr(SE->getTruncateExpr(SE->getSCEV(CmpIndVar),
2281                                                      ExitCnt->getType()),
2282                                  CmpIndVar->getType());
2283 
2284       if (ZExtTrunc == IV) {
2285         Extended = true;
2286         ExitCnt = Builder.CreateZExt(ExitCnt, IndVar->getType(),
2287                                      "wide.trip.count");
2288       } else {
2289         const SCEV *SExtTrunc =
2290           SE->getSignExtendExpr(SE->getTruncateExpr(SE->getSCEV(CmpIndVar),
2291                                                     ExitCnt->getType()),
2292                                 CmpIndVar->getType());
2293         if (SExtTrunc == IV) {
2294           Extended = true;
2295           ExitCnt = Builder.CreateSExt(ExitCnt, IndVar->getType(),
2296                                        "wide.trip.count");
2297         }
2298       }
2299 
2300       if (!Extended)
2301         CmpIndVar = Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(),
2302                                         "lftr.wideiv");
2303     }
2304   }
2305   Value *Cond = Builder.CreateICmp(P, CmpIndVar, ExitCnt, "exitcond");
2306   Value *OrigCond = BI->getCondition();
2307   // It's tempting to use replaceAllUsesWith here to fully replace the old
2308   // comparison, but that's not immediately safe, since users of the old
2309   // comparison may not be dominated by the new comparison. Instead, just
2310   // update the branch to use the new comparison; in the common case this
2311   // will make old comparison dead.
2312   BI->setCondition(Cond);
2313   DeadInsts.push_back(OrigCond);
2314 
2315   ++NumLFTR;
2316   Changed = true;
2317   return Cond;
2318 }
2319 
2320 //===----------------------------------------------------------------------===//
2321 //  sinkUnusedInvariants. A late subpass to cleanup loop preheaders.
2322 //===----------------------------------------------------------------------===//
2323 
2324 /// If there's a single exit block, sink any loop-invariant values that
2325 /// were defined in the preheader but not used inside the loop into the
2326 /// exit block to reduce register pressure in the loop.
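     ///
     /// For example (illustrative): an address like
     /// '%p = getelementptr i32, i32* %base, i64 %n' computed in the preheader
     /// but used only after the loop is side-effect free and reads no memory,
     /// so it can be moved into the exit block, shortening its live range.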
2327 void IndVarSimplify::sinkUnusedInvariants(Loop *L) {
2328   BasicBlock *ExitBlock = L->getExitBlock();
2329   if (!ExitBlock) return;
2330 
2331   BasicBlock *Preheader = L->getLoopPreheader();
2332   if (!Preheader) return;
2333 
2334   BasicBlock::iterator InsertPt = ExitBlock->getFirstInsertionPt();
2335   BasicBlock::iterator I(Preheader->getTerminator());
2336   while (I != Preheader->begin()) {
2337     --I;
2338     // New instructions were inserted at the end of the preheader.
2339     if (isa<PHINode>(I))
2340       break;
2341 
2342     // Don't move instructions which might have side effects, since the side
2343     // effects need to complete before instructions inside the loop.  Also don't
2344     // move instructions which might read memory, since the loop may modify
2345     // memory. Note that it's okay if the instruction might have undefined
2346     // behavior: LoopSimplify guarantees that the preheader dominates the exit
2347     // block.
2348     if (I->mayHaveSideEffects() || I->mayReadFromMemory())
2349       continue;
2350 
2351     // Skip debug info intrinsics.
2352     if (isa<DbgInfoIntrinsic>(I))
2353       continue;
2354 
2355     // Skip eh pad instructions.
2356     if (I->isEHPad())
2357       continue;
2358 
2359     // Don't sink allocas: we never want to sink static allocas out of the
2360     // entry block, and correctly sinking dynamic allocas requires
2361     // checks for stacksave/stackrestore intrinsics.
2362     // FIXME: Refactor this check somehow?
2363     if (isa<AllocaInst>(I))
2364       continue;
2365 
2366     // Determine if there is a use in or before the loop (direct or
2367     // otherwise).
2368     bool UsedInLoop = false;
2369     for (Use &U : I->uses()) {
2370       Instruction *User = cast<Instruction>(U.getUser());
2371       BasicBlock *UseBB = User->getParent();
2372       if (PHINode *P = dyn_cast<PHINode>(User)) {
2373         unsigned i =
2374           PHINode::getIncomingValueNumForOperand(U.getOperandNo());
2375         UseBB = P->getIncomingBlock(i);
2376       }
2377       if (UseBB == Preheader || L->contains(UseBB)) {
2378         UsedInLoop = true;
2379         break;
2380       }
2381     }
2382 
2383     // If there is, the def must remain in the preheader.
2384     if (UsedInLoop)
2385       continue;
2386 
2387     // Otherwise, sink it to the exit block.
2388     Instruction *ToMove = &*I;
2389     bool Done = false;
2390 
2391     if (I != Preheader->begin()) {
2392       // Skip debug info intrinsics.
2393       do {
2394         --I;
2395       } while (isa<DbgInfoIntrinsic>(I) && I != Preheader->begin());
2396 
2397       if (isa<DbgInfoIntrinsic>(I) && I == Preheader->begin())
2398         Done = true;
2399     } else {
2400       Done = true;
2401     }
2402 
2403     ToMove->moveBefore(*ExitBlock, InsertPt);
2404     if (Done) break;
2405     InsertPt = ToMove->getIterator();
2406   }
2407 }
2408 
2409 //===----------------------------------------------------------------------===//
2410 //  IndVarSimplify driver. Manage several subpasses of IV simplification.
2411 //===----------------------------------------------------------------------===//
2412 
2413 bool IndVarSimplify::run(Loop *L) {
2414   // We need (and expect!) the incoming loop to be in LCSSA.
2415   assert(L->isRecursivelyLCSSAForm(*DT, *LI) &&
2416          "LCSSA required to run indvars!");
2417 
2418   // If LoopSimplify form is not available, stay out of trouble. Some notes:
2419   //  - LSR currently only supports LoopSimplify-form loops. Indvars'
2420   //    canonicalization can be a pessimization without LSR to "clean up"
2421   //    afterwards.
2422   //  - We depend on having a preheader; in particular,
2423   //    Loop::getCanonicalInductionVariable only supports loops with preheaders,
2424   //    and we're in trouble if we can't find the induction variable even when
2425   //    we've manually inserted one.
2426   //  - LFTR relies on having a single backedge.
2427   if (!L->isLoopSimplifyForm())
2428     return false;
2429 
2430   // If there are any floating-point recurrences, attempt to
2431   // transform them to use integer recurrences.
2432   rewriteNonIntegerIVs(L);
2433 
2434   const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2435 
2436   // Create a rewriter object which we'll use to transform the code with.
2437   SCEVExpander Rewriter(*SE, DL, "indvars");
2438 #ifndef NDEBUG
2439   Rewriter.setDebugType(DEBUG_TYPE);
2440 #endif
2441 
2442   // Eliminate redundant IV users.
2443   //
2444   // Simplification works best when run before other consumers of SCEV. We
2445   // attempt to avoid evaluating SCEVs for sign/zero extend operations until
2446   // other expressions involving loop IVs have been evaluated. This helps SCEV
2447   // set no-wrap flags before normalizing sign/zero extension.
2448   Rewriter.disableCanonicalMode();
2449   simplifyAndExtend(L, Rewriter, LI);
2450 
2451   // Check to see if this loop has a computable loop-invariant execution count.
2452   // If so, this means that we can compute the final value of any expressions
2453   // that are recurrent in the loop, and substitute the exit values from the
2454   // loop into any instructions outside of the loop that use the final values of
2455   // the current expressions.
2456   //
2457   if (ReplaceExitValue != NeverRepl &&
2458       !isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2459     rewriteLoopExitValues(L, Rewriter);
2460 
2461   // Eliminate redundant IV cycles.
2462   NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts);
2463 
2464   // If we have a trip count expression, rewrite the loop's exit condition
2465   // using it.  We can currently only handle loops with a single exit.
2466   if (!DisableLFTR && canExpandBackedgeTakenCount(L, SE, Rewriter) &&
2467       needsLFTR(L, DT)) {
2468     PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT);
2469     if (IndVar) {
2470       // Check preconditions for proper SCEVExpander operation. SCEV does not
2471       // express SCEVExpander's dependencies, such as LoopSimplify. Instead any
2472       // pass that uses the SCEVExpander must do it. This does not work well for
2473       // loop passes because SCEVExpander makes assumptions about all loops,
2474       // while LoopPassManager only forces the current loop to be simplified.
2475       //
2476       // FIXME: SCEV expansion has no way to bail out, so the caller must
2477       // explicitly check any assumptions made by SCEV. Brittle.
2478       const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(BackedgeTakenCount);
2479       if (!AR || AR->getLoop()->getLoopPreheader())
2480         (void)linearFunctionTestReplace(L, BackedgeTakenCount, IndVar,
2481                                         Rewriter);
2482     }
2483   }
2484   // Clear the rewriter cache, because values that are in the rewriter's cache
2485   // can be deleted in the loop below, causing the AssertingVH in the cache to
2486   // trigger.
2487   Rewriter.clear();
2488 
2489   // Now that we're done iterating through lists, clean up any instructions
2490   // which are now dead.
2491   while (!DeadInsts.empty())
2492     if (Instruction *Inst =
2493             dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()))
2494       Changed |= RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
2495 
2496   // The Rewriter may not be used from this point on.
2497 
2498   // Loop-invariant instructions in the preheader that aren't used in the
2499   // loop may be sunk below the loop to reduce register pressure.
2500   sinkUnusedInvariants(L);
2501 
2502   // rewriteFirstIterationLoopExitValues does not rely on the computation of
2503   // trip count and therefore can further simplify exit values in addition to
2504   // rewriteLoopExitValues.
2505   rewriteFirstIterationLoopExitValues(L);
2506 
2507   // Clean up dead instructions.
2508   Changed |= DeleteDeadPHIs(L->getHeader(), TLI);
2509 
2510   // Check a post-condition.
2511   assert(L->isRecursivelyLCSSAForm(*DT, *LI) &&
2512          "Indvars did not preserve LCSSA!");
2513 
2514   // Verify that LFTR, and any other change have not interfered with SCEV's
2515   // ability to compute trip count.
2516 #ifndef NDEBUG
2517   if (VerifyIndvars && !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
2518     SE->forgetLoop(L);
2519     const SCEV *NewBECount = SE->getBackedgeTakenCount(L);
2520     if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) <
2521         SE->getTypeSizeInBits(NewBECount->getType()))
2522       NewBECount = SE->getTruncateOrNoop(NewBECount,
2523                                          BackedgeTakenCount->getType());
2524     else
2525       BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount,
2526                                                  NewBECount->getType());
2527     assert(BackedgeTakenCount == NewBECount && "indvars must preserve SCEV");
2528   }
2529 #endif
2530 
2531   return Changed;
2532 }
2533 
2534 PreservedAnalyses IndVarSimplifyPass::run(Loop &L, LoopAnalysisManager &AM,
2535                                           LoopStandardAnalysisResults &AR,
2536                                           LPMUpdater &) {
2537   Function *F = L.getHeader()->getParent();
2538   const DataLayout &DL = F->getParent()->getDataLayout();
2539 
2540   IndVarSimplify IVS(&AR.LI, &AR.SE, &AR.DT, DL, &AR.TLI, &AR.TTI);
2541   if (!IVS.run(&L))
2542     return PreservedAnalyses::all();
2543 
2544   auto PA = getLoopPassPreservedAnalyses();
2545   PA.preserveSet<CFGAnalyses>();
2546   return PA;
2547 }
2548 
2549 namespace {
2550 
2551 struct IndVarSimplifyLegacyPass : public LoopPass {
2552   static char ID; // Pass identification, replacement for typeid
2553 
2554   IndVarSimplifyLegacyPass() : LoopPass(ID) {
2555     initializeIndVarSimplifyLegacyPassPass(*PassRegistry::getPassRegistry());
2556   }
2557 
2558   bool runOnLoop(Loop *L, LPPassManager &LPM) override {
2559     if (skipLoop(L))
2560       return false;
2561 
2562     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2563     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2564     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2565     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2566     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
2567     auto *TTIP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
2568     auto *TTI = TTIP ? &TTIP->getTTI(*L->getHeader()->getParent()) : nullptr;
2569     const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2570 
2571     IndVarSimplify IVS(LI, SE, DT, DL, TLI, TTI);
2572     return IVS.run(L);
2573   }
2574 
2575   void getAnalysisUsage(AnalysisUsage &AU) const override {
2576     AU.setPreservesCFG();
2577     getLoopAnalysisUsage(AU);
2578   }
2579 };
2580 
2581 } // end anonymous namespace
2582 
2583 char IndVarSimplifyLegacyPass::ID = 0;
2584 
2585 INITIALIZE_PASS_BEGIN(IndVarSimplifyLegacyPass, "indvars",
2586                       "Induction Variable Simplification", false, false)
2587 INITIALIZE_PASS_DEPENDENCY(LoopPass)
2588 INITIALIZE_PASS_END(IndVarSimplifyLegacyPass, "indvars",
2589                     "Induction Variable Simplification", false, false)
2590 
2591 Pass *llvm::createIndVarSimplifyPass() {
2592   return new IndVarSimplifyLegacyPass();
2593 }
2594