1 //===-- LoopUnroll.cpp - Loop unroller pass -------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass implements a simple loop unroller.  It works best when loops have
11 // been canonicalized by the -indvars pass, allowing it to determine the trip
12 // counts of loops easily.
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Scalar/LoopUnrollPass.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/Analysis/AssumptionCache.h"
18 #include "llvm/Analysis/CodeMetrics.h"
19 #include "llvm/Analysis/GlobalsModRef.h"
20 #include "llvm/Analysis/InstructionSimplify.h"
21 #include "llvm/Analysis/LoopPass.h"
22 #include "llvm/Analysis/LoopUnrollAnalyzer.h"
23 #include "llvm/Analysis/OptimizationDiagnosticInfo.h"
24 #include "llvm/Analysis/ScalarEvolution.h"
25 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/Dominators.h"
28 #include "llvm/IR/InstVisitor.h"
29 #include "llvm/IR/IntrinsicInst.h"
30 #include "llvm/IR/Metadata.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/Transforms/Scalar.h"
35 #include "llvm/Transforms/Scalar/LoopPassManager.h"
36 #include "llvm/Transforms/Utils/LoopUtils.h"
37 #include "llvm/Transforms/Utils/UnrollLoop.h"
38 #include <climits>
39 #include <utility>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "loop-unroll"
44 
45 static cl::opt<unsigned>
46     UnrollThreshold("unroll-threshold", cl::Hidden,
47                     cl::desc("The cost threshold for loop unrolling"));
48 
49 static cl::opt<unsigned> UnrollPartialThreshold(
50     "unroll-partial-threshold", cl::Hidden,
51     cl::desc("The cost threshold for partial loop unrolling"));
52 
53 static cl::opt<unsigned> UnrollMaxPercentThresholdBoost(
54     "unroll-max-percent-threshold-boost", cl::init(400), cl::Hidden,
55     cl::desc("The maximum 'boost' (represented as a percentage >= 100) applied "
56              "to the threshold when aggressively unrolling a loop due to the "
57              "dynamic cost savings. If completely unrolling a loop will reduce "
58              "the total runtime from X to Y, we boost the loop unroll "
59              "threshold to DefaultThreshold*std::min(MaxPercentThresholdBoost, "
60              "X/Y). This limit avoids excessive code bloat."));
61 
62 static cl::opt<unsigned> UnrollMaxIterationsCountToAnalyze(
63     "unroll-max-iteration-count-to-analyze", cl::init(10), cl::Hidden,
64     cl::desc("Don't allow loop unrolling to simulate more than this number of "
65              "iterations when checking full unroll profitability"));
66 
67 static cl::opt<unsigned> UnrollCount(
68     "unroll-count", cl::Hidden,
69     cl::desc("Use this unroll count for all loops including those with "
70              "unroll_count pragma values, for testing purposes"));
71 
72 static cl::opt<unsigned> UnrollMaxCount(
73     "unroll-max-count", cl::Hidden,
74     cl::desc("Set the max unroll count for partial and runtime unrolling, for "
75              "testing purposes"));
76 
77 static cl::opt<unsigned> UnrollFullMaxCount(
78     "unroll-full-max-count", cl::Hidden,
79     cl::desc(
80         "Set the max unroll count for full unrolling, for testing purposes"));
81 
82 static cl::opt<bool>
83     UnrollAllowPartial("unroll-allow-partial", cl::Hidden,
84                        cl::desc("Allows loops to be partially unrolled until "
85                                 "-unroll-threshold loop size is reached."));
86 
87 static cl::opt<bool> UnrollAllowRemainder(
88     "unroll-allow-remainder", cl::Hidden,
89     cl::desc("Allow generation of a loop remainder (extra iterations) "
90              "when unrolling a loop."));
91 
92 static cl::opt<bool>
93     UnrollRuntime("unroll-runtime", cl::ZeroOrMore, cl::Hidden,
94                   cl::desc("Unroll loops with run-time trip counts"));
95 
96 static cl::opt<unsigned> UnrollMaxUpperBound(
97     "unroll-max-upperbound", cl::init(8), cl::Hidden,
98     cl::desc(
99         "The maximum trip count upper bound that is considered for unrolling"));
100 
101 static cl::opt<unsigned> PragmaUnrollThreshold(
102     "pragma-unroll-threshold", cl::init(16 * 1024), cl::Hidden,
103     cl::desc("Unrolled size limit for loops with an unroll(full) or "
104              "unroll_count pragma."));
105 
106 static cl::opt<unsigned> FlatLoopTripCountThreshold(
107     "flat-loop-tripcount-threshold", cl::init(5), cl::Hidden,
108     cl::desc("If the runtime tripcount for the loop is lower than the "
109              "threshold, the loop is considered as flat and will be less "
110              "aggressively unrolled."));
111 
112 static cl::opt<bool>
113     UnrollAllowPeeling("unroll-allow-peeling", cl::Hidden,
114                        cl::desc("Allows loops to be peeled when the dynamic "
115                                 "trip count is known to be low."));
116 
117 /// A magic value for use with the Threshold parameter to indicate
118 /// that the loop unroll should be performed regardless of how much
119 /// code expansion would result.
120 static const unsigned NoThreshold = UINT_MAX;
121 
122 /// Gather the various unrolling parameters based on the defaults, compiler
123 /// flags, TTI overrides and user specified parameters.
124 static TargetTransformInfo::UnrollingPreferences gatherUnrollingPreferences(
125     Loop *L, const TargetTransformInfo &TTI, Optional<unsigned> UserThreshold,
126     Optional<unsigned> UserCount, Optional<bool> UserAllowPartial,
127     Optional<bool> UserRuntime, Optional<bool> UserUpperBound) {
128   TargetTransformInfo::UnrollingPreferences UP;
129 
130   // Set up the defaults
131   UP.Threshold = 150;
132   UP.MaxPercentThresholdBoost = 400;
133   UP.OptSizeThreshold = 0;
134   UP.PartialThreshold = 150;
135   UP.PartialOptSizeThreshold = 0;
136   UP.Count = 0;
137   UP.PeelCount = 0;
138   UP.DefaultUnrollRuntimeCount = 8;
139   UP.MaxCount = UINT_MAX;
140   UP.FullUnrollMaxCount = UINT_MAX;
141   UP.BEInsns = 2;
142   UP.Partial = false;
143   UP.Runtime = false;
144   UP.AllowRemainder = true;
145   UP.AllowExpensiveTripCount = false;
146   UP.Force = false;
147   UP.UpperBound = false;
148   UP.AllowPeeling = false;
149 
150   // Override with any target specific settings
151   TTI.getUnrollingPreferences(L, UP);
152 
153   // Apply size attributes
154   if (L->getHeader()->getParent()->optForSize()) {
155     UP.Threshold = UP.OptSizeThreshold;
156     UP.PartialThreshold = UP.PartialOptSizeThreshold;
157   }
158 
159   // Apply any user values specified by cl::opt
160   if (UnrollThreshold.getNumOccurrences() > 0)
161     UP.Threshold = UnrollThreshold;
162   if (UnrollPartialThreshold.getNumOccurrences() > 0)
163     UP.PartialThreshold = UnrollPartialThreshold;
164   if (UnrollMaxPercentThresholdBoost.getNumOccurrences() > 0)
165     UP.MaxPercentThresholdBoost = UnrollMaxPercentThresholdBoost;
166   if (UnrollMaxCount.getNumOccurrences() > 0)
167     UP.MaxCount = UnrollMaxCount;
168   if (UnrollFullMaxCount.getNumOccurrences() > 0)
169     UP.FullUnrollMaxCount = UnrollFullMaxCount;
170   if (UnrollAllowPartial.getNumOccurrences() > 0)
171     UP.Partial = UnrollAllowPartial;
172   if (UnrollAllowRemainder.getNumOccurrences() > 0)
173     UP.AllowRemainder = UnrollAllowRemainder;
174   if (UnrollRuntime.getNumOccurrences() > 0)
175     UP.Runtime = UnrollRuntime;
176   if (UnrollMaxUpperBound == 0)
177     UP.UpperBound = false;
178   if (UnrollAllowPeeling.getNumOccurrences() > 0)
179     UP.AllowPeeling = UnrollAllowPeeling;
180 
181   // Apply user values provided by argument
182   if (UserThreshold.hasValue()) {
183     UP.Threshold = *UserThreshold;
184     UP.PartialThreshold = *UserThreshold;
185   }
186   if (UserCount.hasValue())
187     UP.Count = *UserCount;
188   if (UserAllowPartial.hasValue())
189     UP.Partial = *UserAllowPartial;
190   if (UserRuntime.hasValue())
191     UP.Runtime = *UserRuntime;
192   if (UserUpperBound.hasValue())
193     UP.UpperBound = *UserUpperBound;
194 
195   return UP;
196 }
197 
198 namespace {
199 /// A struct to densely store the state of an instruction after unrolling at
200 /// each iteration.
201 ///
202 /// This is designed to work like a tuple of <Instruction *, int> for the
203 /// purposes of hashing and lookup, but to be able to associate two boolean
204 /// states with each key.
205 struct UnrolledInstState {
206   Instruction *I;
207   int Iteration : 30;
208   unsigned IsFree : 1;
209   unsigned IsCounted : 1;
210 };
211 
212 /// Hashing and equality testing for a set of the instruction states.
213 struct UnrolledInstStateKeyInfo {
214   typedef DenseMapInfo<Instruction *> PtrInfo;
215   typedef DenseMapInfo<std::pair<Instruction *, int>> PairInfo;
216   static inline UnrolledInstState getEmptyKey() {
217     return {PtrInfo::getEmptyKey(), 0, 0, 0};
218   }
219   static inline UnrolledInstState getTombstoneKey() {
220     return {PtrInfo::getTombstoneKey(), 0, 0, 0};
221   }
222   static inline unsigned getHashValue(const UnrolledInstState &S) {
223     return PairInfo::getHashValue({S.I, S.Iteration});
224   }
225   static inline bool isEqual(const UnrolledInstState &LHS,
226                              const UnrolledInstState &RHS) {
227     return PairInfo::isEqual({LHS.I, LHS.Iteration}, {RHS.I, RHS.Iteration});
228   }
229 };
230 }
231 
232 namespace {
233 struct EstimatedUnrollCost {
234   /// \brief The estimated cost after unrolling.
235   unsigned UnrolledCost;
236 
237   /// \brief The estimated dynamic cost of executing the instructions in the
238   /// rolled form.
239   unsigned RolledDynamicCost;
240 };
241 }
242 
243 /// \brief Figure out if the loop is worth full unrolling.
244 ///
245 /// Complete loop unrolling can make some loads constant, and we need to know
246 /// if that would expose any further optimization opportunities.  This routine
247 /// estimates this optimization.  It computes cost of unrolled loop
248 /// (UnrolledCost) and dynamic cost of the original loop (RolledDynamicCost). By
249 /// dynamic cost we mean that we won't count costs of blocks that are known not
250 /// to be executed (i.e. if we have a branch in the loop and we know that at the
251 /// given iteration its condition would be resolved to true, we won't add up the
252 /// cost of the 'false'-block).
253 /// \returns Optional value, holding the RolledDynamicCost and UnrolledCost. If
254 /// the analysis failed (no benefits expected from the unrolling, or the loop is
255 /// too big to analyze), the returned value is None.
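///
/// As a hedged illustration (hypothetical source, not taken from an LLVM test
/// case), consider a loop such as:
///
/// \code
///   static const int Weights[4] = {1, 2, 3, 4};
///   for (int i = 0; i != 4; ++i)
///     Sum += Weights[i] * X[i];
/// \endcode
///
/// Simulating each of the four iterations folds the load of Weights[i] to a
/// constant, so UnrolledCost comes out well below RolledDynamicCost and full
/// unrolling looks profitable.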
256 static Optional<EstimatedUnrollCost>
257 analyzeLoopUnrollCost(const Loop *L, unsigned TripCount, DominatorTree &DT,
258                       ScalarEvolution &SE, const TargetTransformInfo &TTI,
259                       unsigned MaxUnrolledLoopSize) {
260   // We want to be able to scale offsets by the trip count and add more offsets
261   // to them without checking for overflows, and we already don't want to
262   // analyze *massive* trip counts, so we force the max to be reasonably small.
263   assert(UnrollMaxIterationsCountToAnalyze < (INT_MAX / 2) &&
264          "The unroll iterations max is too large!");
265 
266   // Only analyze inner loops. We can't properly estimate cost of nested loops
267   // and we won't visit inner loops again anyway.
268   if (!L->empty())
269     return None;
270 
271   // Don't simulate loops with a big or unknown trip count.
272   if (!UnrollMaxIterationsCountToAnalyze || !TripCount ||
273       TripCount > UnrollMaxIterationsCountToAnalyze)
274     return None;
275 
276   SmallSetVector<BasicBlock *, 16> BBWorklist;
277   SmallSetVector<std::pair<BasicBlock *, BasicBlock *>, 4> ExitWorklist;
278   DenseMap<Value *, Constant *> SimplifiedValues;
279   SmallVector<std::pair<Value *, Constant *>, 4> SimplifiedInputValues;
280 
281   // The estimated cost of the unrolled form of the loop. We try to estimate
282   // this by simplifying as much as we can while computing the estimate.
283   unsigned UnrolledCost = 0;
284 
285   // We also track the estimated dynamic (that is, actually executed) cost in
286   // the rolled form. This helps identify cases when the savings from unrolling
287   // aren't just from exposing dead control flow, but from an actual reduction
288   // in dynamically executed instructions due to the simplifications which we
289   // expect to occur after unrolling.
290   unsigned RolledDynamicCost = 0;
291 
292   // We track the simplification of each instruction in each iteration. We use
293   // this to recursively merge costs into the unrolled cost on-demand so that
294   // we don't count the cost of any dead code. This is essentially a map from
295   // <instruction, int> to <bool, bool>, but stored as a densely packed struct.
296   DenseSet<UnrolledInstState, UnrolledInstStateKeyInfo> InstCostMap;
297 
298   // A small worklist used to accumulate cost of instructions from each
299   // observable and reached root in the loop.
300   SmallVector<Instruction *, 16> CostWorklist;
301 
302   // PHI-used worklist used between iterations while accumulating cost.
303   SmallVector<Instruction *, 4> PHIUsedList;
304 
305   // Helper function to accumulate cost for instructions in the loop.
306   auto AddCostRecursively = [&](Instruction &RootI, int Iteration) {
307     assert(Iteration >= 0 && "Cannot have a negative iteration!");
308     assert(CostWorklist.empty() && "Must start with an empty cost list");
309     assert(PHIUsedList.empty() && "Must start with an empty phi used list");
310     CostWorklist.push_back(&RootI);
311     for (;; --Iteration) {
312       do {
313         Instruction *I = CostWorklist.pop_back_val();
314 
315         // InstCostMap only uses I and Iteration as a key, the other two values
316         // don't matter here.
317         auto CostIter = InstCostMap.find({I, Iteration, 0, 0});
318         if (CostIter == InstCostMap.end())
319           // If an input to a PHI node comes from a dead path through the loop
320           // we may have no cost data for it here. What that actually means is
321           // that it is free.
322           continue;
323         auto &Cost = *CostIter;
324         if (Cost.IsCounted)
325           // Already counted this instruction.
326           continue;
327 
328         // Mark that we are counting the cost of this instruction now.
329         Cost.IsCounted = true;
330 
331         // If this is a PHI node in the loop header, just add it to the PHI set.
332         if (auto *PhiI = dyn_cast<PHINode>(I))
333           if (PhiI->getParent() == L->getHeader()) {
334             assert(Cost.IsFree && "Loop PHIs shouldn't be evaluated as they "
335                                   "inherently simplify during unrolling.");
336             if (Iteration == 0)
337               continue;
338 
339             // Push the incoming value from the backedge into the PHI used list
340             // if it is an in-loop instruction. We'll use this to populate the
341             // cost worklist for the next iteration (as we count backwards).
342             if (auto *OpI = dyn_cast<Instruction>(
343                     PhiI->getIncomingValueForBlock(L->getLoopLatch())))
344               if (L->contains(OpI))
345                 PHIUsedList.push_back(OpI);
346             continue;
347           }
348 
349         // First accumulate the cost of this instruction.
350         if (!Cost.IsFree) {
351           UnrolledCost += TTI.getUserCost(I);
352           DEBUG(dbgs() << "Adding cost of instruction (iteration " << Iteration
353                        << "): ");
354           DEBUG(I->dump());
355         }
356 
357         // We must count the cost of every operand which is not free,
358         // recursively. If we reach a loop PHI node, simply add it to the set
359         // to be considered on the next iteration (backwards!).
360         for (Value *Op : I->operands()) {
361           // Check whether this operand is free due to being a constant or
362           // outside the loop.
363           auto *OpI = dyn_cast<Instruction>(Op);
364           if (!OpI || !L->contains(OpI))
365             continue;
366 
367           // Otherwise accumulate its cost.
368           CostWorklist.push_back(OpI);
369         }
370       } while (!CostWorklist.empty());
371 
372       if (PHIUsedList.empty())
373         // We've exhausted the search.
374         break;
375 
376       assert(Iteration > 0 &&
377              "Cannot track PHI-used values past the first iteration!");
378       CostWorklist.append(PHIUsedList.begin(), PHIUsedList.end());
379       PHIUsedList.clear();
380     }
381   };
382 
383   // Ensure that we don't violate the loop structure invariants relied on by
384   // this analysis.
385   assert(L->isLoopSimplifyForm() && "Must put loop into normal form first.");
386   assert(L->isLCSSAForm(DT) &&
387          "Must have loops in LCSSA form to track live-out values.");
388 
389   DEBUG(dbgs() << "Starting LoopUnroll profitability analysis...\n");
390 
391   // Simulate execution of each iteration of the loop, counting the
392   // instructions which would be simplified.
393   // Since the same load will take different values on different iterations,
394   // we literally have to go through all of the loop's iterations.
395   for (unsigned Iteration = 0; Iteration < TripCount; ++Iteration) {
396     DEBUG(dbgs() << " Analyzing iteration " << Iteration << "\n");
397 
398     // Prepare for the iteration by collecting any simplified entry or backedge
399     // inputs.
400     for (Instruction &I : *L->getHeader()) {
401       auto *PHI = dyn_cast<PHINode>(&I);
402       if (!PHI)
403         break;
404 
405       // The loop header PHI nodes must have exactly two inputs: one from the
406       // loop preheader and one from the loop latch.
407       assert(
408           PHI->getNumIncomingValues() == 2 &&
409           "Must have an incoming value only for the preheader and the latch.");
410 
411       Value *V = PHI->getIncomingValueForBlock(
412           Iteration == 0 ? L->getLoopPreheader() : L->getLoopLatch());
413       Constant *C = dyn_cast<Constant>(V);
414       if (Iteration != 0 && !C)
415         C = SimplifiedValues.lookup(V);
416       if (C)
417         SimplifiedInputValues.push_back({PHI, C});
418     }
419 
420     // Now clear and re-populate the map for the next iteration.
421     SimplifiedValues.clear();
422     while (!SimplifiedInputValues.empty())
423       SimplifiedValues.insert(SimplifiedInputValues.pop_back_val());
424 
425     UnrolledInstAnalyzer Analyzer(Iteration, SimplifiedValues, SE, L);
426 
427     BBWorklist.clear();
428     BBWorklist.insert(L->getHeader());
429     // Note that we *must not* cache the size; this loop grows the worklist.
430     for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
431       BasicBlock *BB = BBWorklist[Idx];
432 
433       // Visit all instructions in the given basic block and try to simplify
434       // them.  We don't change the actual IR, just count optimization
435       // opportunities.
436       for (Instruction &I : *BB) {
437         if (isa<DbgInfoIntrinsic>(I))
438           continue;
439 
440         // Track this instruction's expected baseline cost when executing the
441         // rolled loop form.
442         RolledDynamicCost += TTI.getUserCost(&I);
443 
444         // Visit the instruction to analyze its loop cost after unrolling,
445         // and if the visitor returns true, mark the instruction as free after
446         // unrolling and continue.
447         bool IsFree = Analyzer.visit(I);
448         bool Inserted = InstCostMap.insert({&I, (int)Iteration,
449                                            (unsigned)IsFree,
450                                            /*IsCounted*/ false}).second;
451         (void)Inserted;
452         assert(Inserted && "Cannot have a state for an unvisited instruction!");
453 
454         if (IsFree)
455           continue;
456 
457         // Can't properly model the cost of a call.
458         // FIXME: With a proper cost model we should be able to do it.
459         if (isa<CallInst>(&I))
460           return None;
461 
462         // If the instruction might have a side-effect, recursively account for
463         // the cost of it and all the instructions leading up to it.
464         if (I.mayHaveSideEffects())
465           AddCostRecursively(I, Iteration);
466 
467         // If the unrolled body turns out to be too big, bail out.
468         if (UnrolledCost > MaxUnrolledLoopSize) {
469           DEBUG(dbgs() << "  Exceeded threshold.. exiting.\n"
470                        << "  UnrolledCost: " << UnrolledCost
471                        << ", MaxUnrolledLoopSize: " << MaxUnrolledLoopSize
472                        << "\n");
473           return None;
474         }
475       }
476 
477       TerminatorInst *TI = BB->getTerminator();
478 
479       // Add in the live successors by first checking whether we have a terminator
480       // that may be simplified based on the values simplified by this call.
481       BasicBlock *KnownSucc = nullptr;
482       if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
483         if (BI->isConditional()) {
484           if (Constant *SimpleCond =
485                   SimplifiedValues.lookup(BI->getCondition())) {
486             // Just take the first successor if condition is undef
487             if (isa<UndefValue>(SimpleCond))
488               KnownSucc = BI->getSuccessor(0);
489             else if (ConstantInt *SimpleCondVal =
490                          dyn_cast<ConstantInt>(SimpleCond))
491               KnownSucc = BI->getSuccessor(SimpleCondVal->isZero() ? 1 : 0);
492           }
493         }
494       } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
495         if (Constant *SimpleCond =
496                 SimplifiedValues.lookup(SI->getCondition())) {
497           // Just take the first successor if condition is undef
498           if (isa<UndefValue>(SimpleCond))
499             KnownSucc = SI->getSuccessor(0);
500           else if (ConstantInt *SimpleCondVal =
501                        dyn_cast<ConstantInt>(SimpleCond))
502             KnownSucc = SI->findCaseValue(SimpleCondVal).getCaseSuccessor();
503         }
504       }
505       if (KnownSucc) {
506         if (L->contains(KnownSucc))
507           BBWorklist.insert(KnownSucc);
508         else
509           ExitWorklist.insert({BB, KnownSucc});
510         continue;
511       }
512 
513       // Add BB's successors to the worklist.
514       for (BasicBlock *Succ : successors(BB))
515         if (L->contains(Succ))
516           BBWorklist.insert(Succ);
517         else
518           ExitWorklist.insert({BB, Succ});
519       AddCostRecursively(*TI, Iteration);
520     }
521 
522     // If we found no optimization opportunities on the first iteration, we
523     // won't find them on later ones either.
524     if (UnrolledCost == RolledDynamicCost) {
525       DEBUG(dbgs() << "  No opportunities found.. exiting.\n"
526                    << "  UnrolledCost: " << UnrolledCost << "\n");
527       return None;
528     }
529   }
530 
531   while (!ExitWorklist.empty()) {
532     BasicBlock *ExitingBB, *ExitBB;
533     std::tie(ExitingBB, ExitBB) = ExitWorklist.pop_back_val();
534 
535     for (Instruction &I : *ExitBB) {
536       auto *PN = dyn_cast<PHINode>(&I);
537       if (!PN)
538         break;
539 
540       Value *Op = PN->getIncomingValueForBlock(ExitingBB);
541       if (auto *OpI = dyn_cast<Instruction>(Op))
542         if (L->contains(OpI))
543           AddCostRecursively(*OpI, TripCount - 1);
544     }
545   }
546 
547   DEBUG(dbgs() << "Analysis finished:\n"
548                << "UnrolledCost: " << UnrolledCost << ", "
549                << "RolledDynamicCost: " << RolledDynamicCost << "\n");
550   return {{UnrolledCost, RolledDynamicCost}};
551 }
552 
553 /// ApproximateLoopSize - Approximate the size of the loop.
554 static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
555                                     bool &NotDuplicatable, bool &Convergent,
556                                     const TargetTransformInfo &TTI,
557                                     AssumptionCache *AC, unsigned BEInsns) {
558   SmallPtrSet<const Value *, 32> EphValues;
559   CodeMetrics::collectEphemeralValues(L, AC, EphValues);
560 
561   CodeMetrics Metrics;
562   for (BasicBlock *BB : L->blocks())
563     Metrics.analyzeBasicBlock(BB, TTI, EphValues);
564   NumCalls = Metrics.NumInlineCandidates;
565   NotDuplicatable = Metrics.notDuplicatable;
566   Convergent = Metrics.convergent;
567 
568   unsigned LoopSize = Metrics.NumInsts;
569 
570   // Don't allow an estimate of size zero.  This would allow unrolling of loops
571   // with huge iteration counts, which is a compile time problem even if it's
572   // not a problem for code quality. Also, the code using this size may assume
573   // that each loop has at least three instructions (likely a conditional
574   // branch, a comparison feeding that branch, and some kind of loop increment
575   // feeding that comparison instruction).
576   LoopSize = std::max(LoopSize, BEInsns + 1);
577 
578   return LoopSize;
579 }
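// For example (illustrative numbers only): with the default BEInsns of 2 set up
// in gatherUnrollingPreferences, even a loop whose CodeMetrics instruction count
// is 1 is reported as having size max(1, 2 + 1) = 3.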
580 
581 // Returns the loop hint metadata node with the given name (for example,
582 // "llvm.loop.unroll.count").  If no such metadata node exists, then nullptr is
583 // returned.
584 static MDNode *GetUnrollMetadataForLoop(const Loop *L, StringRef Name) {
585   if (MDNode *LoopID = L->getLoopID())
586     return GetUnrollMetadata(LoopID, Name);
587   return nullptr;
588 }
589 
590 // Returns true if the loop has an unroll(full) pragma.
591 static bool HasUnrollFullPragma(const Loop *L) {
592   return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.full");
593 }
594 
595 // Returns true if the loop has an unroll(enable) pragma. This metadata is used
596 // for both "#pragma unroll" and "#pragma clang loop unroll(enable)" directives.
597 static bool HasUnrollEnablePragma(const Loop *L) {
598   return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.enable");
599 }
600 
601 // Returns true if the loop has an unroll(disable) pragma.
602 static bool HasUnrollDisablePragma(const Loop *L) {
603   return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.disable");
604 }
605 
606 // Returns true if the loop has a runtime unroll(disable) pragma.
607 static bool HasRuntimeUnrollDisablePragma(const Loop *L) {
608   return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.runtime.disable");
609 }
610 
611 // If the loop has an unroll_count pragma, return the (necessarily
612 // positive) value from the pragma.  Otherwise return 0.
613 static unsigned UnrollCountPragmaValue(const Loop *L) {
614   MDNode *MD = GetUnrollMetadataForLoop(L, "llvm.loop.unroll.count");
615   if (MD) {
616     assert(MD->getNumOperands() == 2 &&
617            "Unroll count hint metadata should have two operands.");
618     unsigned Count =
619         mdconst::extract<ConstantInt>(MD->getOperand(1))->getZExtValue();
620     assert(Count >= 1 && "Unroll count must be positive.");
621     return Count;
622   }
623   return 0;
624 }
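// For reference, the metadata emitted for "#pragma clang loop unroll_count(4)"
// looks roughly like the following (illustrative IR; node numbers are
// arbitrary):
//
//   br i1 %exitcond, label %exit, label %for.body, !llvm.loop !0
//   ...
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.count", i32 4}
//
// Operand 1 of the "llvm.loop.unroll.count" node carries the count extracted
// above.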
625 
626 // Remove existing unroll metadata and add unroll disable metadata to
627 // indicate the loop has already been unrolled.  This prevents a loop
628 // from being unrolled more than is directed by a pragma if the loop
629 // unrolling pass is run more than once (which it generally is).
630 static void SetLoopAlreadyUnrolled(Loop *L) {
631   MDNode *LoopID = L->getLoopID();
632   // First remove any existing loop unrolling metadata.
633   SmallVector<Metadata *, 4> MDs;
634   // Reserve first location for self reference to the LoopID metadata node.
635   MDs.push_back(nullptr);
636 
637   if (LoopID) {
638     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
639       bool IsUnrollMetadata = false;
640       MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
641       if (MD) {
642         const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
643         IsUnrollMetadata = S && S->getString().startswith("llvm.loop.unroll.");
644       }
645       if (!IsUnrollMetadata)
646         MDs.push_back(LoopID->getOperand(i));
647     }
648   }
649 
650   // Add unroll(disable) metadata to disable future unrolling.
651   LLVMContext &Context = L->getHeader()->getContext();
652   SmallVector<Metadata *, 1> DisableOperands;
653   DisableOperands.push_back(MDString::get(Context, "llvm.loop.unroll.disable"));
654   MDNode *DisableNode = MDNode::get(Context, DisableOperands);
655   MDs.push_back(DisableNode);
656 
657   MDNode *NewLoopID = MDNode::get(Context, MDs);
658   // Set operand 0 to refer to the loop id itself.
659   NewLoopID->replaceOperandWith(0, NewLoopID);
660   L->setLoopID(NewLoopID);
661 }
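// After this runs, the loop ID looks roughly like the following (illustrative
// IR; any surviving non-unroll operands are kept in place):
//
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.disable"}
//
// with operand 0 of !0 referring back to !0 itself, as set up above.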
662 
663 // Computes the boosting factor for complete unrolling.
664 // If fully unrolling the loop would save a lot of RolledDynamicCost, it would
665 // be beneficial to fully unroll the loop even if UnrolledCost is large. We
666 // use (RolledDynamicCost / UnrolledCost) to model the unroll benefit and adjust
667 // the unroll threshold accordingly.
668 static unsigned getFullUnrollBoostingFactor(const EstimatedUnrollCost &Cost,
669                                             unsigned MaxPercentThresholdBoost) {
670   if (Cost.RolledDynamicCost >= UINT_MAX / 100)
671     return 100;
672   else if (Cost.UnrolledCost != 0)
673     // The boosting factor is RolledDynamicCost / UnrolledCost
674     return std::min(100 * Cost.RolledDynamicCost / Cost.UnrolledCost,
675                     MaxPercentThresholdBoost);
676   else
677     return MaxPercentThresholdBoost;
678 }
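// A worked example with illustrative numbers: RolledDynamicCost = 800 and
// UnrolledCost = 200 give min(100 * 800 / 200, MaxPercentThresholdBoost) = 400
// with the default cap, i.e. the caller may scale its threshold by up to 4x.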
679 
680 // Returns loop size estimation for unrolled loop.
681 static uint64_t getUnrolledLoopSize(
682     unsigned LoopSize,
683     TargetTransformInfo::UnrollingPreferences &UP) {
684   assert(LoopSize >= UP.BEInsns && "LoopSize should not be less than BEInsns!");
685   return (uint64_t)(LoopSize - UP.BEInsns) * UP.Count + UP.BEInsns;
686 }
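// For example (illustrative numbers): LoopSize = 12, UP.BEInsns = 2 and
// UP.Count = 4 give (12 - 2) * 4 + 2 = 42, reflecting that the backedge
// bookkeeping instructions are not replicated with the rest of the body.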
687 
688 // Returns true if unroll count was set explicitly.
689 // Calculates unroll count and writes it to UP.Count.
690 static bool computeUnrollCount(
691     Loop *L, const TargetTransformInfo &TTI, DominatorTree &DT, LoopInfo *LI,
692     ScalarEvolution *SE, OptimizationRemarkEmitter *ORE, unsigned &TripCount,
693     unsigned MaxTripCount, unsigned &TripMultiple, unsigned LoopSize,
694     TargetTransformInfo::UnrollingPreferences &UP, bool &UseUpperBound) {
695   // Check for explicit Count.
696   // 1st priority is unroll count set by "unroll-count" option.
697   bool UserUnrollCount = UnrollCount.getNumOccurrences() > 0;
698   if (UserUnrollCount) {
699     UP.Count = UnrollCount;
700     UP.AllowExpensiveTripCount = true;
701     UP.Force = true;
702     if (UP.AllowRemainder && getUnrolledLoopSize(LoopSize, UP) < UP.Threshold)
703       return true;
704   }
705 
706   // 2nd priority is unroll count set by pragma.
707   unsigned PragmaCount = UnrollCountPragmaValue(L);
708   if (PragmaCount > 0) {
709     UP.Count = PragmaCount;
710     UP.Runtime = true;
711     UP.AllowExpensiveTripCount = true;
712     UP.Force = true;
713     if (UP.AllowRemainder &&
714         getUnrolledLoopSize(LoopSize, UP) < PragmaUnrollThreshold)
715       return true;
716   }
717   bool PragmaFullUnroll = HasUnrollFullPragma(L);
718   if (PragmaFullUnroll && TripCount != 0) {
719     UP.Count = TripCount;
720     if (getUnrolledLoopSize(LoopSize, UP) < PragmaUnrollThreshold)
721       return false;
722   }
723 
724   bool PragmaEnableUnroll = HasUnrollEnablePragma(L);
725   bool ExplicitUnroll = PragmaCount > 0 || PragmaFullUnroll ||
726                         PragmaEnableUnroll || UserUnrollCount;
727 
728   if (ExplicitUnroll && TripCount != 0) {
729     // If the loop has an unrolling pragma, we want to be more aggressive with
730     // unrolling limits. Set thresholds to at least the PragmaUnrollThreshold
731     // value, which is larger than the default limits.
732     UP.Threshold = std::max<unsigned>(UP.Threshold, PragmaUnrollThreshold);
733     UP.PartialThreshold =
734         std::max<unsigned>(UP.PartialThreshold, PragmaUnrollThreshold);
735   }
736 
737   // 3rd priority is full unroll count.
738   // Full unroll makes sense only when TripCount or its upper bound could be
739   // statically calculated.
740   // Also we need to check if we exceed FullUnrollMaxCount.
741   // If using the upper bound to unroll, TripMultiple should be set to 1 because
742   // we do not know when the loop may exit.
743   // MaxTripCount and ExactTripCount cannot both be nonzero since we only
744   // compute the former when the latter is zero.
745   unsigned ExactTripCount = TripCount;
746   assert((ExactTripCount == 0 || MaxTripCount == 0) &&
747          "ExactTripCount and MaxTripCount cannot both be nonzero.");
748   unsigned FullUnrollTripCount = ExactTripCount ? ExactTripCount : MaxTripCount;
749   UP.Count = FullUnrollTripCount;
750   if (FullUnrollTripCount && FullUnrollTripCount <= UP.FullUnrollMaxCount) {
751     // When computing the unrolled size, note that BEInsns are not replicated
752     // like the rest of the loop body.
753     if (getUnrolledLoopSize(LoopSize, UP) < UP.Threshold) {
754       UseUpperBound = (MaxTripCount == FullUnrollTripCount);
755       TripCount = FullUnrollTripCount;
756       TripMultiple = UP.UpperBound ? 1 : TripMultiple;
757       return ExplicitUnroll;
758     } else {
759       // The loop isn't that small, but we still can fully unroll it if that
760       // helps to remove a significant number of instructions.
761       // To check that, run additional analysis on the loop.
762       if (Optional<EstimatedUnrollCost> Cost = analyzeLoopUnrollCost(
763               L, FullUnrollTripCount, DT, *SE, TTI,
764               UP.Threshold * UP.MaxPercentThresholdBoost / 100)) {
765         unsigned Boost =
766             getFullUnrollBoostingFactor(*Cost, UP.MaxPercentThresholdBoost);
767         if (Cost->UnrolledCost < UP.Threshold * Boost / 100) {
768           UseUpperBound = (MaxTripCount == FullUnrollTripCount);
769           TripCount = FullUnrollTripCount;
770           TripMultiple = UP.UpperBound ? 1 : TripMultiple;
771           return ExplicitUnroll;
772         }
773       }
774     }
775   }
776 
777   // 4th priority is partial unrolling.
778   // Try partial unrolling only when TripCount could be statically calculated.
779   if (TripCount) {
780     UP.Partial |= ExplicitUnroll;
781     if (!UP.Partial) {
782       DEBUG(dbgs() << "  will not try to unroll partially because "
783                    << "-unroll-allow-partial not given\n");
784       UP.Count = 0;
785       return false;
786     }
787     if (UP.Count == 0)
788       UP.Count = TripCount;
789     if (UP.PartialThreshold != NoThreshold) {
790       // Reduce unroll count to be modulo of TripCount for partial unrolling.
791       if (getUnrolledLoopSize(LoopSize, UP) > UP.PartialThreshold)
792         UP.Count =
793             (std::max(UP.PartialThreshold, UP.BEInsns + 1) - UP.BEInsns) /
794             (LoopSize - UP.BEInsns);
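      // Worked example (illustrative numbers): PartialThreshold = 150,
      // BEInsns = 2 and LoopSize = 32 give (150 - 2) / (32 - 2) = 4 in integer
      // arithmetic; the code below then clamps the count to MaxCount and lowers
      // it until it divides TripCount.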
795       if (UP.Count > UP.MaxCount)
796         UP.Count = UP.MaxCount;
797       while (UP.Count != 0 && TripCount % UP.Count != 0)
798         UP.Count--;
799       if (UP.AllowRemainder && UP.Count <= 1) {
800         // If there is no Count that divides TripCount, set Count to the
801         // largest power-of-two factor that satisfies the threshold limit.
802         // As we'll create a fixup loop, do this type of unrolling only if a
803         // remainder loop is allowed.
804         UP.Count = UP.DefaultUnrollRuntimeCount;
805         while (UP.Count != 0 &&
806                getUnrolledLoopSize(LoopSize, UP) > UP.PartialThreshold)
807           UP.Count >>= 1;
808       }
809       if (UP.Count < 2) {
810         if (PragmaEnableUnroll)
811           ORE->emit(
812               OptimizationRemarkMissed(DEBUG_TYPE, "UnrollAsDirectedTooLarge",
813                                        L->getStartLoc(), L->getHeader())
814               << "Unable to unroll loop as directed by unroll(enable) pragma "
815                  "because unrolled size is too large.");
816         UP.Count = 0;
817       }
818     } else {
819       UP.Count = TripCount;
820     }
821     if ((PragmaFullUnroll || PragmaEnableUnroll) && TripCount &&
822         UP.Count != TripCount)
823       ORE->emit(
824           OptimizationRemarkMissed(DEBUG_TYPE, "FullUnrollAsDirectedTooLarge",
825                                    L->getStartLoc(), L->getHeader())
826           << "Unable to fully unroll loop as directed by unroll pragma because "
827              "unrolled size is too large.");
828     return ExplicitUnroll;
829   }
830   assert(TripCount == 0 &&
831          "All cases when TripCount is constant should be covered here.");
832   if (PragmaFullUnroll)
833     ORE->emit(
834         OptimizationRemarkMissed(DEBUG_TYPE,
835                                  "CantFullUnrollAsDirectedRuntimeTripCount",
836                                  L->getStartLoc(), L->getHeader())
837         << "Unable to fully unroll loop as directed by unroll(full) pragma "
838            "because loop has a runtime trip count.");
839 
840   // 5th priority is loop peeling
841   computePeelCount(L, LoopSize, UP);
842   if (UP.PeelCount) {
843     UP.Runtime = false;
844     UP.Count = 1;
845     return ExplicitUnroll;
846   }
847 
848   // 6th priority is runtime unrolling.
849   // Don't unroll a runtime trip count loop when it is disabled.
850   if (HasRuntimeUnrollDisablePragma(L)) {
851     UP.Count = 0;
852     return false;
853   }
854 
855   // Check if the runtime trip count is too small when a profile is available.
856   if (L->getHeader()->getParent()->getEntryCount()) {
857     if (auto ProfileTripCount = getLoopEstimatedTripCount(L)) {
858       if (*ProfileTripCount < FlatLoopTripCountThreshold)
859         return false;
860       else
861         UP.AllowExpensiveTripCount = true;
862     }
863   }
864 
865   // Reduce count based on the type of unrolling and the threshold values.
866   UP.Runtime |= PragmaEnableUnroll || PragmaCount > 0 || UserUnrollCount;
867   if (!UP.Runtime) {
868     DEBUG(dbgs() << "  will not try to unroll loop with runtime trip count "
869                  << "because -unroll-runtime was not given\n");
870     UP.Count = 0;
871     return false;
872   }
873   if (UP.Count == 0)
874     UP.Count = UP.DefaultUnrollRuntimeCount;
875 
876   // Reduce unroll count to be the largest power-of-two factor of
877   // the original count which satisfies the threshold limit.
878   while (UP.Count != 0 &&
879          getUnrolledLoopSize(LoopSize, UP) > UP.PartialThreshold)
880     UP.Count >>= 1;
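  // Worked example (illustrative numbers): DefaultUnrollRuntimeCount = 8,
  // LoopSize = 40, BEInsns = 2 and PartialThreshold = 150 give unrolled sizes
  // of 306, 154 and 78 for counts of 8, 4 and 2, so the loop above settles on
  // a count of 2.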
881 
882 #ifndef NDEBUG
883   unsigned OrigCount = UP.Count;
884 #endif
885 
886   if (!UP.AllowRemainder && UP.Count != 0 && (TripMultiple % UP.Count) != 0) {
887     while (UP.Count != 0 && TripMultiple % UP.Count != 0)
888       UP.Count >>= 1;
889     DEBUG(dbgs() << "Remainder loop is restricted (that could be architecture "
890                     "specific or because the loop contains a convergent "
891                     "instruction), so unroll count must divide the trip "
892                     "multiple, "
893                  << TripMultiple << ".  Reducing unroll count from "
894                  << OrigCount << " to " << UP.Count << ".\n");
895     using namespace ore;
896     if (PragmaCount > 0 && !UP.AllowRemainder)
897       ORE->emit(
898           OptimizationRemarkMissed(DEBUG_TYPE,
899                                    "DifferentUnrollCountFromDirected",
900                                    L->getStartLoc(), L->getHeader())
901           << "Unable to unroll loop the number of times directed by "
902              "unroll_count pragma because remainder loop is restricted "
903              "(that could be architecture specific or because the loop "
904              "contains a convergent instruction) and so must have an unroll "
905              "count that divides the loop trip multiple of "
906           << NV("TripMultiple", TripMultiple) << ".  Unrolling instead "
907           << NV("UnrollCount", UP.Count) << " time(s).");
908   }
909 
910   if (UP.Count > UP.MaxCount)
911     UP.Count = UP.MaxCount;
912   DEBUG(dbgs() << "  partially unrolling with count: " << UP.Count << "\n");
913   if (UP.Count < 2)
914     UP.Count = 0;
915   return ExplicitUnroll;
916 }
917 
918 static bool tryToUnrollLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
919                             ScalarEvolution *SE, const TargetTransformInfo &TTI,
920                             AssumptionCache &AC, OptimizationRemarkEmitter &ORE,
921                             bool PreserveLCSSA,
922                             Optional<unsigned> ProvidedCount,
923                             Optional<unsigned> ProvidedThreshold,
924                             Optional<bool> ProvidedAllowPartial,
925                             Optional<bool> ProvidedRuntime,
926                             Optional<bool> ProvidedUpperBound) {
927   DEBUG(dbgs() << "Loop Unroll: F[" << L->getHeader()->getParent()->getName()
928                << "] Loop %" << L->getHeader()->getName() << "\n");
929   if (HasUnrollDisablePragma(L))
930     return false;
931   if (!L->isLoopSimplifyForm()) {
932     DEBUG(
933         dbgs() << "  Not unrolling loop which is not in loop-simplify form.\n");
934     return false;
935   }
936 
937   unsigned NumInlineCandidates;
938   bool NotDuplicatable;
939   bool Convergent;
940   TargetTransformInfo::UnrollingPreferences UP = gatherUnrollingPreferences(
941       L, TTI, ProvidedThreshold, ProvidedCount, ProvidedAllowPartial,
942       ProvidedRuntime, ProvidedUpperBound);
943   // Exit early if unrolling is disabled.
944   if (UP.Threshold == 0 && (!UP.Partial || UP.PartialThreshold == 0))
945     return false;
946   unsigned LoopSize = ApproximateLoopSize(
947       L, NumInlineCandidates, NotDuplicatable, Convergent, TTI, &AC, UP.BEInsns);
948   DEBUG(dbgs() << "  Loop Size = " << LoopSize << "\n");
949   if (NotDuplicatable) {
950     DEBUG(dbgs() << "  Not unrolling loop which contains non-duplicatable"
951                  << " instructions.\n");
952     return false;
953   }
954   if (NumInlineCandidates != 0) {
955     DEBUG(dbgs() << "  Not unrolling loop with inlinable calls.\n");
956     return false;
957   }
958 
959   // Find trip count and trip multiple if count is not available
960   unsigned TripCount = 0;
961   unsigned MaxTripCount = 0;
962   unsigned TripMultiple = 1;
963   // If there are multiple exiting blocks but one of them is the latch, use the
964   // latch for the trip count estimation. Otherwise insist on a single exiting
965   // block for the trip count estimation.
966   BasicBlock *ExitingBlock = L->getLoopLatch();
967   if (!ExitingBlock || !L->isLoopExiting(ExitingBlock))
968     ExitingBlock = L->getExitingBlock();
969   if (ExitingBlock) {
970     TripCount = SE->getSmallConstantTripCount(L, ExitingBlock);
971     TripMultiple = SE->getSmallConstantTripMultiple(L, ExitingBlock);
972   }
973 
974   // If the loop contains a convergent operation, the prelude we'd add
975   // to do the first few instructions before we hit the unrolled loop
976   // is unsafe -- it adds a control-flow dependency to the convergent
977   // operation.  Therefore restrict the remainder loop (try unrolling without one).
978   //
979   // TODO: This is quite conservative.  In practice, convergent_op()
980   // is likely to be called unconditionally in the loop.  In this
981   // case, the program would be ill-formed (on most architectures)
982   // unless n were the same on all threads in a thread group.
983   // Assuming n is the same on all threads, any kind of unrolling is
984   // safe.  But currently llvm's notion of convergence isn't powerful
985   // enough to express this.
986   if (Convergent)
987     UP.AllowRemainder = false;
988 
989   // Try to find the trip count upper bound if we cannot find the exact trip
990   // count.
991   bool MaxOrZero = false;
992   if (!TripCount) {
993     MaxTripCount = SE->getSmallConstantMaxTripCount(L);
994     MaxOrZero = SE->isBackedgeTakenCountMaxOrZero(L);
995     // We can unroll by the upper bound amount if it's generally allowed or if
996     // we know that the loop is executed either the upper bound or zero times.
997     // (MaxOrZero unrolling keeps only the first loop test, so the number of
998     // loop tests remains the same compared to the non-unrolled version, whereas
999     // the generic upper bound unrolling keeps all but the last loop test so the
1000     // number of loop tests goes up which may end up being worse on targets with
1001     // constrained branch predictor resources, so is controlled by an option.)
1002     // In addition we only unroll small upper bounds.
1003     if (!(UP.UpperBound || MaxOrZero) || MaxTripCount > UnrollMaxUpperBound) {
1004       MaxTripCount = 0;
1005     }
1006   }
1007 
1008   // computeUnrollCount() decides whether it is beneficial to use the upper bound to
1009   // fully unroll the loop.
1010   bool UseUpperBound = false;
1011   bool IsCountSetExplicitly =
1012       computeUnrollCount(L, TTI, DT, LI, SE, &ORE, TripCount, MaxTripCount,
1013                          TripMultiple, LoopSize, UP, UseUpperBound);
1014   if (!UP.Count)
1015     return false;
1016   // The unroll factor (Count) must be less than or equal to TripCount.
1017   if (TripCount && UP.Count > TripCount)
1018     UP.Count = TripCount;
1019 
1020   // Unroll the loop.
1021   if (!UnrollLoop(L, UP.Count, TripCount, UP.Force, UP.Runtime,
1022                   UP.AllowExpensiveTripCount, UseUpperBound, MaxOrZero,
1023                   TripMultiple, UP.PeelCount, LI, SE, &DT, &AC, &ORE,
1024                   PreserveLCSSA))
1025     return false;
1026 
1027   // If the loop has an unroll count pragma or was unrolled with an explicitly
1028   // set count, mark the loop as unrolled to prevent unrolling beyond what was requested.
1029   // If the loop was peeled, we already "used up" the profile information
1030   // we had, so we don't want to unroll or peel again.
1031   if (IsCountSetExplicitly || UP.PeelCount)
1032     SetLoopAlreadyUnrolled(L);
1033 
1034   return true;
1035 }
1036 
1037 namespace {
1038 class LoopUnroll : public LoopPass {
1039 public:
1040   static char ID; // Pass ID, replacement for typeid
1041   LoopUnroll(Optional<unsigned> Threshold = None,
1042              Optional<unsigned> Count = None,
1043              Optional<bool> AllowPartial = None, Optional<bool> Runtime = None,
1044              Optional<bool> UpperBound = None)
1045       : LoopPass(ID), ProvidedCount(std::move(Count)),
1046         ProvidedThreshold(Threshold), ProvidedAllowPartial(AllowPartial),
1047         ProvidedRuntime(Runtime), ProvidedUpperBound(UpperBound) {
1048     initializeLoopUnrollPass(*PassRegistry::getPassRegistry());
1049   }
1050 
1051   Optional<unsigned> ProvidedCount;
1052   Optional<unsigned> ProvidedThreshold;
1053   Optional<bool> ProvidedAllowPartial;
1054   Optional<bool> ProvidedRuntime;
1055   Optional<bool> ProvidedUpperBound;
1056 
1057   bool runOnLoop(Loop *L, LPPassManager &) override {
1058     if (skipLoop(L))
1059       return false;
1060 
1061     Function &F = *L->getHeader()->getParent();
1062 
1063     auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1064     LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1065     ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1066     const TargetTransformInfo &TTI =
1067         getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1068     auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1069     // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
1070     // pass.  Function analyses need to be preserved across loop transformations
1071     // but ORE cannot be preserved (see comment before the pass definition).
1072     OptimizationRemarkEmitter ORE(&F);
1073     bool PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
1074 
1075     return tryToUnrollLoop(L, DT, LI, SE, TTI, AC, ORE, PreserveLCSSA,
1076                            ProvidedCount, ProvidedThreshold,
1077                            ProvidedAllowPartial, ProvidedRuntime,
1078                            ProvidedUpperBound);
1079   }
1080 
1081   /// This transformation requires natural loop information & requires that
1082   /// loop preheaders be inserted into the CFG...
1083   ///
1084   void getAnalysisUsage(AnalysisUsage &AU) const override {
1085     AU.addRequired<AssumptionCacheTracker>();
1086     AU.addRequired<TargetTransformInfoWrapperPass>();
1087     // FIXME: Loop passes are required to preserve domtree, and for now we just
1088     // recreate dom info if anything gets unrolled.
1089     getLoopAnalysisUsage(AU);
1090   }
1091 };
1092 }
1093 
1094 char LoopUnroll::ID = 0;
1095 INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
1096 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1097 INITIALIZE_PASS_DEPENDENCY(LoopPass)
1098 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
1099 INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
1100 
1101 Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial,
1102                                  int Runtime, int UpperBound) {
1103   // TODO: It would make more sense for this function to take the optionals
1104   // directly, but that's dangerous since it would silently break out of tree
1105   // callers.
1106   return new LoopUnroll(Threshold == -1 ? None : Optional<unsigned>(Threshold),
1107                         Count == -1 ? None : Optional<unsigned>(Count),
1108                         AllowPartial == -1 ? None
1109                                            : Optional<bool>(AllowPartial),
1110                         Runtime == -1 ? None : Optional<bool>(Runtime),
1111                         UpperBound == -1 ? None : Optional<bool>(UpperBound));
1112 }
1113 
1114 Pass *llvm::createSimpleLoopUnrollPass() {
1115   return llvm::createLoopUnrollPass(-1, -1, 0, 0, 0);
1116 }
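// A hedged usage sketch for the legacy pass manager (hypothetical driver code,
// not part of this file); passing -1 for a parameter means "not provided", so
// the default or target preference is used:
//
//   legacy::PassManager PM;
//   PM.add(createLoopUnrollPass(/*Threshold=*/-1, /*Count=*/-1,
//                               /*AllowPartial=*/1, /*Runtime=*/-1,
//                               /*UpperBound=*/-1));
//   PM.run(M);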
1117 
1118 PreservedAnalyses LoopUnrollPass::run(Loop &L, LoopAnalysisManager &AM,
1119                                       LoopStandardAnalysisResults &AR,
1120                                       LPMUpdater &) {
1121   const auto &FAM =
1122       AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager();
1123   Function *F = L.getHeader()->getParent();
1124 
1125   auto *ORE = FAM.getCachedResult<OptimizationRemarkEmitterAnalysis>(*F);
1126   // FIXME: This should probably be optional rather than required.
1127   if (!ORE)
1128     report_fatal_error("LoopUnrollPass: OptimizationRemarkEmitterAnalysis not "
1129                        "cached at a higher level");
1130 
1131   bool Changed = tryToUnrollLoop(&L, AR.DT, &AR.LI, &AR.SE, AR.TTI, AR.AC, *ORE,
1132                                  /*PreserveLCSSA*/ true, ProvidedCount,
1133                                  ProvidedThreshold, ProvidedAllowPartial,
1134                                  ProvidedRuntime, ProvidedUpperBound);
1135   if (!Changed)
1136     return PreservedAnalyses::all();
1137 
1138   return getLoopPassPreservedAnalyses();
1139 }
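// A hedged note on driving this pass with the new pass manager: because it
// requires OptimizationRemarkEmitterAnalysis to be cached (see the
// report_fatal_error above), an opt invocation would look something like the
// following (assumed pipeline syntax for this revision):
//
//   opt -passes='require<opt-remark-emit>,loop(unroll)' -S input.ll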
1140