xref: /llvm-project/llvm/lib/Transforms/Utils/LoopPeel.cpp (revision d1b19da854fde9077a53400fabff486dc6dba022)
1 //===- LoopPeel.cpp -------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Loop Peeling Utilities.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/Transforms/Utils/LoopPeel.h"
13 #include "llvm/ADT/DenseMap.h"
14 #include "llvm/ADT/Optional.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/Loads.h"
18 #include "llvm/Analysis/LoopInfo.h"
19 #include "llvm/Analysis/LoopIterator.h"
20 #include "llvm/Analysis/ScalarEvolution.h"
21 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
22 #include "llvm/Analysis/TargetTransformInfo.h"
23 #include "llvm/IR/BasicBlock.h"
24 #include "llvm/IR/Dominators.h"
25 #include "llvm/IR/Function.h"
26 #include "llvm/IR/InstrTypes.h"
27 #include "llvm/IR/Instruction.h"
28 #include "llvm/IR/Instructions.h"
29 #include "llvm/IR/LLVMContext.h"
30 #include "llvm/IR/MDBuilder.h"
31 #include "llvm/IR/PatternMatch.h"
32 #include "llvm/IR/ProfDataUtils.h"
33 #include "llvm/Support/Casting.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
38 #include "llvm/Transforms/Utils/Cloning.h"
39 #include "llvm/Transforms/Utils/LoopSimplify.h"
40 #include "llvm/Transforms/Utils/LoopUtils.h"
41 #include "llvm/Transforms/Utils/ValueMapper.h"
42 #include <algorithm>
43 #include <cassert>
44 #include <cstdint>
45 
46 using namespace llvm;
47 using namespace llvm::PatternMatch;
48 
#define DEBUG_TYPE "loop-peel"

// Number of loops this pass actually peeled; reported via -stats.
STATISTIC(NumPeeled, "Number of loops peeled");

// Testing override for the peel count that would otherwise come from the
// cost model / target preferences.
static cl::opt<unsigned> UnrollPeelCount(
    "unroll-peel-count", cl::Hidden,
    cl::desc("Set the unroll peeling count, for testing purposes"));

// Master switch for profile-driven peeling (defaults to enabled).
static cl::opt<bool>
    UnrollAllowPeeling("unroll-allow-peeling", cl::init(true), cl::Hidden,
                       cl::desc("Allows loops to be peeled when the dynamic "
                                "trip count is known to be low."));

// By default only innermost loops are peeled; this relaxes that restriction.
static cl::opt<bool>
    UnrollAllowLoopNestsPeeling("unroll-allow-loop-nests-peeling",
                                cl::init(false), cl::Hidden,
                                cl::desc("Allows loop nests to be peeled."));

// Upper bound on the total number of iterations peeled off a loop
// (including iterations recorded as already peeled in loop metadata).
static cl::opt<unsigned> UnrollPeelMaxCount(
    "unroll-peel-max-count", cl::init(7), cl::Hidden,
    cl::desc("Max average trip count which will cause loop peeling."));

// Non-zero forces exactly this many iterations to be peeled, bypassing the
// heuristics entirely.
static cl::opt<unsigned> UnrollForcePeelCount(
    "unroll-force-peel-count", cl::init(0), cl::Hidden,
    cl::desc("Force a peel count regardless of profiling information."));

// Escape hatch that restricts peeling to loops whose non-latch exits lead to
// deopt/unreachable (see canPeel); added for convergent-target issues.
static cl::opt<bool> DisableAdvancedPeeling(
    "disable-advanced-peeling", cl::init(false), cl::Hidden,
    cl::desc(
        "Disable advance peeling. Issues for convergent targets (D134803)."));

// Loop-metadata key used to remember how many iterations were already peeled
// off this loop by previous runs, so the UnrollPeelMaxCount cap is cumulative.
static const char *PeeledCountMetaData = "llvm.loop.peeled.count";
81 
82 // Check whether we are capable of peeling this loop.
83 bool llvm::canPeel(Loop *L) {
84   // Make sure the loop is in simplified form
85   if (!L->isLoopSimplifyForm())
86     return false;
87   if (!DisableAdvancedPeeling)
88     return true;
89 
90   SmallVector<BasicBlock *, 4> Exits;
91   L->getUniqueNonLatchExitBlocks(Exits);
92   // The latch must either be the only exiting block or all non-latch exit
93   // blocks have either a deopt or unreachable terminator or compose a chain of
94   // blocks where the last one is either deopt or unreachable terminated. Both
95   // deopt and unreachable terminators are a strong indication they are not
96   // taken. Note that this is a profitability check, not a legality check. Also
97   // note that LoopPeeling currently can only update the branch weights of latch
98   // blocks and branch weights to blocks with deopt or unreachable do not need
99   // updating.
100   return llvm::all_of(Exits, IsBlockFollowedByDeoptOrUnreachable);
101 }
102 
103 // This function calculates the number of iterations after which the given Phi
104 // becomes an invariant. The pre-calculated values are memorized in the map. The
105 // function (shortcut is I) is calculated according to the following definition:
106 // Given %x = phi <Inputs from above the loop>, ..., [%y, %back.edge].
107 //   If %y is a loop invariant, then I(%x) = 1.
108 //   If %y is a Phi from the loop header, I(%x) = I(%y) + 1.
109 //   Otherwise, I(%x) is infinite.
110 // TODO: Actually if %y is an expression that depends only on Phi %z and some
111 //       loop invariants, we can estimate I(%x) = I(%z) + 1. The example
112 //       looks like:
113 //         %x = phi(0, %a),  <-- becomes invariant starting from 3rd iteration.
114 //         %y = phi(0, 5),
115 //         %a = %y + 1.
116 static Optional<unsigned> calculateIterationsToInvariance(
117     PHINode *Phi, Loop *L, BasicBlock *BackEdge,
118     SmallDenseMap<PHINode *, Optional<unsigned> > &IterationsToInvariance) {
119   assert(Phi->getParent() == L->getHeader() &&
120          "Non-loop Phi should not be checked for turning into invariant.");
121   assert(BackEdge == L->getLoopLatch() && "Wrong latch?");
122   // If we already know the answer, take it from the map.
123   auto I = IterationsToInvariance.find(Phi);
124   if (I != IterationsToInvariance.end())
125     return I->second;
126 
127   // Otherwise we need to analyze the input from the back edge.
128   Value *Input = Phi->getIncomingValueForBlock(BackEdge);
129   // Place infinity to map to avoid infinite recursion for cycled Phis. Such
130   // cycles can never stop on an invariant.
131   IterationsToInvariance[Phi] = None;
132   Optional<unsigned> ToInvariance;
133 
134   if (L->isLoopInvariant(Input))
135     ToInvariance = 1u;
136   else if (PHINode *IncPhi = dyn_cast<PHINode>(Input)) {
137     // Only consider Phis in header block.
138     if (IncPhi->getParent() != L->getHeader())
139       return None;
140     // If the input becomes an invariant after X iterations, then our Phi
141     // becomes an invariant after X + 1 iterations.
142     auto InputToInvariance = calculateIterationsToInvariance(
143         IncPhi, L, BackEdge, IterationsToInvariance);
144     if (InputToInvariance)
145       ToInvariance = *InputToInvariance + 1u;
146   }
147 
148   // If we found that this Phi lies in an invariant chain, update the map.
149   if (ToInvariance)
150     IterationsToInvariance[Phi] = ToInvariance;
151   return ToInvariance;
152 }
153 
154 // Try to find any invariant memory reads that will become dereferenceable in
155 // the remainder loop after peeling. The load must also be used (transitively)
156 // by an exit condition. Returns the number of iterations to peel off (at the
157 // moment either 0 or 1).
158 static unsigned peelToTurnInvariantLoadsDerefencebale(Loop &L,
159                                                       DominatorTree &DT,
160                                                       AssumptionCache *AC) {
161   // Skip loops with a single exiting block, because there should be no benefit
162   // for the heuristic below.
163   if (L.getExitingBlock())
164     return 0;
165 
166   // All non-latch exit blocks must have an UnreachableInst terminator.
167   // Otherwise the heuristic below may not be profitable.
168   SmallVector<BasicBlock *, 4> Exits;
169   L.getUniqueNonLatchExitBlocks(Exits);
170   if (any_of(Exits, [](const BasicBlock *BB) {
171         return !isa<UnreachableInst>(BB->getTerminator());
172       }))
173     return 0;
174 
175   // Now look for invariant loads that dominate the latch and are not known to
176   // be dereferenceable. If there are such loads and no writes, they will become
177   // dereferenceable in the loop if the first iteration is peeled off. Also
178   // collect the set of instructions controlled by such loads. Only peel if an
179   // exit condition uses (transitively) such a load.
180   BasicBlock *Header = L.getHeader();
181   BasicBlock *Latch = L.getLoopLatch();
182   SmallPtrSet<Value *, 8> LoadUsers;
183   const DataLayout &DL = L.getHeader()->getModule()->getDataLayout();
184   for (BasicBlock *BB : L.blocks()) {
185     for (Instruction &I : *BB) {
186       if (I.mayWriteToMemory())
187         return 0;
188 
189       auto Iter = LoadUsers.find(&I);
190       if (Iter != LoadUsers.end()) {
191         for (Value *U : I.users())
192           LoadUsers.insert(U);
193       }
194       // Do not look for reads in the header; they can already be hoisted
195       // without peeling.
196       if (BB == Header)
197         continue;
198       if (auto *LI = dyn_cast<LoadInst>(&I)) {
199         Value *Ptr = LI->getPointerOperand();
200         if (DT.dominates(BB, Latch) && L.isLoopInvariant(Ptr) &&
201             !isDereferenceablePointer(Ptr, LI->getType(), DL, LI, AC, &DT))
202           for (Value *U : I.users())
203             LoadUsers.insert(U);
204       }
205     }
206   }
207   SmallVector<BasicBlock *> ExitingBlocks;
208   L.getExitingBlocks(ExitingBlocks);
209   if (any_of(ExitingBlocks, [&LoadUsers](BasicBlock *Exiting) {
210         return LoadUsers.contains(Exiting->getTerminator());
211       }))
212     return 1;
213   return 0;
214 }
215 
// Return the number of iterations to peel off that make conditions in the
// body true/false. For example, if we peel 2 iterations off the loop below,
// the condition i < 2 can be evaluated at compile time.
//  for (i = 0; i < n; i++)
//    if (i < 2)
//      ..
//    else
//      ..
//   }
static unsigned countToEliminateCompares(Loop &L, unsigned MaxPeelCount,
                                         ScalarEvolution &SE) {
  assert(L.isLoopSimplifyForm() && "Loop needs to be in loop simplify form");
  unsigned DesiredPeelCount = 0;

  // Consider every conditional branch inside the loop body.
  for (auto *BB : L.blocks()) {
    auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || BI->isUnconditional())
      continue;

    // Ignore loop exit condition.
    if (L.getLoopLatch() == BB)
      continue;

    Value *Condition = BI->getCondition();
    Value *LeftVal, *RightVal;
    CmpInst::Predicate Pred;
    if (!match(Condition, m_ICmp(Pred, m_Value(LeftVal), m_Value(RightVal))))
      continue;

    const SCEV *LeftSCEV = SE.getSCEV(LeftVal);
    const SCEV *RightSCEV = SE.getSCEV(RightVal);

    // Do not consider predicates that are known to be true or false
    // independently of the loop iteration.
    if (SE.evaluatePredicate(Pred, LeftSCEV, RightSCEV))
      continue;

    // Check if we have a condition with one AddRec and one non AddRec
    // expression. Normalize LeftSCEV to be the AddRec.
    if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
      if (isa<SCEVAddRecExpr>(RightSCEV)) {
        std::swap(LeftSCEV, RightSCEV);
        Pred = ICmpInst::getSwappedPredicate(Pred);
      } else
        continue;
    }

    const SCEVAddRecExpr *LeftAR = cast<SCEVAddRecExpr>(LeftSCEV);

    // Avoid huge SCEV computations in the loop below, make sure we only
    // consider AddRecs of the loop we are trying to peel.
    if (!LeftAR->isAffine() || LeftAR->getLoop() != &L)
      continue;
    // The reasoning below requires the predicate to be monotone over the
    // recurrence, or (for equality predicates) the recurrence to never
    // self-wrap.
    if (!(ICmpInst::isEquality(Pred) && LeftAR->hasNoSelfWrap()) &&
        !SE.getMonotonicPredicateType(LeftAR, Pred))
      continue;

    // Check if extending the current DesiredPeelCount lets us evaluate Pred
    // or !Pred in the loop body statically.
    unsigned NewPeelCount = DesiredPeelCount;

    const SCEV *IterVal = LeftAR->evaluateAtIteration(
        SE.getConstant(LeftSCEV->getType(), NewPeelCount), SE);

    // If the original condition is not known, get the negated predicate
    // (which holds on the else branch) and check if it is known. This allows
    // us to peel of iterations that make the original condition false.
    if (!SE.isKnownPredicate(Pred, IterVal, RightSCEV))
      Pred = ICmpInst::getInversePredicate(Pred);

    const SCEV *Step = LeftAR->getStepRecurrence(SE);
    const SCEV *NextIterVal = SE.getAddExpr(IterVal, Step);
    // Advance the symbolic iteration value by one step and count the extra
    // peeled iteration.
    auto PeelOneMoreIteration = [&IterVal, &NextIterVal, &SE, Step,
                                 &NewPeelCount]() {
      IterVal = NextIterVal;
      NextIterVal = SE.getAddExpr(IterVal, Step);
      NewPeelCount++;
    };

    auto CanPeelOneMoreIteration = [&NewPeelCount, &MaxPeelCount]() {
      return NewPeelCount < MaxPeelCount;
    };

    // Peel as long as the predicate provably holds for the peeled iteration.
    while (CanPeelOneMoreIteration() &&
           SE.isKnownPredicate(Pred, IterVal, RightSCEV))
      PeelOneMoreIteration();

    // With *that* peel count, does the predicate !Pred become known in the
    // first iteration of the loop body after peeling?
    if (!SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), IterVal,
                             RightSCEV))
      continue; // If not, give up.

    // However, for equality comparisons, that isn't always sufficient to
    // eliminate the comparison in loop body, we may need to peel one more
    // iteration. See if that makes !Pred become unknown again.
    if (ICmpInst::isEquality(Pred) &&
        !SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), NextIterVal,
                             RightSCEV) &&
        !SE.isKnownPredicate(Pred, IterVal, RightSCEV) &&
        SE.isKnownPredicate(Pred, NextIterVal, RightSCEV)) {
      if (!CanPeelOneMoreIteration())
        continue; // Need to peel one more iteration, but can't. Give up.
      PeelOneMoreIteration(); // Great!
    }

    DesiredPeelCount = std::max(DesiredPeelCount, NewPeelCount);
  }

  return DesiredPeelCount;
}
327 
328 /// This "heuristic" exactly matches implicit behavior which used to exist
329 /// inside getLoopEstimatedTripCount.  It was added here to keep an
330 /// improvement inside that API from causing peeling to become more aggressive.
331 /// This should probably be removed.
332 static bool violatesLegacyMultiExitLoopCheck(Loop *L) {
333   BasicBlock *Latch = L->getLoopLatch();
334   if (!Latch)
335     return true;
336 
337   BranchInst *LatchBR = dyn_cast<BranchInst>(Latch->getTerminator());
338   if (!LatchBR || LatchBR->getNumSuccessors() != 2 || !L->isLoopExiting(Latch))
339     return true;
340 
341   assert((LatchBR->getSuccessor(0) == L->getHeader() ||
342           LatchBR->getSuccessor(1) == L->getHeader()) &&
343          "At least one edge out of the latch must go to the header");
344 
345   SmallVector<BasicBlock *, 4> ExitBlocks;
346   L->getUniqueNonLatchExitBlocks(ExitBlocks);
347   return any_of(ExitBlocks, [](const BasicBlock *EB) {
348       return !EB->getTerminatingDeoptimizeCall();
349     });
350 }
351 
352 
// Return the number of iterations we want to peel off, stored into
// PP.PeelCount. PP.PeelProfiledIterations records whether the decision was
// driven by profile data (forced/profile peeling) or by static analysis.
void llvm::computePeelCount(Loop *L, unsigned LoopSize,
                            TargetTransformInfo::PeelingPreferences &PP,
                            unsigned TripCount, DominatorTree &DT,
                            ScalarEvolution &SE, AssumptionCache *AC,
                            unsigned Threshold) {
  assert(LoopSize > 0 && "Zero loop size is not allowed!");
  // Save the PP.PeelCount value set by the target in
  // TTI.getPeelingPreferences or by the flag -unroll-peel-count.
  unsigned TargetPeelCount = PP.PeelCount;
  PP.PeelCount = 0;
  if (!canPeel(L))
    return;

  // Only try to peel innermost loops by default.
  // The constraint can be relaxed by the target in TTI.getPeelingPreferences
  // or by the flag -unroll-allow-loop-nests-peeling.
  if (!PP.AllowLoopNestsPeeling && !L->isInnermost())
    return;

  // If the user provided a peel count, use that.
  bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0;
  if (UserPeelCount) {
    LLVM_DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount
                      << " iterations.\n");
    PP.PeelCount = UnrollForcePeelCount;
    PP.PeelProfiledIterations = true;
    return;
  }

  // Skip peeling if it's disabled.
  if (!PP.AllowPeeling)
    return;

  // Check that we can peel at least one iteration without exceeding the
  // size threshold (peeling one iteration roughly doubles the code).
  if (2 * LoopSize > Threshold)
    return;

  // Honor iterations already peeled in previous runs, recorded in loop
  // metadata, so the UnrollPeelMaxCount cap is cumulative.
  unsigned AlreadyPeeled = 0;
  if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData))
    AlreadyPeeled = *Peeled;
  // Stop if we already peeled off the maximum number of iterations.
  if (AlreadyPeeled >= UnrollPeelMaxCount)
    return;

  // Here we try to get rid of Phis which become invariants after 1, 2, ..., N
  // iterations of the loop. For this we compute the number of iterations after
  // which every Phi is guaranteed to become an invariant, and try to peel the
  // maximum number of iterations among these values, thus turning all those
  // Phis into invariants.

  // Store the pre-calculated values here.
  SmallDenseMap<PHINode *, Optional<unsigned>> IterationsToInvariance;
  // Now go through all Phis to calculate the number of iterations they
  // need to become invariants.
  // Start the max computation with the PP.PeelCount value set by the target
  // in TTI.getPeelingPreferences or by the flag -unroll-peel-count.
  unsigned DesiredPeelCount = TargetPeelCount;
  BasicBlock *BackEdge = L->getLoopLatch();
  assert(BackEdge && "Loop is not in simplified form?");
  for (auto BI = L->getHeader()->begin(); isa<PHINode>(&*BI); ++BI) {
    PHINode *Phi = cast<PHINode>(&*BI);
    auto ToInvariance = calculateIterationsToInvariance(Phi, L, BackEdge,
                                                        IterationsToInvariance);
    if (ToInvariance)
      DesiredPeelCount = std::max(DesiredPeelCount, *ToInvariance);
  }

  // Pay respect to limitations implied by loop size and the max peel count.
  unsigned MaxPeelCount = UnrollPeelMaxCount;
  MaxPeelCount = std::min(MaxPeelCount, Threshold / LoopSize - 1);

  // Also consider peeling to make loop-body compares statically decidable.
  DesiredPeelCount = std::max(DesiredPeelCount,
                              countToEliminateCompares(*L, MaxPeelCount, SE));

  // As a last static heuristic, peel one iteration to make invariant loads
  // dereferenceable in the remainder loop.
  if (DesiredPeelCount == 0)
    DesiredPeelCount = peelToTurnInvariantLoadsDerefencebale(*L, DT, AC);

  if (DesiredPeelCount > 0) {
    DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount);
    // Consider max peel count limitation.
    assert(DesiredPeelCount > 0 && "Wrong loop size estimation?");
    if (DesiredPeelCount + AlreadyPeeled <= UnrollPeelMaxCount) {
      LLVM_DEBUG(dbgs() << "Peel " << DesiredPeelCount
                        << " iteration(s) to turn"
                        << " some Phis into invariants.\n");
      PP.PeelCount = DesiredPeelCount;
      PP.PeelProfiledIterations = false;
      return;
    }
  }

  // Bail if we know the statically calculated trip count.
  // In this case we rather prefer partial unrolling.
  if (TripCount)
    return;

  // Do not apply profile-based peeling if it is disabled.
  if (!PP.PeelProfiledIterations)
    return;
  // If we don't know the trip count, but have reason to believe the average
  // trip count is low, peeling should be beneficial, since we will usually
  // hit the peeled section.
  // We only do this in the presence of profile information, since otherwise
  // our estimates of the trip count are not reliable enough.
  if (L->getHeader()->getParent()->hasProfileData()) {
    if (violatesLegacyMultiExitLoopCheck(L))
      return;
    Optional<unsigned> EstimatedTripCount = getLoopEstimatedTripCount(L);
    if (!EstimatedTripCount)
      return;

    LLVM_DEBUG(dbgs() << "Profile-based estimated trip count is "
                      << *EstimatedTripCount << "\n");

    if (*EstimatedTripCount) {
      // Peel the entire estimated trip count if the cumulative cap allows;
      // otherwise just emit diagnostics explaining why we did not peel.
      if (*EstimatedTripCount + AlreadyPeeled <= MaxPeelCount) {
        unsigned PeelCount = *EstimatedTripCount;
        LLVM_DEBUG(dbgs() << "Peeling first " << PeelCount << " iterations.\n");
        PP.PeelCount = PeelCount;
        return;
      }
      LLVM_DEBUG(dbgs() << "Already peel count: " << AlreadyPeeled << "\n");
      LLVM_DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n");
      LLVM_DEBUG(dbgs() << "Loop cost: " << LoopSize << "\n");
      LLVM_DEBUG(dbgs() << "Max peel cost: " << Threshold << "\n");
      LLVM_DEBUG(dbgs() << "Max peel count by cost: "
                        << (Threshold / LoopSize - 1) << "\n");
    }
  }
}
484 
// Per-terminator branch-weight state used while peeling: the weights written
// for the current peeled copy, plus the constant decrement applied before the
// next copy (see updateBranchWeights / initBranchWeights).
struct WeightInfo {
  // Weights for current iteration.
  SmallVector<uint32_t> Weights;
  // Weights to subtract after each iteration.
  const SmallVector<uint32_t> SubWeights;
};
491 
492 /// Update the branch weights of an exiting block of a peeled-off loop
493 /// iteration.
494 /// Let F is a weight of the edge to continue (fallthrough) into the loop.
495 /// Let E is a weight of the edge to an exit.
496 /// F/(F+E) is a probability to go to loop and E/(F+E) is a probability to
497 /// go to exit.
498 /// Then, Estimated ExitCount = F / E.
499 /// For I-th (counting from 0) peeled off iteration we set the the weights for
500 /// the peeled exit as (EC - I, 1). It gives us reasonable distribution,
501 /// The probability to go to exit 1/(EC-I) increases. At the same time
502 /// the estimated exit count in the remainder loop reduces by I.
503 /// To avoid dealing with division rounding we can just multiple both part
504 /// of weights to E and use weight as (F - I * E, E).
505 static void updateBranchWeights(Instruction *Term, WeightInfo &Info) {
506   MDBuilder MDB(Term->getContext());
507   Term->setMetadata(LLVMContext::MD_prof,
508                     MDB.createBranchWeights(Info.Weights));
509   for (auto [Idx, SubWeight] : enumerate(Info.SubWeights))
510     if (SubWeight != 0)
511       Info.Weights[Idx] = Info.Weights[Idx] > SubWeight
512                               ? Info.Weights[Idx] - SubWeight
513                               : 1;
514 }
515 
516 /// Initialize the weights for all exiting blocks.
517 static void initBranchWeights(DenseMap<Instruction *, WeightInfo> &WeightInfos,
518                               Loop *L) {
519   SmallVector<BasicBlock *> ExitingBlocks;
520   L->getExitingBlocks(ExitingBlocks);
521   for (BasicBlock *ExitingBlock : ExitingBlocks) {
522     Instruction *Term = ExitingBlock->getTerminator();
523     SmallVector<uint32_t> Weights;
524     if (!extractBranchWeights(*Term, Weights))
525       continue;
526 
527     // See the comment on updateBranchWeights() for an explanation of what we
528     // do here.
529     uint32_t FallThroughWeights = 0;
530     uint32_t ExitWeights = 0;
531     for (auto [Succ, Weight] : zip(successors(Term), Weights)) {
532       if (L->contains(Succ))
533         FallThroughWeights += Weight;
534       else
535         ExitWeights += Weight;
536     }
537 
538     // Don't try to update weights for degenerate case.
539     if (FallThroughWeights == 0)
540       continue;
541 
542     SmallVector<uint32_t> SubWeights;
543     for (auto [Succ, Weight] : zip(successors(Term), Weights)) {
544       if (!L->contains(Succ)) {
545         // Exit weights stay the same.
546         SubWeights.push_back(0);
547         continue;
548       }
549 
550       // Subtract exit weights on each iteration, distributed across all
551       // fallthrough edges.
552       double W = (double)Weight / (double)FallThroughWeights;
553       SubWeights.push_back((uint32_t)(ExitWeights * W));
554     }
555 
556     WeightInfos.insert({Term, {std::move(Weights), std::move(SubWeights)}});
557   }
558 }
559 
560 /// Update the weights of original exiting block after peeling off all
561 /// iterations.
562 static void fixupBranchWeights(Instruction *Term, const WeightInfo &Info) {
563   MDBuilder MDB(Term->getContext());
564   Term->setMetadata(LLVMContext::MD_prof,
565                     MDB.createBranchWeights(Info.Weights));
566 }
567 
/// Clones the body of the loop L, putting it between \p InsertTop and \p
/// InsertBot.
/// \param IterNumber The serial number of the iteration currently being
/// peeled off.
/// \param ExitEdges The exit edges of the original loop.
/// \param[out] NewBlocks A list of the blocks in the newly created clone
/// \param[out] VMap The value map between the loop and the new clone.
/// \param LoopBlocks A helper for DFS-traversal of the loop.
/// \param LVMap A value-map that maps instructions from the original loop to
/// instructions in the last peeled-off iteration.
static void cloneLoopBlocks(
    Loop *L, unsigned IterNumber, BasicBlock *InsertTop, BasicBlock *InsertBot,
    SmallVectorImpl<std::pair<BasicBlock *, BasicBlock *>> &ExitEdges,
    SmallVectorImpl<BasicBlock *> &NewBlocks, LoopBlocksDFS &LoopBlocks,
    ValueToValueMapTy &VMap, ValueToValueMapTy &LVMap, DominatorTree *DT,
    LoopInfo *LI, ArrayRef<MDNode *> LoopLocalNoAliasDeclScopes,
    ScalarEvolution &SE) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  BasicBlock *PreHeader = L->getLoopPreheader();

  Function *F = Header->getParent();
  LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO();
  Loop *ParentLoop = L->getParentLoop();

  // For each block in the original loop, create a new copy,
  // and update the value map with the newly created values.
  for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
    BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".peel", F);
    NewBlocks.push_back(NewBB);

    // If an original block is an immediate child of the loop L, its copy
    // is a child of a ParentLoop after peeling. If a block is a child of
    // a nested loop, it is handled in the cloneLoop() call below.
    if (ParentLoop && LI->getLoopFor(*BB) == L)
      ParentLoop->addBasicBlockToLoop(NewBB, *LI);

    VMap[*BB] = NewBB;

    // If dominator tree is available, insert nodes to represent cloned blocks.
    if (DT) {
      if (Header == *BB)
        DT->addNewBlock(NewBB, InsertTop);
      else {
        DomTreeNode *IDom = DT->getNode(*BB)->getIDom();
        // VMap must contain entry for IDom, as the iteration order is RPO.
        DT->addNewBlock(NewBB, cast<BasicBlock>(VMap[IDom->getBlock()]));
      }
    }
  }

  {
    // Identify what other metadata depends on the cloned version. After
    // cloning, replace the metadata with the corrected version for both
    // memory instructions and noalias intrinsics.
    std::string Ext = (Twine("Peel") + Twine(IterNumber)).str();
    cloneAndAdaptNoAliasScopes(LoopLocalNoAliasDeclScopes, NewBlocks,
                               Header->getContext(), Ext);
  }

  // Recursively create the new Loop objects for nested loops, if any,
  // to preserve LoopInfo.
  for (Loop *ChildLoop : *L) {
    cloneLoop(ChildLoop, ParentLoop, VMap, LI, nullptr);
  }

  // Hook-up the control flow for the newly inserted blocks.
  // The new header is hooked up directly to the "top", which is either
  // the original loop preheader (for the first iteration) or the previous
  // iteration's exiting block (for every other iteration)
  InsertTop->getTerminator()->setSuccessor(0, cast<BasicBlock>(VMap[Header]));

  // Similarly, for the latch:
  // The original exiting edge is still hooked up to the loop exit.
  // The backedge now goes to the "bottom", which is either the loop's real
  // header (for the last peeled iteration) or the copied header of the next
  // iteration (for every other iteration)
  BasicBlock *NewLatch = cast<BasicBlock>(VMap[Latch]);
  auto *LatchTerm = cast<Instruction>(NewLatch->getTerminator());
  for (unsigned idx = 0, e = LatchTerm->getNumSuccessors(); idx < e; ++idx)
    if (LatchTerm->getSuccessor(idx) == Header) {
      LatchTerm->setSuccessor(idx, InsertBot);
      break;
    }
  if (DT)
    DT->changeImmediateDominator(InsertBot, NewLatch);

  // The new copy of the loop body starts with a bunch of PHI nodes
  // that pick an incoming value from either the preheader, or the previous
  // loop iteration. Since this copy is no longer part of the loop, we
  // resolve this statically:
  // For the first iteration, we use the value from the preheader directly.
  // For any other iteration, we replace the phi with the value generated by
  // the immediately preceding clone of the loop body (which represents
  // the previous iteration).
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    PHINode *NewPHI = cast<PHINode>(VMap[&*I]);
    if (IterNumber == 0) {
      VMap[&*I] = NewPHI->getIncomingValueForBlock(PreHeader);
    } else {
      Value *LatchVal = NewPHI->getIncomingValueForBlock(Latch);
      Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
      // If the latch value is defined inside the loop, take the copy
      // produced by the previous peeled iteration (via LVMap).
      if (LatchInst && L->contains(LatchInst))
        VMap[&*I] = LVMap[LatchInst];
      else
        VMap[&*I] = LatchVal;
    }
    // The cloned phi is now fully resolved; remove it from the cloned header.
    cast<BasicBlock>(VMap[Header])->getInstList().erase(NewPHI);
  }

  // Fix up the outgoing values - we need to add a value for the iteration
  // we've just created. Note that this must happen *after* the incoming
  // values are adjusted, since the value going out of the latch may also be
  // a value coming into the header.
  for (auto Edge : ExitEdges)
    for (PHINode &PHI : Edge.second->phis()) {
      Value *LatchVal = PHI.getIncomingValueForBlock(Edge.first);
      Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
      if (LatchInst && L->contains(LatchInst))
        LatchVal = VMap[LatchVal];
      PHI.addIncoming(LatchVal, cast<BasicBlock>(VMap[Edge.first]));
      // The phi gained an operand, so invalidate any cached SCEV for it.
      SE.forgetValue(&PHI);
    }

  // LastValueMap is updated with the values for the current loop
  // which are used the next time this function is called.
  for (auto KV : VMap)
    LVMap[KV.first] = KV.second;
}
698 
699 TargetTransformInfo::PeelingPreferences llvm::gatherPeelingPreferences(
700     Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI,
701     Optional<bool> UserAllowPeeling,
702     Optional<bool> UserAllowProfileBasedPeeling, bool UnrollingSpecficValues) {
703   TargetTransformInfo::PeelingPreferences PP;
704 
705   // Set the default values.
706   PP.PeelCount = 0;
707   PP.AllowPeeling = true;
708   PP.AllowLoopNestsPeeling = false;
709   PP.PeelProfiledIterations = true;
710 
711   // Get the target specifc values.
712   TTI.getPeelingPreferences(L, SE, PP);
713 
714   // User specified values using cl::opt.
715   if (UnrollingSpecficValues) {
716     if (UnrollPeelCount.getNumOccurrences() > 0)
717       PP.PeelCount = UnrollPeelCount;
718     if (UnrollAllowPeeling.getNumOccurrences() > 0)
719       PP.AllowPeeling = UnrollAllowPeeling;
720     if (UnrollAllowLoopNestsPeeling.getNumOccurrences() > 0)
721       PP.AllowLoopNestsPeeling = UnrollAllowLoopNestsPeeling;
722   }
723 
724   // User specifed values provided by argument.
725   if (UserAllowPeeling)
726     PP.AllowPeeling = *UserAllowPeeling;
727   if (UserAllowProfileBasedPeeling)
728     PP.PeelProfiledIterations = *UserAllowProfileBasedPeeling;
729 
730   return PP;
731 }
732 
733 /// Peel off the first \p PeelCount iterations of loop \p L.
734 ///
735 /// Note that this does not peel them off as a single straight-line block.
736 /// Rather, each iteration is peeled off separately, and needs to check the
737 /// exit condition.
738 /// For loops that dynamically execute \p PeelCount iterations or less
739 /// this provides a benefit, since the peeled off iterations, which account
740 /// for the bulk of dynamic execution, can be further simplified by scalar
741 /// optimizations.
742 bool llvm::peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI,
743                     ScalarEvolution *SE, DominatorTree &DT, AssumptionCache *AC,
744                     bool PreserveLCSSA) {
745   assert(PeelCount > 0 && "Attempt to peel out zero iterations?");
746   assert(canPeel(L) && "Attempt to peel a loop which is not peelable?");
747 
748   LoopBlocksDFS LoopBlocks(L);
749   LoopBlocks.perform(LI);
750 
751   BasicBlock *Header = L->getHeader();
752   BasicBlock *PreHeader = L->getLoopPreheader();
753   BasicBlock *Latch = L->getLoopLatch();
754   SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> ExitEdges;
755   L->getExitEdges(ExitEdges);
756 
757   // Remember dominators of blocks we might reach through exits to change them
758   // later. Immediate dominator of such block might change, because we add more
759   // routes which can lead to the exit: we can reach it from the peeled
760   // iterations too.
761   DenseMap<BasicBlock *, BasicBlock *> NonLoopBlocksIDom;
762   for (auto *BB : L->blocks()) {
763     auto *BBDomNode = DT.getNode(BB);
764     SmallVector<BasicBlock *, 16> ChildrenToUpdate;
765     for (auto *ChildDomNode : BBDomNode->children()) {
766       auto *ChildBB = ChildDomNode->getBlock();
767       if (!L->contains(ChildBB))
768         ChildrenToUpdate.push_back(ChildBB);
769     }
770     // The new idom of the block will be the nearest common dominator
771     // of all copies of the previous idom. This is equivalent to the
772     // nearest common dominator of the previous idom and the first latch,
773     // which dominates all copies of the previous idom.
774     BasicBlock *NewIDom = DT.findNearestCommonDominator(BB, Latch);
775     for (auto *ChildBB : ChildrenToUpdate)
776       NonLoopBlocksIDom[ChildBB] = NewIDom;
777   }
778 
779   Function *F = Header->getParent();
780 
781   // Set up all the necessary basic blocks. It is convenient to split the
782   // preheader into 3 parts - two blocks to anchor the peeled copy of the loop
783   // body, and a new preheader for the "real" loop.
784 
785   // Peeling the first iteration transforms.
786   //
787   // PreHeader:
788   // ...
789   // Header:
790   //   LoopBody
791   //   If (cond) goto Header
792   // Exit:
793   //
794   // into
795   //
796   // InsertTop:
797   //   LoopBody
798   //   If (!cond) goto Exit
799   // InsertBot:
800   // NewPreHeader:
801   // ...
802   // Header:
803   //  LoopBody
804   //  If (cond) goto Header
805   // Exit:
806   //
807   // Each following iteration will split the current bottom anchor in two,
808   // and put the new copy of the loop body between these two blocks. That is,
809   // after peeling another iteration from the example above, we'll split
810   // InsertBot, and get:
811   //
812   // InsertTop:
813   //   LoopBody
814   //   If (!cond) goto Exit
815   // InsertBot:
816   //   LoopBody
817   //   If (!cond) goto Exit
818   // InsertBot.next:
819   // NewPreHeader:
820   // ...
821   // Header:
822   //  LoopBody
823   //  If (cond) goto Header
824   // Exit:
825 
826   BasicBlock *InsertTop = SplitEdge(PreHeader, Header, &DT, LI);
827   BasicBlock *InsertBot =
828       SplitBlock(InsertTop, InsertTop->getTerminator(), &DT, LI);
829   BasicBlock *NewPreHeader =
830       SplitBlock(InsertBot, InsertBot->getTerminator(), &DT, LI);
831 
832   InsertTop->setName(Header->getName() + ".peel.begin");
833   InsertBot->setName(Header->getName() + ".peel.next");
834   NewPreHeader->setName(PreHeader->getName() + ".peel.newph");
835 
836   ValueToValueMapTy LVMap;
837 
838   Instruction *LatchTerm =
839       cast<Instruction>(cast<BasicBlock>(Latch)->getTerminator());
840 
841   // If we have branch weight information, we'll want to update it for the
842   // newly created branches.
843   DenseMap<Instruction *, WeightInfo> Weights;
844   initBranchWeights(Weights, L);
845 
846   // Identify what noalias metadata is inside the loop: if it is inside the
847   // loop, the associated metadata must be cloned for each iteration.
848   SmallVector<MDNode *, 6> LoopLocalNoAliasDeclScopes;
849   identifyNoAliasScopesToClone(L->getBlocks(), LoopLocalNoAliasDeclScopes);
850 
851   // For each peeled-off iteration, make a copy of the loop.
852   for (unsigned Iter = 0; Iter < PeelCount; ++Iter) {
853     SmallVector<BasicBlock *, 8> NewBlocks;
854     ValueToValueMapTy VMap;
855 
856     cloneLoopBlocks(L, Iter, InsertTop, InsertBot, ExitEdges, NewBlocks,
857                     LoopBlocks, VMap, LVMap, &DT, LI,
858                     LoopLocalNoAliasDeclScopes, *SE);
859 
860     // Remap to use values from the current iteration instead of the
861     // previous one.
862     remapInstructionsInBlocks(NewBlocks, VMap);
863 
864     // Update IDoms of the blocks reachable through exits.
865     if (Iter == 0)
866       for (auto BBIDom : NonLoopBlocksIDom)
867         DT.changeImmediateDominator(BBIDom.first,
868                                      cast<BasicBlock>(LVMap[BBIDom.second]));
869 #ifdef EXPENSIVE_CHECKS
870     assert(DT.verify(DominatorTree::VerificationLevel::Fast));
871 #endif
872 
873     for (auto &[Term, Info] : Weights) {
874       auto *TermCopy = cast<Instruction>(VMap[Term]);
875       updateBranchWeights(TermCopy, Info);
876     }
877 
878     // Remove Loop metadata from the latch branch instruction
879     // because it is not the Loop's latch branch anymore.
880     auto *LatchTermCopy = cast<Instruction>(VMap[LatchTerm]);
881     LatchTermCopy->setMetadata(LLVMContext::MD_loop, nullptr);
882 
883     InsertTop = InsertBot;
884     InsertBot = SplitBlock(InsertBot, InsertBot->getTerminator(), &DT, LI);
885     InsertBot->setName(Header->getName() + ".peel.next");
886 
887     F->getBasicBlockList().splice(InsertTop->getIterator(),
888                                   F->getBasicBlockList(),
889                                   NewBlocks[0]->getIterator(), F->end());
890   }
891 
892   // Now adjust the phi nodes in the loop header to get their initial values
893   // from the last peeled-off iteration instead of the preheader.
894   for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
895     PHINode *PHI = cast<PHINode>(I);
896     Value *NewVal = PHI->getIncomingValueForBlock(Latch);
897     Instruction *LatchInst = dyn_cast<Instruction>(NewVal);
898     if (LatchInst && L->contains(LatchInst))
899       NewVal = LVMap[LatchInst];
900 
901     PHI->setIncomingValueForBlock(NewPreHeader, NewVal);
902   }
903 
904   for (const auto &[Term, Info] : Weights)
905     fixupBranchWeights(Term, Info);
906 
907   // Update Metadata for count of peeled off iterations.
908   unsigned AlreadyPeeled = 0;
909   if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData))
910     AlreadyPeeled = *Peeled;
911   addStringMetadataToLoop(L, PeeledCountMetaData, AlreadyPeeled + PeelCount);
912 
913   if (Loop *ParentLoop = L->getParentLoop())
914     L = ParentLoop;
915 
916   // We modified the loop, update SE.
917   SE->forgetTopmostLoop(L);
918 
919 #ifdef EXPENSIVE_CHECKS
920   // Finally DomtTree must be correct.
921   assert(DT.verify(DominatorTree::VerificationLevel::Fast));
922 #endif
923 
924   // FIXME: Incrementally update loop-simplify
925   simplifyLoop(L, &DT, LI, SE, AC, nullptr, PreserveLCSSA);
926 
927   NumPeeled++;
928 
929   return true;
930 }
931