//===-- UnrollLoop.cpp - Loop unrolling utilities -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements some loop unrolling utilities. It does not define any
// actual pass or policy, but provides a single function to perform loop
// unrolling.
//
// The process of unrolling can produce extraneous basic blocks linked with
// unconditional branches.  This will be corrected in the future.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
using namespace llvm;

#define DEBUG_TYPE "loop-unroll"

// TODO: Should these be here or in LoopUnroll?
STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");
STATISTIC(NumUnrolledWithHeader, "Number of loops unrolled without a "
                                 "conditional latch (completely or otherwise)");

static cl::opt<bool>
UnrollRuntimeEpilog("unroll-runtime-epilog", cl::init(false), cl::Hidden,
                    cl::desc("Allow runtime unrolled loops to be unrolled "
                             "with epilog instead of prolog."));

static cl::opt<bool>
UnrollVerifyDomtree("unroll-verify-domtree", cl::Hidden,
                    cl::desc("Verify domtree after unrolling"),
#ifdef EXPENSIVE_CHECKS
    cl::init(true)
#else
    cl::init(false)
#endif
                    );

/// Convert the instruction operands from referencing the current values into
/// those specified by VMap.
void llvm::remapInstruction(Instruction *I, ValueToValueMapTy &VMap) {
  for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
    Value *Op = I->getOperand(op);

    // Unwrap arguments of dbg.value intrinsics.
    bool Wrapped = false;
    if (auto *V = dyn_cast<MetadataAsValue>(Op))
      if (auto *Unwrapped = dyn_cast<ValueAsMetadata>(V->getMetadata())) {
        Op = Unwrapped->getValue();
        Wrapped = true;
      }

    auto wrap = [&](Value *V) {
      auto &C = I->getContext();
      return Wrapped ? MetadataAsValue::get(C, ValueAsMetadata::get(V)) : V;
    };

    ValueToValueMapTy::iterator It = VMap.find(Op);
    if (It != VMap.end())
      I->setOperand(op, wrap(It->second));
  }

  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      ValueToValueMapTy::iterator It = VMap.find(PN->getIncomingBlock(i));
      if (It != VMap.end())
        PN->setIncomingBlock(i, cast<BasicBlock>(It->second));
    }
  }
}
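//
// A rough usage sketch (hypothetical variable names) of how remapInstruction
// pairs with block cloning during unrolling: clone a block, then rewrite each
// cloned instruction's operands through the same map, e.g.
//
//   ValueToValueMapTy VMap;
//   BasicBlock *Clone = CloneBasicBlock(BB, VMap, ".copy");
//   for (Instruction &I : *Clone)
//     llvm::remapInstruction(&I, VMap);
//
// CloneBasicBlock fills VMap with original->clone mappings, so after the remap
// the cloned instructions refer to cloned values instead of the originals.
// The dbg.value unwrap/rewrap above keeps debug intrinsics pointing at the
// remapped values as well.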

/// Check if unrolling created a situation where we need to insert phi nodes to
/// preserve LCSSA form.
/// \param Blocks is a vector of basic blocks representing the unrolled loop.
/// \param L is the outer loop.
/// It's possible that some of the blocks are in L, and some are not. In this
/// case, if there is a use outside L and the definition is inside L, we need to
/// insert a phi-node, otherwise LCSSA will be broken.
/// The function is just a helper function for llvm::UnrollLoop that returns
/// true if this situation occurs, indicating that LCSSA needs to be fixed.
static bool needToInsertPhisForLCSSA(Loop *L, std::vector<BasicBlock *> Blocks,
                                     LoopInfo *LI) {
  for (BasicBlock *BB : Blocks) {
    if (LI->getLoopFor(BB) == L)
      continue;
    for (Instruction &I : *BB) {
      for (Use &U : I.operands()) {
        if (auto Def = dyn_cast<Instruction>(U)) {
          Loop *DefLoop = LI->getLoopFor(Def->getParent());
          if (!DefLoop)
            continue;
          if (DefLoop->contains(L))
            return true;
        }
      }
    }
  }
  return false;
}
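//
// To make the LCSSA condition concrete, a small hand-written IR shape
// (hypothetical, for illustration only): after unrolling, %v is defined in a
// block that is still inside L while its user ends up in a block outside L.
//
//   inner:                       ; inside L
//     %v = add i32 %iv, 1
//     br label %tail
//   tail:                        ; left outside L by the transformation
//     %use = mul i32 %v, 2       ; use of %v with no phi in between
//
// LCSSA requires %use to read %v through a phi node on the loop boundary,
// which is exactly what the caller re-establishes when this helper returns
// true.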

/// Adds ClonedBB to LoopInfo, creates a new loop for ClonedBB if necessary
/// and adds a mapping from the original loop to the new loop to NewLoops.
/// Returns nullptr if no new loop was created, otherwise a pointer to the
/// original loop that OriginalBB was part of.
const Loop* llvm::addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
                                           BasicBlock *ClonedBB, LoopInfo *LI,
                                           NewLoopsMap &NewLoops) {
  // Figure out which loop New is in.
  const Loop *OldLoop = LI->getLoopFor(OriginalBB);
  assert(OldLoop && "Should (at least) be in the loop being unrolled!");

  Loop *&NewLoop = NewLoops[OldLoop];
  if (!NewLoop) {
    // Found a new sub-loop.
    assert(OriginalBB == OldLoop->getHeader() &&
           "Header should be first in RPO");

    NewLoop = LI->AllocateLoop();
    Loop *NewLoopParent = NewLoops.lookup(OldLoop->getParentLoop());

    if (NewLoopParent)
      NewLoopParent->addChildLoop(NewLoop);
    else
      LI->addTopLevelLoop(NewLoop);

    NewLoop->addBasicBlockToLoop(ClonedBB, *LI);
    return OldLoop;
  } else {
    NewLoop->addBasicBlockToLoop(ClonedBB, *LI);
    return nullptr;
  }
}
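//
// A rough sketch (hypothetical names) of how the unroller drives this helper:
// while cloning one unrolled iteration it walks the original blocks in RPO and
// registers every clone with LoopInfo, seeding the map with the loop being
// unrolled so its own blocks do not spawn a fresh loop:
//
//   NewLoopsMap NewLoops;
//   NewLoops[L] = L;
//   for (BasicBlock *BB : OriginalBlocksInRPO) {
//     BasicBlock *Clone = CloneBasicBlock(BB, VMap, ".1");
//     if (const Loop *Old = addClonedBlockToLoopInfo(BB, Clone, LI, NewLoops))
//       ; // a sub-loop was duplicated; it may need loop-simplify later
//   }
//
// The cloning loop inside UnrollLoop below follows this pattern and also
// queues the duplicated sub-loops for re-simplification.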

/// The function chooses which type of unroll (epilog or prolog) is more
/// profitable.
/// Epilog unroll is more profitable when there is a PHI that starts from a
/// constant.  In this case the epilog will leave the PHI starting from the
/// constant, but the prolog will convert it to a non-constant.
///
/// loop:
///   PN = PHI [I, Latch], [CI, PreHeader]
///   I = foo(PN)
///   ...
///
/// Epilog unroll case.
/// loop:
///   PN = PHI [I2, Latch], [CI, PreHeader]
///   I1 = foo(PN)
///   I2 = foo(I1)
///   ...
/// Prolog unroll case.
///   NewPN = PHI [PrologI, Prolog], [CI, PreHeader]
/// loop:
///   PN = PHI [I2, Latch], [NewPN, PreHeader]
///   I1 = foo(PN)
///   I2 = foo(I1)
///   ...
///
static bool isEpilogProfitable(Loop *L) {
  BasicBlock *PreHeader = L->getLoopPreheader();
  BasicBlock *Header = L->getHeader();
  assert(PreHeader && Header);
  for (const PHINode &PN : Header->phis()) {
    if (isa<ConstantInt>(PN.getIncomingValueForBlock(PreHeader)))
      return true;
  }
  return false;
}

/// Perform some cleanup and simplifications on loops after unrolling. It is
/// useful to simplify the IV's in the new loop, as well as do a quick
/// simplify/dce pass of the instructions.
void llvm::simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI,
                                   ScalarEvolution *SE, DominatorTree *DT,
                                   AssumptionCache *AC) {
  // Simplify any new induction variables in the partially unrolled loop.
  if (SE && SimplifyIVs) {
    SmallVector<WeakTrackingVH, 16> DeadInsts;
    simplifyLoopIVs(L, SE, DT, LI, DeadInsts);

    // Aggressively clean up dead instructions that simplifyLoopIVs already
    // identified. Any remaining should be cleaned up below.
    while (!DeadInsts.empty())
      if (Instruction *Inst =
              dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
        RecursivelyDeleteTriviallyDeadInstructions(Inst);
  }

  // At this point, the code is well formed.  We now do a quick sweep over the
  // inserted code, doing constant propagation and dead code elimination as we
  // go.
  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
  for (BasicBlock *BB : L->getBlocks()) {
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
      Instruction *Inst = &*I++;

      if (Value *V = SimplifyInstruction(Inst, {DL, nullptr, DT, AC}))
        if (LI->replacementPreservesLCSSAForm(Inst, V))
          Inst->replaceAllUsesWith(V);
      if (isInstructionTriviallyDead(Inst))
        BB->getInstList().erase(Inst);
    }
  }

  // TODO: after peeling or unrolling, previously loop variant conditions are
  // likely to fold to constants, eagerly propagating those here will require
  // fewer cleanup passes to be run.  Alternatively, a LoopEarlyCSE might be
  // appropriate.
}
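
// A small, hand-written example (illustrative only) of what this cleanup
// typically catches: after unrolling by 2, the second copy of an induction
// increment often simplifies against the first, and the dead intermediate
// value goes away.
//
//   %iv.next   = add nuw nsw i64 %iv, 1        ; from iteration 1
//   %iv.next.1 = add nuw nsw i64 %iv.next, 1   ; from iteration 2
//
// simplifyLoopIVs/SimplifyInstruction can rewrite users of %iv.next.1 in terms
// of "%iv + 2", after which the intermediate add may become trivially dead and
// is erased by the sweep above.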

/// Unroll the given loop by Count. The loop must be in LCSSA form.  Unrolling
/// can only fail when the loop's latch block is not terminated by a conditional
/// branch instruction. However, if the trip count (and multiple) are not known,
/// loop unrolling will mostly produce more code that is no faster.
///
/// TripCount is the upper bound of the iteration on which control exits
/// LatchBlock. Control may exit the loop prior to TripCount iterations either
/// via an early branch in another loop block or via the LatchBlock terminator.
/// This is relaxed from the general definition of trip count, which is the
/// number of times the loop header executes. Note that UnrollLoop assumes that
/// the loop counter test is in LatchBlock in order to remove unnecessary
/// instances of the test.  If control can exit the loop from the LatchBlock's
/// terminator prior to TripCount iterations, the flag PreserveCondBr needs to
/// be set.
///
/// PreserveCondBr indicates whether the conditional branch of the LatchBlock
/// needs to be preserved.  It is needed when we use the trip count upper bound
/// to fully unroll the loop. If PreserveOnlyFirst is also set then only the
/// first conditional branch needs to be preserved.
///
/// Similarly, TripMultiple divides the number of times that the LatchBlock may
/// execute without exiting the loop.
///
/// If AllowRuntime is true then UnrollLoop will consider unrolling loops that
/// have a runtime (i.e. not compile time constant) trip count.  Unrolling these
/// loops requires an unroll "prologue" that runs "RuntimeTripCount % Count"
/// iterations before branching into the unrolled loop.  UnrollLoop will not
/// runtime-unroll the loop if computing RuntimeTripCount will be expensive and
/// AllowExpensiveTripCount is false.
///
/// If we want to perform PGO-based loop peeling, PeelCount is set to the
/// number of iterations we want to peel off.
///
/// The LoopInfo Analysis that is passed will be kept consistent.
///
/// This utility preserves LoopInfo. It will also preserve ScalarEvolution and
/// DominatorTree if they are non-null.
///
/// If RemainderLoop is non-null, it will receive the remainder loop (if
/// required and not fully unrolled).
LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
                                  ScalarEvolution *SE, DominatorTree *DT,
                                  AssumptionCache *AC,
                                  OptimizationRemarkEmitter *ORE,
                                  bool PreserveLCSSA, Loop **RemainderLoop) {

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    LLVM_DEBUG(dbgs() << "  Can't unroll; loop preheader-insertion failed.\n");
    return LoopUnrollResult::Unmodified;
  }

  BasicBlock *LatchBlock = L->getLoopLatch();
  if (!LatchBlock) {
    LLVM_DEBUG(dbgs() << "  Can't unroll; loop exit-block-insertion failed.\n");
    return LoopUnrollResult::Unmodified;
  }

  // Loops with indirectbr cannot be cloned.
  if (!L->isSafeToClone()) {
    LLVM_DEBUG(dbgs() << "  Can't unroll; Loop body cannot be cloned.\n");
    return LoopUnrollResult::Unmodified;
  }

  // The current loop unroll pass can unroll loops whose single latch or header
  // block ends in a conditional branch that exits the loop.
  // FIXME: The implementation can be extended to work with more complicated
  // cases, e.g. loops with multiple latches.
  BasicBlock *Header = L->getHeader();
  BranchInst *HeaderBI = dyn_cast<BranchInst>(Header->getTerminator());
  BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());

  // FIXME: Support loops without conditional latch and multiple exiting blocks.
  if (!BI ||
      (BI->isUnconditional() && (!HeaderBI || HeaderBI->isUnconditional() ||
                                 L->getExitingBlock() != Header))) {
    LLVM_DEBUG(dbgs() << "  Can't unroll; loop not terminated by a conditional "
                         "branch in the latch or header.\n");
    return LoopUnrollResult::Unmodified;
  }

  auto CheckLatchSuccessors = [&](unsigned S1, unsigned S2) {
    return BI->isConditional() && BI->getSuccessor(S1) == Header &&
           !L->contains(BI->getSuccessor(S2));
  };

  // If we have a conditional latch, it must exit the loop.
  if (BI && BI->isConditional() && !CheckLatchSuccessors(0, 1) &&
      !CheckLatchSuccessors(1, 0)) {
    LLVM_DEBUG(
        dbgs() << "Can't unroll; a conditional latch must exit the loop");
    return LoopUnrollResult::Unmodified;
  }

  auto CheckHeaderSuccessors = [&](unsigned S1, unsigned S2) {
    return HeaderBI && HeaderBI->isConditional() &&
           L->contains(HeaderBI->getSuccessor(S1)) &&
           !L->contains(HeaderBI->getSuccessor(S2));
  };

  // If we do not have a conditional latch, the header must exit the loop.
  if (BI && !BI->isConditional() && HeaderBI && HeaderBI->isConditional() &&
      !CheckHeaderSuccessors(0, 1) && !CheckHeaderSuccessors(1, 0)) {
    LLVM_DEBUG(dbgs() << "Can't unroll; conditional header must exit the loop");
    return LoopUnrollResult::Unmodified;
  }
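
  // For orientation, the two supported shapes (sketched as hypothetical CFGs)
  // are:
  //
  //   (a) rotated / do-while style: the latch is the exiting block
  //         header -> ... -> latch --cond--> header or exit
  //
  //   (b) while style: the header is the single exiting block and the latch
  //       branches back unconditionally
  //         header --cond--> body or exit,  body -> ... -> latch -> header
  //
  // Everything else (multiple latches, exits from arbitrary blocks without a
  // conditional latch) is rejected above.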

  if (Header->hasAddressTaken()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    LLVM_DEBUG(
        dbgs() << "  Won't unroll loop: address of header block is taken.\n");
    return LoopUnrollResult::Unmodified;
  }

  if (ULO.TripCount != 0)
    LLVM_DEBUG(dbgs() << "  Trip Count = " << ULO.TripCount << "\n");
  if (ULO.TripMultiple != 1)
    LLVM_DEBUG(dbgs() << "  Trip Multiple = " << ULO.TripMultiple << "\n");

  // Effectively "DCE" unrolled iterations that are beyond the tripcount
  // and will never be executed.
  if (ULO.TripCount != 0 && ULO.Count > ULO.TripCount)
    ULO.Count = ULO.TripCount;

  // Don't enter the unroll code if there is nothing to do.
  if (ULO.TripCount == 0 && ULO.Count < 2 && ULO.PeelCount == 0) {
    LLVM_DEBUG(dbgs() << "Won't unroll; almost nothing to do\n");
    return LoopUnrollResult::Unmodified;
  }

  assert(ULO.Count > 0);
  assert(ULO.TripMultiple > 0);
  assert(ULO.TripCount == 0 || ULO.TripCount % ULO.TripMultiple == 0);

  // Are we eliminating the loop control altogether?
  bool CompletelyUnroll = ULO.Count == ULO.TripCount;
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  std::vector<BasicBlock*> OriginalLoopBlocks = L->getBlocks();

  // Go through all exits of L and see if there are any phi-nodes there. We just
  // conservatively assume that they're inserted to preserve LCSSA form, which
  // means that complete unrolling might break this form. We need to either fix
  // it in-place after the transformation, or entirely rebuild LCSSA. TODO: For
  // now we just recompute LCSSA for the outer loop, but it should be possible
  // to fix it in-place.
  bool NeedToFixLCSSA = PreserveLCSSA && CompletelyUnroll &&
                        any_of(ExitBlocks, [](const BasicBlock *BB) {
                          return isa<PHINode>(BB->begin());
                        });

  // We assume a run-time trip count if the compiler cannot
  // figure out the loop trip count and the unroll-runtime
  // flag is specified.
  bool RuntimeTripCount =
      (ULO.TripCount == 0 && ULO.Count > 0 && ULO.AllowRuntime);

  assert((!RuntimeTripCount || !ULO.PeelCount) &&
         "Did not expect runtime trip-count unrolling "
         "and peeling for the same loop");

  bool Peeled = false;
  if (ULO.PeelCount) {
    Peeled = peelLoop(L, ULO.PeelCount, LI, SE, DT, AC, PreserveLCSSA);

    // Successful peeling may result in a change in the loop preheader/trip
    // counts. If we later unroll the loop, we want these to be updated.
    if (Peeled) {
      // According to our guards and profitability checks the only
      // meaningful exit should be latch block. Other exits go to deopt,
      // so we do not worry about them.
      BasicBlock *ExitingBlock = L->getLoopLatch();
      assert(ExitingBlock && "Loop without exiting block?");
      assert(L->isLoopExiting(ExitingBlock) && "Latch is not exiting?");
      Preheader = L->getLoopPreheader();
      ULO.TripCount = SE->getSmallConstantTripCount(L, ExitingBlock);
      ULO.TripMultiple = SE->getSmallConstantTripMultiple(L, ExitingBlock);
    }
  }

  // Loops containing convergent instructions must have a count that divides
  // their TripMultiple.
  LLVM_DEBUG(
      {
        bool HasConvergent = false;
        for (auto &BB : L->blocks())
          for (auto &I : *BB)
            if (auto CS = CallSite(&I))
              HasConvergent |= CS.isConvergent();
        assert((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) &&
               "Unroll count must divide trip multiple if loop contains a "
               "convergent operation.");
      });

  bool EpilogProfitability =
      UnrollRuntimeEpilog.getNumOccurrences() ? UnrollRuntimeEpilog
                                              : isEpilogProfitable(L);

  if (RuntimeTripCount && ULO.TripMultiple % ULO.Count != 0 &&
      !UnrollRuntimeLoopRemainder(L, ULO.Count, ULO.AllowExpensiveTripCount,
                                  EpilogProfitability, ULO.UnrollRemainder,
                                  ULO.ForgetAllSCEV, LI, SE, DT, AC,
                                  PreserveLCSSA, RemainderLoop)) {
    if (ULO.Force)
      RuntimeTripCount = false;
    else {
      LLVM_DEBUG(dbgs() << "Won't unroll; remainder loop could not be "
                           "generated when assuming runtime trip count\n");
      return LoopUnrollResult::Unmodified;
    }
  }

  // If we know the trip count, we know the multiple...
  unsigned BreakoutTrip = 0;
  if (ULO.TripCount != 0) {
    BreakoutTrip = ULO.TripCount % ULO.Count;
    ULO.TripMultiple = 0;
  } else {
    // Figure out what multiple to use.
    BreakoutTrip = ULO.TripMultiple =
        (unsigned)GreatestCommonDivisor64(ULO.Count, ULO.TripMultiple);
  }
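
  // Worked example (illustrative numbers): with TripCount = 10 and Count = 4,
  // BreakoutTrip = 10 % 4 = 2 and TripMultiple is cleared, so in the wiring
  // loop below only the latch whose successor index j equals 2 keeps its
  // conditional exit.  With an unknown trip count, Count = 4 and
  // TripMultiple = 6, BreakoutTrip = TripMultiple = gcd(4, 6) = 2, so the
  // latches with j == 2 or j % 2 == 0 stay conditional and the rest become
  // unconditional branches.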

  using namespace ore;
  // Report the unrolling decision.
  if (CompletelyUnroll) {
    LLVM_DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
                      << " with trip count " << ULO.TripCount << "!\n");
    if (ORE)
      ORE->emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(),
                                  L->getHeader())
               << "completely unrolled loop with "
               << NV("UnrollCount", ULO.TripCount) << " iterations";
      });
  } else if (ULO.PeelCount) {
    LLVM_DEBUG(dbgs() << "PEELING loop %" << Header->getName()
                      << " with iteration count " << ULO.PeelCount << "!\n");
    if (ORE)
      ORE->emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(),
                                  L->getHeader())
               << " peeled loop by " << NV("PeelCount", ULO.PeelCount)
               << " iterations";
      });
  } else {
    auto DiagBuilder = [&]() {
      OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(),
                              L->getHeader());
      return Diag << "unrolled loop by a factor of "
                  << NV("UnrollCount", ULO.Count);
    };

    LLVM_DEBUG(dbgs() << "UNROLLING loop %" << Header->getName() << " by "
                      << ULO.Count);
    if (ULO.TripMultiple == 0 || BreakoutTrip != ULO.TripMultiple) {
      LLVM_DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
      if (ORE)
        ORE->emit([&]() {
          return DiagBuilder() << " with a breakout at trip "
                               << NV("BreakoutTrip", BreakoutTrip);
        });
    } else if (ULO.TripMultiple != 1) {
      LLVM_DEBUG(dbgs() << " with " << ULO.TripMultiple << " trips per branch");
      if (ORE)
        ORE->emit([&]() {
          return DiagBuilder()
                 << " with " << NV("TripMultiple", ULO.TripMultiple)
                 << " trips per branch";
        });
    } else if (RuntimeTripCount) {
      LLVM_DEBUG(dbgs() << " with run-time trip count");
      if (ORE)
        ORE->emit(
            [&]() { return DiagBuilder() << " with run-time trip count"; });
    }
    LLVM_DEBUG(dbgs() << "!\n");
  }

  // We are going to make changes to this loop. SCEV may be keeping cached info
  // about it, in particular about the backedge-taken count. The changes we
  // make are guaranteed to invalidate this information for our loop. It is
  // tempting to only invalidate the loop being unrolled, but that is incorrect
  // because exiting branches from inner loops affect the outer loops, and if
  // something changes inside them then any of the outer loops may change as
  // well. When we forget the outermost loop, we also forget all contained
  // loops, and that is what we need here.
  if (SE) {
    if (ULO.ForgetAllSCEV)
      SE->forgetAllLoops();
    else
      SE->forgetTopmostLoop(L);
  }

  bool ContinueOnTrue;
  bool LatchIsExiting = BI->isConditional();
  BasicBlock *LoopExit = nullptr;
  if (LatchIsExiting) {
    ContinueOnTrue = L->contains(BI->getSuccessor(0));
    LoopExit = BI->getSuccessor(ContinueOnTrue);
  } else {
    NumUnrolledWithHeader++;
    ContinueOnTrue = L->contains(HeaderBI->getSuccessor(0));
    LoopExit = HeaderBI->getSuccessor(ContinueOnTrue);
  }

  // For the first iteration of the loop, we should use the precloned values for
  // PHI nodes.  Insert associations now.
  ValueToValueMapTy LastValueMap;
  std::vector<PHINode*> OrigPHINode;
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    OrigPHINode.push_back(cast<PHINode>(I));
  }

  std::vector<BasicBlock *> Headers;
  std::vector<BasicBlock *> HeaderSucc;
  std::vector<BasicBlock *> Latches;
  Headers.push_back(Header);
  Latches.push_back(LatchBlock);

  if (!LatchIsExiting) {
    auto *Term = cast<BranchInst>(Header->getTerminator());
    if (Term->isUnconditional() || L->contains(Term->getSuccessor(0))) {
      assert(L->contains(Term->getSuccessor(0)));
      HeaderSucc.push_back(Term->getSuccessor(0));
    } else {
      assert(L->contains(Term->getSuccessor(1)));
      HeaderSucc.push_back(Term->getSuccessor(1));
    }
  }

  // The current on-the-fly SSA update requires blocks to be processed in
  // reverse postorder so that LastValueMap contains the correct value at each
  // exit.
  LoopBlocksDFS DFS(L);
  DFS.perform(LI);

  // Stash the DFS iterators before adding blocks to the loop.
  LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();

  std::vector<BasicBlock*> UnrolledLoopBlocks = L->getBlocks();

  // Loop Unrolling might create new loops. While we do preserve LoopInfo, we
  // might break loop-simplified form for these loops (as they, e.g., would
  // share the same exit blocks). We'll keep track of loops for which we can
  // break this so that later we can re-simplify them.
  SmallSetVector<Loop *, 4> LoopsToSimplify;
  for (Loop *SubLoop : *L)
    LoopsToSimplify.insert(SubLoop);

  if (Header->getParent()->isDebugInfoForProfiling())
    for (BasicBlock *BB : L->getBlocks())
      for (Instruction &I : *BB)
        if (!isa<DbgInfoIntrinsic>(&I))
          if (const DILocation *DIL = I.getDebugLoc()) {
            auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(ULO.Count);
            if (NewDIL)
              I.setDebugLoc(NewDIL.getValue());
            else
              LLVM_DEBUG(dbgs()
                         << "Failed to create new discriminator: "
                         << DIL->getFilename() << " Line: " << DIL->getLine());
          }

  for (unsigned It = 1; It != ULO.Count; ++It) {
    std::vector<BasicBlock*> NewBlocks;
    SmallDenseMap<const Loop *, Loop *, 4> NewLoops;
    NewLoops[L] = L;

    for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
      ValueToValueMapTy VMap;
      BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
      Header->getParent()->getBasicBlockList().push_back(New);

      assert((*BB != Header || LI->getLoopFor(*BB) == L) &&
             "Header should not be in a sub-loop");
      // Tell LI about New.
      const Loop *OldLoop = addClonedBlockToLoopInfo(*BB, New, LI, NewLoops);
      if (OldLoop)
        LoopsToSimplify.insert(NewLoops[OldLoop]);

      if (*BB == Header)
        // Loop over all of the PHI nodes in the block, changing them to use
        // the incoming values from the previous block.
        for (PHINode *OrigPHI : OrigPHINode) {
          PHINode *NewPHI = cast<PHINode>(VMap[OrigPHI]);
          Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
          if (Instruction *InValI = dyn_cast<Instruction>(InVal))
            if (It > 1 && L->contains(InValI))
              InVal = LastValueMap[InValI];
          VMap[OrigPHI] = InVal;
          New->getInstList().erase(NewPHI);
        }
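
      // Example of what the loop above achieves (hypothetical IR names): if the
      // original header had
      //   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
      // then in the clone for iteration It the phi is deleted and every use of
      // %iv in that copy is rewired (via VMap) to the %iv.next produced by the
      // previous copy, so the copies form one straight chain of iterations.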

      // Update our running map of newest clones
      LastValueMap[*BB] = New;
      for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
           VI != VE; ++VI)
        LastValueMap[VI->first] = VI->second;

      // Add phi entries for newly created values to all exit blocks.
      for (BasicBlock *Succ : successors(*BB)) {
        if (L->contains(Succ))
          continue;
        for (PHINode &PHI : Succ->phis()) {
          Value *Incoming = PHI.getIncomingValueForBlock(*BB);
          ValueToValueMapTy::iterator It = LastValueMap.find(Incoming);
          if (It != LastValueMap.end())
            Incoming = It->second;
          PHI.addIncoming(Incoming, New);
        }
      }
      // Keep track of new headers and latches as we create them, so that
      // we can insert the proper branches later.
      if (*BB == Header)
        Headers.push_back(New);
      if (*BB == LatchBlock)
        Latches.push_back(New);

      // Keep track of the successor of the new header in the current iteration.
      for (auto *Pred : predecessors(*BB))
        if (Pred == Header) {
          HeaderSucc.push_back(New);
          break;
        }

      NewBlocks.push_back(New);
      UnrolledLoopBlocks.push_back(New);

      // Update DomTree: since we just copy the loop body, and each copy has a
      // dedicated entry block (copy of the header block), this header's copy
      // dominates all copied blocks. That means, dominance relations in the
      // copied body are the same as in the original body.
      if (DT) {
        if (*BB == Header)
          DT->addNewBlock(New, Latches[It - 1]);
        else {
          auto BBDomNode = DT->getNode(*BB);
          auto BBIDom = BBDomNode->getIDom();
          BasicBlock *OriginalBBIDom = BBIDom->getBlock();
          DT->addNewBlock(
              New, cast<BasicBlock>(LastValueMap[cast<Value>(OriginalBBIDom)]));
        }
      }
    }

    // Remap all instructions in the most recent iteration
    for (BasicBlock *NewBlock : NewBlocks) {
      for (Instruction &I : *NewBlock) {
        ::remapInstruction(&I, LastValueMap);
        if (auto *II = dyn_cast<IntrinsicInst>(&I))
          if (II->getIntrinsicID() == Intrinsic::assume)
            AC->registerAssumption(II);
      }
    }
  }

  // Loop over the PHI nodes in the original block, setting incoming values.
  for (PHINode *PN : OrigPHINode) {
    if (CompletelyUnroll) {
      PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader));
      Header->getInstList().erase(PN);
    } else if (ULO.Count > 1) {
      Value *InVal = PN->removeIncomingValue(LatchBlock, false);
      // If this value was defined in the loop, take the value defined by the
      // last iteration of the loop.
      if (Instruction *InValI = dyn_cast<Instruction>(InVal)) {
        if (L->contains(InValI))
          InVal = LastValueMap[InVal];
      }
      assert(Latches.back() == LastValueMap[LatchBlock] && "bad last latch");
      PN->addIncoming(InVal, Latches.back());
    }
  }
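
  // A small example of the partial-unroll case above (hypothetical IR, a Count
  // of 2): the original header phi
  //   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  // loses its %latch entry and instead receives the last copy's value, e.g.
  //   %iv = phi i64 [ 0, %preheader ], [ %iv.next.1, %latch.1 ]
  // while a complete unroll simply replaces %iv with its preheader value.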

  auto setDest = [LoopExit, ContinueOnTrue](BasicBlock *Src, BasicBlock *Dest,
                                            ArrayRef<BasicBlock *> NextBlocks,
                                            BasicBlock *BlockInLoop,
                                            bool NeedConditional) {
    auto *Term = cast<BranchInst>(Src->getTerminator());
    if (NeedConditional) {
      // Update the conditional branch's successor for the following
      // iteration.
      Term->setSuccessor(!ContinueOnTrue, Dest);
    } else {
      // Remove phi operands at this loop exit
      if (Dest != LoopExit) {
        BasicBlock *BB = Src;
        for (BasicBlock *Succ : successors(BB)) {
          // Preserve the incoming value from BB if we are jumping to the block
          // in the current loop.
          if (Succ == BlockInLoop)
            continue;
          for (PHINode &Phi : Succ->phis())
            Phi.removeIncomingValue(BB, false);
        }
      }
      // Replace the conditional branch with an unconditional one.
      BranchInst::Create(Dest, Term);
      Term->eraseFromParent();
    }
  };

  // Now that all the basic blocks for the unrolled iterations are in place,
  // set up the branches to connect them.
  if (LatchIsExiting) {
    // Set up latches to branch to the new header in the unrolled iterations or
    // the loop exit for the last latch in a fully unrolled loop.
    for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
      // The branch destination.
      unsigned j = (i + 1) % e;
      BasicBlock *Dest = Headers[j];
      bool NeedConditional = true;

      if (RuntimeTripCount && j != 0) {
        NeedConditional = false;
      }

      // For a complete unroll, make the last iteration end with a branch
      // to the exit block.
      if (CompletelyUnroll) {
        if (j == 0)
          Dest = LoopExit;
        // If using trip count upper bound to completely unroll, we need to keep
        // the conditional branch except the last one because the loop may exit
        // after any iteration.
        assert(NeedConditional &&
               "NeedConditional cannot be modified by both complete "
               "unrolling and runtime unrolling");
        NeedConditional =
            (ULO.PreserveCondBr && j && !(ULO.PreserveOnlyFirst && i != 0));
      } else if (j != BreakoutTrip &&
                 (ULO.TripMultiple == 0 || j % ULO.TripMultiple != 0)) {
        // If we know the trip count or a multiple of it, we can safely use an
        // unconditional branch for some iterations.
        NeedConditional = false;
      }

      setDest(Latches[i], Dest, Headers, Headers[i], NeedConditional);
    }
  } else {
    // Setup headers to branch to their new successors in the unrolled
    // iterations.
    for (unsigned i = 0, e = Headers.size(); i != e; ++i) {
      // The branch destination.
      unsigned j = (i + 1) % e;
      BasicBlock *Dest = HeaderSucc[i];
      bool NeedConditional = true;

      if (RuntimeTripCount && j != 0)
        NeedConditional = false;

      if (CompletelyUnroll)
        // We cannot drop the conditional branch for the last condition, as we
        // may have to execute the loop body depending on the condition.
        NeedConditional = j == 0 || ULO.PreserveCondBr;
      else if (j != BreakoutTrip &&
               (ULO.TripMultiple == 0 || j % ULO.TripMultiple != 0))
        // If we know the trip count or a multiple of it, we can safely use an
        // unconditional branch for some iterations.
        NeedConditional = false;

      setDest(Headers[i], Dest, Headers, HeaderSucc[i], NeedConditional);
    }

    // Set up latches to branch to the new header in the unrolled iterations or
    // the loop exit for the last latch in a fully unrolled loop.

    for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
      // The original branch was replicated in each unrolled iteration.
      BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());

      // The branch destination.
      unsigned j = (i + 1) % e;
      BasicBlock *Dest = Headers[j];

      // When completely unrolling, the last latch becomes unreachable.
      if (CompletelyUnroll && j == 0)
        new UnreachableInst(Term->getContext(), Term);
      else
        // Replace the conditional branch with an unconditional one.
        BranchInst::Create(Dest, Term);

      Term->eraseFromParent();
    }
  }

  // Update dominators of blocks we might reach through exits.
  // Immediate dominator of such block might change, because we add more
  // routes which can lead to the exit: we can now reach it from the copied
  // iterations too.
  if (DT && ULO.Count > 1) {
    for (auto *BB : OriginalLoopBlocks) {
      auto *BBDomNode = DT->getNode(BB);
      SmallVector<BasicBlock *, 16> ChildrenToUpdate;
      for (auto *ChildDomNode : BBDomNode->getChildren()) {
        auto *ChildBB = ChildDomNode->getBlock();
        if (!L->contains(ChildBB))
          ChildrenToUpdate.push_back(ChildBB);
      }
      BasicBlock *NewIDom;
      BasicBlock *&TermBlock = LatchIsExiting ? LatchBlock : Header;
      auto &TermBlocks = LatchIsExiting ? Latches : Headers;
      if (BB == TermBlock) {
        // The latch is special because we emit unconditional branches in
        // some cases where the original loop contained a conditional branch.
        // Since the latch is always at the bottom of the loop, if the latch
        // dominated an exit before unrolling, the new dominator of that exit
        // must also be a latch.  Specifically, the dominator is the first
        // latch which ends in a conditional branch, or the last latch if
        // there is no such latch.
        // For loops exiting from the header, we limit the supported loops
        // to have a single exiting block.
        NewIDom = TermBlocks.back();
        for (BasicBlock *Iter : TermBlocks) {
          Instruction *Term = Iter->getTerminator();
          if (isa<BranchInst>(Term) && cast<BranchInst>(Term)->isConditional()) {
            NewIDom = Iter;
            break;
          }
        }
      } else {
        // The new idom of the block will be the nearest common dominator
        // of all copies of the previous idom. This is equivalent to the
        // nearest common dominator of the previous idom and the first latch,
        // which dominates all copies of the previous idom.
        NewIDom = DT->findNearestCommonDominator(BB, LatchBlock);
      }
      for (auto *ChildBB : ChildrenToUpdate)
        DT->changeImmediateDominator(ChildBB, NewIDom);
    }
  }

  assert(!DT || !UnrollVerifyDomtree ||
         DT->verify(DominatorTree::VerificationLevel::Fast));

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  // Merge adjacent basic blocks, if possible.
  for (BasicBlock *Latch : Latches) {
    BranchInst *Term = dyn_cast<BranchInst>(Latch->getTerminator());
    assert((Term ||
            (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) &&
           "Need a branch as terminator, except when fully unrolling with "
           "unconditional latch");
    if (Term && Term->isUnconditional()) {
      BasicBlock *Dest = Term->getSuccessor(0);
      BasicBlock *Fold = Dest->getUniquePredecessor();
      if (MergeBlockIntoPredecessor(Dest, &DTU, LI)) {
        // Dest has been folded into Fold. Update our worklists accordingly.
        std::replace(Latches.begin(), Latches.end(), Dest, Fold);
        UnrolledLoopBlocks.erase(std::remove(UnrolledLoopBlocks.begin(),
                                             UnrolledLoopBlocks.end(), Dest),
                                 UnrolledLoopBlocks.end());
      }
    }
  }
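
  // For instance (hypothetical block names), after partial unrolling the CFG
  // often contains chains like "latch.1 -> header.2" where latch.1 now ends in
  // an unconditional branch and header.2 has no other predecessors; the loop
  // above folds header.2 into latch.1, so each unrolled iteration becomes a
  // single straight-line block instead of a chain of trivial blocks.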
  // Apply updates to the DomTree.
  DT = &DTU.getDomTree();

  // At this point, the code is well formed.  We now simplify the unrolled loop,
  // doing constant propagation and dead code elimination as we go.
  simplifyLoopAfterUnroll(L, !CompletelyUnroll && (ULO.Count > 1 || Peeled), LI,
                          SE, DT, AC);

  NumCompletelyUnrolled += CompletelyUnroll;
  ++NumUnrolled;

  Loop *OuterL = L->getParentLoop();
  // Update LoopInfo if the loop is completely removed.
  if (CompletelyUnroll)
    LI->erase(L);

  // After complete unrolling most of the blocks should be contained in OuterL.
  // However, some of them might happen to be out of OuterL (e.g. if they
  // precede a loop exit). In this case we might need to insert PHI nodes in
  // order to preserve LCSSA form.
  // We don't need to check this if we already know that we need to fix LCSSA
  // form.
  // TODO: For now we just recompute LCSSA for the outer loop in this case, but
  // it should be possible to fix it in-place.
  if (PreserveLCSSA && OuterL && CompletelyUnroll && !NeedToFixLCSSA)
    NeedToFixLCSSA |= ::needToInsertPhisForLCSSA(OuterL, UnrolledLoopBlocks, LI);

  // If we have a pass and a DominatorTree we should re-simplify impacted loops
  // to ensure subsequent analyses can rely on this form. We want to simplify
  // at least one layer outside of the loop that was unrolled so that any
  // changes to the parent loop exposed by the unrolling are considered.
  if (DT) {
    if (OuterL) {
      // OuterL includes all loops for which we can break loop-simplify, so
      // it's sufficient to simplify only it (it'll recursively simplify inner
      // loops too).
      if (NeedToFixLCSSA) {
        // LCSSA must be performed on the outermost affected loop. The unrolled
        // loop's last loop latch is guaranteed to be in the outermost loop
        // after LoopInfo's been updated by LoopInfo::erase.
        Loop *LatchLoop = LI->getLoopFor(Latches.back());
        Loop *FixLCSSALoop = OuterL;
        if (!FixLCSSALoop->contains(LatchLoop))
          while (FixLCSSALoop->getParentLoop() != LatchLoop)
            FixLCSSALoop = FixLCSSALoop->getParentLoop();

        formLCSSARecursively(*FixLCSSALoop, *DT, LI, SE);
      } else if (PreserveLCSSA) {
        assert(OuterL->isLCSSAForm(*DT) &&
               "Loops should be in LCSSA form after loop-unroll.");
      }

      // TODO: That potentially might be compile-time expensive. We should try
      // to fix the loop-simplified form incrementally.
      simplifyLoop(OuterL, DT, LI, SE, AC, nullptr, PreserveLCSSA);
    } else {
      // Simplify loops for which we might've broken loop-simplify form.
      for (Loop *SubLoop : LoopsToSimplify)
        simplifyLoop(SubLoop, DT, LI, SE, AC, nullptr, PreserveLCSSA);
    }
  }

  return CompletelyUnroll ? LoopUnrollResult::FullyUnrolled
                          : LoopUnrollResult::PartiallyUnrolled;
}
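
// A rough sketch of how a caller might drive UnrollLoop (the exact option set
// is defined in UnrollLoop.h, and real callers such as the loop-unroll pass
// compute these values from cost modelling and loop metadata; the numbers
// below are purely illustrative):
//
//   UnrollLoopOptions ULO;
//   ULO.Count = 4;                 // unroll factor
//   ULO.TripCount = 0;             // unknown at compile time
//   ULO.TripMultiple = 1;
//   ULO.PeelCount = 0;
//   ULO.AllowRuntime = true;       // allow a runtime remainder loop
//   ULO.AllowExpensiveTripCount = false;
//   ULO.Force = false;
//   ULO.PreserveCondBr = false;
//   ULO.PreserveOnlyFirst = false;
//   ULO.UnrollRemainder = false;
//   ULO.ForgetAllSCEV = false;
//   Loop *Remainder = nullptr;
//   LoopUnrollResult R =
//       UnrollLoop(L, ULO, LI, SE, DT, AC, ORE, /*PreserveLCSSA*/ true,
//                  &Remainder);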

/// Given an llvm.loop loop id metadata node, returns the loop hint metadata
/// node with the given name (for example, "llvm.loop.unroll.count"). If no
/// such metadata node exists, then nullptr is returned.
MDNode *llvm::GetUnrollMetadata(MDNode *LoopID, StringRef Name) {
  // First operand should refer to the loop id itself.
  assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
  assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

  for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) {
    MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
    if (!MD)
      continue;

    MDString *S = dyn_cast<MDString>(MD->getOperand(0));
    if (!S)
      continue;

    if (Name.equals(S->getString()))
      return MD;
  }
  return nullptr;
}
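
// For reference, a typical loop id that this helper walks looks like the
// following textual IR (illustrative only):
//
//   br i1 %cond, label %loop, label %exit, !llvm.loop !0
//   ...
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.unroll.count", i32 4}
//   !2 = !{!"llvm.loop.unroll.disable"}
//
// GetUnrollMetadata(!0, "llvm.loop.unroll.count") would return !1, while a
// lookup of a name that is not present returns nullptr.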