//===- LoopPeel.cpp -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Loop Peeling Utilities.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-peel"

STATISTIC(NumPeeled, "Number of loops peeled");

static cl::opt<unsigned> UnrollPeelCount(
    "unroll-peel-count", cl::Hidden,
    cl::desc("Set the unroll peeling count, for testing purposes"));

static cl::opt<bool>
    UnrollAllowPeeling("unroll-allow-peeling", cl::init(true), cl::Hidden,
                       cl::desc("Allows loops to be peeled when the dynamic "
                                "trip count is known to be low."));

static cl::opt<bool>
    UnrollAllowLoopNestsPeeling("unroll-allow-loop-nests-peeling",
                                cl::init(false), cl::Hidden,
                                cl::desc("Allows loop nests to be peeled."));

static cl::opt<unsigned> UnrollPeelMaxCount(
    "unroll-peel-max-count", cl::init(7), cl::Hidden,
    cl::desc("Max average trip count which will cause loop peeling."));

static cl::opt<unsigned> UnrollForcePeelCount(
    "unroll-force-peel-count", cl::init(0), cl::Hidden,
    cl::desc("Force a peel count regardless of profiling information."));

static cl::opt<bool> UnrollPeelMultiDeoptExit(
    "unroll-peel-multi-deopt-exit", cl::init(true), cl::Hidden,
    cl::desc("Allow peeling of loops with multiple deopt exits."));

static const char *PeeledCountMetaData = "llvm.loop.peeled.count";

// Designates that a Phi is estimated to become invariant after an "infinite"
// number of loop iterations (i.e. it may only become invariant if the loop is
// fully unrolled).
static const unsigned InfiniteIterationsToInvariance =
    std::numeric_limits<unsigned>::max();

// Check whether we are capable of peeling this loop.
bool llvm::canPeel(Loop *L) {
  // Make sure the loop is in simplified form.
  if (!L->isLoopSimplifyForm())
    return false;

  if (UnrollPeelMultiDeoptExit) {
    SmallVector<BasicBlock *, 4> Exits;
    L->getUniqueNonLatchExitBlocks(Exits);

    if (!Exits.empty()) {
      // The latch's terminator must be a conditional branch, the latch must
      // be exiting, and all non-latch exits must end in a deoptimize call.
      const BasicBlock *Latch = L->getLoopLatch();
      const BranchInst *T = dyn_cast<BranchInst>(Latch->getTerminator());
      return T && T->isConditional() && L->isLoopExiting(Latch) &&
             all_of(Exits, [](const BasicBlock *BB) {
               return BB->getTerminatingDeoptimizeCall();
             });
    }
  }

  // Only peel loops that contain a single exit.
  if (!L->getExitingBlock() || !L->getUniqueExitBlock())
    return false;

  // Don't try to peel loops where the latch is not the exiting block.
  // This can be an indication of two different things:
  // 1) The loop is not rotated.
  // 2) The loop contains irreducible control flow that involves the latch.
  const BasicBlock *Latch = L->getLoopLatch();
  if (Latch != L->getExitingBlock())
    return false;

  // Peeling is only supported if the latch is a branch.
  if (!isa<BranchInst>(Latch->getTerminator()))
    return false;

  return true;
}

// This function calculates the number of iterations after which the given Phi
// becomes an invariant. The pre-calculated values are memoized in the map. The
// function (denoted I below) is calculated according to the following
// definition:
//   Given %x = phi <Inputs from above the loop>, ..., [%y, %back.edge].
//   If %y is a loop invariant, then I(%x) = 1.
//   If %y is a Phi from the loop header, I(%x) = I(%y) + 1.
//   Otherwise, I(%x) is infinite.
// TODO: Actually if %y is an expression that depends only on Phi %z and some
//       loop invariants, we can estimate I(%x) = I(%z) + 1. The example
//       looks like:
//         %x = phi(0, %a),  <-- becomes invariant starting from 3rd iteration.
//         %y = phi(0, 5),
//         %a = %y + 1.
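// For example, given the header Phis
//   %x = phi(0, %y),
//   %y = phi(0, %inv),   <-- %inv is a loop invariant
// we get I(%y) = 1 and I(%x) = 2, i.e. peeling two iterations makes both Phis
// invariant in the remaining loop.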
static unsigned calculateIterationsToInvariance(
    PHINode *Phi, Loop *L, BasicBlock *BackEdge,
    SmallDenseMap<PHINode *, unsigned> &IterationsToInvariance) {
  assert(Phi->getParent() == L->getHeader() &&
         "Non-loop Phi should not be checked for turning into invariant.");
  assert(BackEdge == L->getLoopLatch() && "Wrong latch?");
  // If we already know the answer, take it from the map.
  auto I = IterationsToInvariance.find(Phi);
  if (I != IterationsToInvariance.end())
    return I->second;

  // Otherwise we need to analyze the input from the back edge.
  Value *Input = Phi->getIncomingValueForBlock(BackEdge);
  // Place infinity into the map to avoid infinite recursion for cyclic Phis.
  // Such cycles can never stop on an invariant.
  IterationsToInvariance[Phi] = InfiniteIterationsToInvariance;
  unsigned ToInvariance = InfiniteIterationsToInvariance;

  if (L->isLoopInvariant(Input))
    ToInvariance = 1u;
  else if (PHINode *IncPhi = dyn_cast<PHINode>(Input)) {
    // Only consider Phis in the header block.
    if (IncPhi->getParent() != L->getHeader())
      return InfiniteIterationsToInvariance;
    // If the input becomes an invariant after X iterations, then our Phi
    // becomes an invariant after X + 1 iterations.
    unsigned InputToInvariance = calculateIterationsToInvariance(
        IncPhi, L, BackEdge, IterationsToInvariance);
    if (InputToInvariance != InfiniteIterationsToInvariance)
      ToInvariance = InputToInvariance + 1u;
  }

  // If we found that this Phi lies in an invariant chain, update the map.
  if (ToInvariance != InfiniteIterationsToInvariance)
    IterationsToInvariance[Phi] = ToInvariance;
  return ToInvariance;
}

// Return the number of iterations to peel off that make conditions in the
// body true/false. For example, if we peel 2 iterations off the loop below,
// the condition i < 2 can be evaluated at compile time.
//   for (i = 0; i < n; i++)
//     if (i < 2)
//       ..
//     else
//       ..
//   }
static unsigned countToEliminateCompares(Loop &L, unsigned MaxPeelCount,
                                         ScalarEvolution &SE) {
  assert(L.isLoopSimplifyForm() && "Loop needs to be in loop simplify form");
  unsigned DesiredPeelCount = 0;

  for (auto *BB : L.blocks()) {
    auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || BI->isUnconditional())
      continue;

    // Ignore loop exit condition.
    if (L.getLoopLatch() == BB)
      continue;

    Value *Condition = BI->getCondition();
    Value *LeftVal, *RightVal;
    CmpInst::Predicate Pred;
    if (!match(Condition, m_ICmp(Pred, m_Value(LeftVal), m_Value(RightVal))))
      continue;

    const SCEV *LeftSCEV = SE.getSCEV(LeftVal);
    const SCEV *RightSCEV = SE.getSCEV(RightVal);
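    // For the i < 2 example above, LeftSCEV would be the AddRec {0,+,1} of
    // this loop and RightSCEV the constant 2.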

    // Do not consider predicates that are known to be true or false
    // independently of the loop iteration.
    if (SE.isKnownPredicate(Pred, LeftSCEV, RightSCEV) ||
        SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), LeftSCEV,
                            RightSCEV))
      continue;

    // Check if we have a condition with one AddRec and one non-AddRec
    // expression. Normalize LeftSCEV to be the AddRec.
    if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
      if (isa<SCEVAddRecExpr>(RightSCEV)) {
        std::swap(LeftSCEV, RightSCEV);
        Pred = ICmpInst::getSwappedPredicate(Pred);
      } else
        continue;
    }

    const SCEVAddRecExpr *LeftAR = cast<SCEVAddRecExpr>(LeftSCEV);

    // Avoid huge SCEV computations in the loop below; make sure we only
    // consider AddRecs of the loop we are trying to peel.
    if (!LeftAR->isAffine() || LeftAR->getLoop() != &L)
      continue;
    if (!(ICmpInst::isEquality(Pred) && LeftAR->hasNoSelfWrap()) &&
        !SE.getMonotonicPredicateType(LeftAR, Pred))
      continue;

    // Check if extending the current DesiredPeelCount lets us evaluate Pred
    // or !Pred in the loop body statically.
    unsigned NewPeelCount = DesiredPeelCount;

    const SCEV *IterVal = LeftAR->evaluateAtIteration(
        SE.getConstant(LeftSCEV->getType(), NewPeelCount), SE);

    // If the original condition is not known, get the negated predicate
    // (which holds on the else branch) and check if it is known. This allows
    // us to peel off iterations that make the original condition false.
    if (!SE.isKnownPredicate(Pred, IterVal, RightSCEV))
      Pred = ICmpInst::getInversePredicate(Pred);

    const SCEV *Step = LeftAR->getStepRecurrence(SE);
    const SCEV *NextIterVal = SE.getAddExpr(IterVal, Step);
    auto PeelOneMoreIteration = [&IterVal, &NextIterVal, &SE, Step,
                                 &NewPeelCount]() {
      IterVal = NextIterVal;
      NextIterVal = SE.getAddExpr(IterVal, Step);
      NewPeelCount++;
    };

    auto CanPeelOneMoreIteration = [&NewPeelCount, &MaxPeelCount]() {
      return NewPeelCount < MaxPeelCount;
    };

    while (CanPeelOneMoreIteration() &&
           SE.isKnownPredicate(Pred, IterVal, RightSCEV))
      PeelOneMoreIteration();

    // With *that* peel count, does the predicate !Pred become known in the
    // first iteration of the loop body after peeling?
    if (!SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), IterVal,
                             RightSCEV))
      continue; // If not, give up.

    // However, for equality comparisons, that isn't always sufficient to
    // eliminate the comparison in the loop body; we may need to peel one more
    // iteration. See if that makes !Pred become unknown again.
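    // For example, for the condition i == 2 with i starting at 0, peeling two
    // iterations leaves the first remaining iteration at i == 2, where the
    // compare still changes value within the remaining loop; peeling a third
    // iteration makes i != 2 hold for every remaining iteration.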
    if (ICmpInst::isEquality(Pred) &&
        !SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), NextIterVal,
                             RightSCEV) &&
        !SE.isKnownPredicate(Pred, IterVal, RightSCEV) &&
        SE.isKnownPredicate(Pred, NextIterVal, RightSCEV)) {
      if (!CanPeelOneMoreIteration())
        continue; // Need to peel one more iteration, but can't. Give up.
      PeelOneMoreIteration(); // Great!
    }

    DesiredPeelCount = std::max(DesiredPeelCount, NewPeelCount);
  }

  return DesiredPeelCount;
}

// Compute the number of iterations we want to peel off and record it in
// PP.PeelCount.
void llvm::computePeelCount(Loop *L, unsigned LoopSize,
                            TargetTransformInfo::PeelingPreferences &PP,
                            unsigned &TripCount, ScalarEvolution &SE,
                            unsigned Threshold) {
  assert(LoopSize > 0 && "Zero loop size is not allowed!");
  // Save the PP.PeelCount value set by the target in
  // TTI.getPeelingPreferences or by the flag -unroll-peel-count.
  unsigned TargetPeelCount = PP.PeelCount;
  PP.PeelCount = 0;
  if (!canPeel(L))
    return;

  // Only try to peel innermost loops by default.
  // The constraint can be relaxed by the target in TTI.getUnrollingPreferences
  // or by the flag -unroll-allow-loop-nests-peeling.
  if (!PP.AllowLoopNestsPeeling && !L->isInnermost())
    return;

  // If the user provided a peel count, use that.
  bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0;
  if (UserPeelCount) {
    LLVM_DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount
                      << " iterations.\n");
    PP.PeelCount = UnrollForcePeelCount;
    PP.PeelProfiledIterations = true;
    return;
  }

  // Skip peeling if it's disabled.
  if (!PP.AllowPeeling)
    return;

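  // "llvm.loop.peeled.count" records how many iterations earlier passes have
  // already peeled off this loop, so repeated peeling stays within
  // UnrollPeelMaxCount overall.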
  unsigned AlreadyPeeled = 0;
  if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData))
    AlreadyPeeled = *Peeled;
  // Stop if we already peeled off the maximum number of iterations.
  if (AlreadyPeeled >= UnrollPeelMaxCount)
    return;

  // Here we try to get rid of Phis which become invariants after 1, 2, ..., N
  // iterations of the loop. For this we compute the number of iterations after
  // which every Phi is guaranteed to become an invariant, and try to peel the
  // maximum number of iterations among these values, thus turning all those
  // Phis into invariants.
  // First, check that we can peel at least one iteration.
  if (2 * LoopSize <= Threshold && UnrollPeelMaxCount > 0) {
    // Store the pre-calculated values here.
    SmallDenseMap<PHINode *, unsigned> IterationsToInvariance;
    // Now go through all Phis to calculate the number of iterations they need
    // to become invariant.
    // Start the max computation with the PP.PeelCount value set by the target
    // in TTI.getPeelingPreferences or by the flag -unroll-peel-count.
    unsigned DesiredPeelCount = TargetPeelCount;
    BasicBlock *BackEdge = L->getLoopLatch();
    assert(BackEdge && "Loop is not in simplified form?");
    for (auto BI = L->getHeader()->begin(); isa<PHINode>(&*BI); ++BI) {
      PHINode *Phi = cast<PHINode>(&*BI);
      unsigned ToInvariance = calculateIterationsToInvariance(
          Phi, L, BackEdge, IterationsToInvariance);
      if (ToInvariance != InfiniteIterationsToInvariance)
        DesiredPeelCount = std::max(DesiredPeelCount, ToInvariance);
    }

    // Respect the limits implied by the loop size and the max peel count.
    unsigned MaxPeelCount = UnrollPeelMaxCount;
    MaxPeelCount = std::min(MaxPeelCount, Threshold / LoopSize - 1);
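    // This keeps the total size of the peeled iterations plus the remaining
    // loop, roughly (MaxPeelCount + 1) * LoopSize, within Threshold.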

    DesiredPeelCount = std::max(DesiredPeelCount,
                                countToEliminateCompares(*L, MaxPeelCount, SE));

    if (DesiredPeelCount > 0) {
      DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount);
      // Consider the max peel count limitation.
      assert(DesiredPeelCount > 0 && "Wrong loop size estimation?");
      if (DesiredPeelCount + AlreadyPeeled <= UnrollPeelMaxCount) {
        LLVM_DEBUG(dbgs() << "Peel " << DesiredPeelCount
                          << " iteration(s) to turn"
                          << " some Phis into invariants.\n");
        PP.PeelCount = DesiredPeelCount;
        PP.PeelProfiledIterations = false;
        return;
      }
    }
  }

  // Bail if we know the statically calculated trip count.
  // In this case we prefer partial unrolling instead.
  if (TripCount)
    return;

  // Do not apply profile-based peeling if it is disabled.
  if (!PP.PeelProfiledIterations)
    return;
  // If we don't know the trip count, but have reason to believe the average
  // trip count is low, peeling should be beneficial, since we will usually
  // hit the peeled section.
  // We only do this in the presence of profile information, since otherwise
  // our estimates of the trip count are not reliable enough.
  if (L->getHeader()->getParent()->hasProfileData()) {
    Optional<unsigned> PeelCount = getLoopEstimatedTripCount(L);
    if (!PeelCount)
      return;

    LLVM_DEBUG(dbgs() << "Profile-based estimated trip count is " << *PeelCount
                      << "\n");

    if (*PeelCount) {
      if ((*PeelCount + AlreadyPeeled <= UnrollPeelMaxCount) &&
          (LoopSize * (*PeelCount + 1) <= Threshold)) {
        LLVM_DEBUG(dbgs() << "Peeling first " << *PeelCount
                          << " iterations.\n");
        PP.PeelCount = *PeelCount;
        return;
      }
      LLVM_DEBUG(dbgs() << "Requested peel count: " << *PeelCount << "\n");
      LLVM_DEBUG(dbgs() << "Already peel count: " << AlreadyPeeled << "\n");
      LLVM_DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n");
      LLVM_DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1)
                        << "\n");
      LLVM_DEBUG(dbgs() << "Max peel cost: " << Threshold << "\n");
    }
  }
}

/// Update the branch weights of the latch of a peeled-off loop
/// iteration.
/// This sets the branch weights for the latch of the recently peeled off loop
/// iteration correctly.
/// Let F be the weight of the edge from latch to header.
/// Let E be the weight of the edge from latch to exit.
/// F/(F+E) is the probability of taking the backedge and E/(F+E) is the
/// probability of exiting.
/// Then, the estimated trip count is F / E.
/// For the I-th (counting from 0) peeled-off iteration we set the weights for
/// the peeled latch to (TC - I, 1). This gives a reasonable distribution: the
/// probability of exiting, 1/(TC-I), increases, while at the same time the
/// estimated trip count of the remaining loop decreases by I.
/// To avoid dealing with division rounding we can just multiply both parts of
/// the weights by E and use the weights (F - I * E, E).
///
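/// For example, with initial weights F = 100 and E = 10 (estimated trip count
/// 10), the first peeled latch gets the weights (100, 10) and FallThroughWeight
/// is reduced to 90, i.e. the remaining loop has an estimated trip count of 9.
///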
/// \param Header The copy of the header block that belongs to the next
/// iteration.
/// \param LatchBR The copy of the latch branch that belongs to this iteration.
/// \param[in,out] FallThroughWeight The weight of the edge from latch to
/// header before peeling (in) and after peeling off one iteration (out).
static void updateBranchWeights(BasicBlock *Header, BranchInst *LatchBR,
                                uint64_t ExitWeight,
                                uint64_t &FallThroughWeight) {
  // A FallThroughWeight of 0 means that there are no branch weights on the
  // original latch block or the estimated trip count is zero.
  if (!FallThroughWeight)
    return;

  unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
  MDBuilder MDB(LatchBR->getContext());
  MDNode *WeightNode =
      HeaderIdx ? MDB.createBranchWeights(ExitWeight, FallThroughWeight)
                : MDB.createBranchWeights(FallThroughWeight, ExitWeight);
  LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
  FallThroughWeight =
      FallThroughWeight > ExitWeight ? FallThroughWeight - ExitWeight : 1;
}

/// Initialize the weights.
///
/// \param Header The header block.
/// \param LatchBR The latch branch.
/// \param[out] ExitWeight The weight of the edge from Latch to Exit.
/// \param[out] FallThroughWeight The weight of the edge from Latch to Header.
static void initBranchWeights(BasicBlock *Header, BranchInst *LatchBR,
                              uint64_t &ExitWeight,
                              uint64_t &FallThroughWeight) {
  uint64_t TrueWeight, FalseWeight;
  if (!LatchBR->extractProfMetadata(TrueWeight, FalseWeight))
    return;
  unsigned HeaderIdx = LatchBR->getSuccessor(0) == Header ? 0 : 1;
  ExitWeight = HeaderIdx ? TrueWeight : FalseWeight;
  FallThroughWeight = HeaderIdx ? FalseWeight : TrueWeight;
}

/// Update the weights of the original Latch block after peeling off all
/// iterations.
///
/// \param Header The header block.
/// \param LatchBR The latch branch.
/// \param ExitWeight The weight of the edge from Latch to Exit.
/// \param FallThroughWeight The weight of the edge from Latch to Header.
static void fixupBranchWeights(BasicBlock *Header, BranchInst *LatchBR,
                               uint64_t ExitWeight,
                               uint64_t FallThroughWeight) {
  // A FallThroughWeight of 0 means that there are no branch weights on the
  // original latch block or the estimated trip count is zero.
  if (!FallThroughWeight)
    return;

  // Set the branch weights on the loop exit.
  MDBuilder MDB(LatchBR->getContext());
  unsigned HeaderIdx = LatchBR->getSuccessor(0) == Header ? 0 : 1;
  MDNode *WeightNode =
      HeaderIdx ? MDB.createBranchWeights(ExitWeight, FallThroughWeight)
                : MDB.createBranchWeights(FallThroughWeight, ExitWeight);
  LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
}

/// Clones the body of the loop L, putting it between \p InsertTop and \p
/// InsertBot.
/// \param IterNumber The serial number of the iteration currently being
/// peeled off.
/// \param ExitEdges The exit edges of the original loop.
/// \param[out] NewBlocks A list of the blocks in the newly created clone.
/// \param[out] VMap The value map between the loop and the new clone.
/// \param LoopBlocks A helper for DFS-traversal of the loop.
/// \param LVMap A value-map that maps instructions from the original loop to
/// instructions in the last peeled-off iteration.
static void cloneLoopBlocks(
    Loop *L, unsigned IterNumber, BasicBlock *InsertTop, BasicBlock *InsertBot,
    SmallVectorImpl<std::pair<BasicBlock *, BasicBlock *>> &ExitEdges,
    SmallVectorImpl<BasicBlock *> &NewBlocks, LoopBlocksDFS &LoopBlocks,
    ValueToValueMapTy &VMap, ValueToValueMapTy &LVMap, DominatorTree *DT,
    LoopInfo *LI) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  BasicBlock *PreHeader = L->getLoopPreheader();

  Function *F = Header->getParent();
  LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO();
  Loop *ParentLoop = L->getParentLoop();

  // For each block in the original loop, create a new copy,
  // and update the value map with the newly created values.
  for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
    BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".peel", F);
    NewBlocks.push_back(NewBB);

    // If an original block is an immediate child of the loop L, its copy
    // is a child of a ParentLoop after peeling. If a block is a child of
    // a nested loop, it is handled in the cloneLoop() call below.
    if (ParentLoop && LI->getLoopFor(*BB) == L)
      ParentLoop->addBasicBlockToLoop(NewBB, *LI);

    VMap[*BB] = NewBB;

    // If dominator tree is available, insert nodes to represent cloned blocks.
    if (DT) {
      if (Header == *BB)
        DT->addNewBlock(NewBB, InsertTop);
      else {
        DomTreeNode *IDom = DT->getNode(*BB)->getIDom();
        // VMap must contain entry for IDom, as the iteration order is RPO.
        DT->addNewBlock(NewBB, cast<BasicBlock>(VMap[IDom->getBlock()]));
      }
    }
  }

  // Recursively create the new Loop objects for nested loops, if any,
  // to preserve LoopInfo.
  for (Loop *ChildLoop : *L) {
    cloneLoop(ChildLoop, ParentLoop, VMap, LI, nullptr);
  }

  // Hook-up the control flow for the newly inserted blocks.
  // The new header is hooked up directly to the "top", which is either
  // the original loop preheader (for the first iteration) or the previous
  // iteration's exiting block (for every other iteration)
  InsertTop->getTerminator()->setSuccessor(0, cast<BasicBlock>(VMap[Header]));

  // Similarly, for the latch:
  // The original exiting edge is still hooked up to the loop exit.
  // The backedge now goes to the "bottom", which is either the loop's real
  // header (for the last peeled iteration) or the copied header of the next
  // iteration (for every other iteration)
  BasicBlock *NewLatch = cast<BasicBlock>(VMap[Latch]);
  BranchInst *LatchBR = cast<BranchInst>(NewLatch->getTerminator());
  for (unsigned idx = 0, e = LatchBR->getNumSuccessors(); idx < e; ++idx)
    if (LatchBR->getSuccessor(idx) == Header) {
      LatchBR->setSuccessor(idx, InsertBot);
      break;
    }
  if (DT)
    DT->changeImmediateDominator(InsertBot, NewLatch);

  // The new copy of the loop body starts with a bunch of PHI nodes
  // that pick an incoming value from either the preheader, or the previous
  // loop iteration. Since this copy is no longer part of the loop, we
  // resolve this statically:
  // For the first iteration, we use the value from the preheader directly.
  // For any other iteration, we replace the phi with the value generated by
  // the immediately preceding clone of the loop body (which represents
  // the previous iteration).
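  // For example, for a header Phi
  //   %iv = phi [ 0, %preheader ], [ %iv.next, %latch ]
  // the clone made for iteration 0 uses 0 directly, while the clone for
  // iteration N uses the value %iv.next was mapped to in iteration N - 1.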
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    PHINode *NewPHI = cast<PHINode>(VMap[&*I]);
    if (IterNumber == 0) {
      VMap[&*I] = NewPHI->getIncomingValueForBlock(PreHeader);
    } else {
      Value *LatchVal = NewPHI->getIncomingValueForBlock(Latch);
      Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
      if (LatchInst && L->contains(LatchInst))
        VMap[&*I] = LVMap[LatchInst];
      else
        VMap[&*I] = LatchVal;
    }
    cast<BasicBlock>(VMap[Header])->getInstList().erase(NewPHI);
  }

  // Fix up the outgoing values - we need to add a value for the iteration
  // we've just created. Note that this must happen *after* the incoming
  // values are adjusted, since the value going out of the latch may also be
  // a value coming into the header.
  for (auto Edge : ExitEdges)
    for (PHINode &PHI : Edge.second->phis()) {
      Value *LatchVal = PHI.getIncomingValueForBlock(Edge.first);
      Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
      if (LatchInst && L->contains(LatchInst))
        LatchVal = VMap[LatchVal];
      PHI.addIncoming(LatchVal, cast<BasicBlock>(VMap[Edge.first]));
    }

  // LVMap is updated with the values of the current iteration, which are
  // used the next time this function is called.
  for (auto KV : VMap)
    LVMap[KV.first] = KV.second;
}

TargetTransformInfo::PeelingPreferences llvm::gatherPeelingPreferences(
    Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI,
    Optional<bool> UserAllowPeeling,
    Optional<bool> UserAllowProfileBasedPeeling, bool UnrollingSpecficValues) {
  TargetTransformInfo::PeelingPreferences PP;
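  // The settings below are applied in increasing order of precedence:
  // defaults, target preferences, unroll cl::opt flags, and finally the
  // explicit arguments passed in by the caller.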

  // Set the default values.
  PP.PeelCount = 0;
  PP.AllowPeeling = true;
  PP.AllowLoopNestsPeeling = false;
  PP.PeelProfiledIterations = true;

  // Get the target specific values.
  TTI.getPeelingPreferences(L, SE, PP);

  // User specified values using cl::opt.
  if (UnrollingSpecficValues) {
    if (UnrollPeelCount.getNumOccurrences() > 0)
      PP.PeelCount = UnrollPeelCount;
    if (UnrollAllowPeeling.getNumOccurrences() > 0)
      PP.AllowPeeling = UnrollAllowPeeling;
    if (UnrollAllowLoopNestsPeeling.getNumOccurrences() > 0)
      PP.AllowLoopNestsPeeling = UnrollAllowLoopNestsPeeling;
  }

  // User specified values provided by argument.
  if (UserAllowPeeling.hasValue())
    PP.AllowPeeling = *UserAllowPeeling;
  if (UserAllowProfileBasedPeeling.hasValue())
    PP.PeelProfiledIterations = *UserAllowProfileBasedPeeling;

  return PP;
}

/// Peel off the first \p PeelCount iterations of loop \p L.
///
/// Note that this does not peel them off as a single straight-line block.
/// Rather, each iteration is peeled off separately, and needs to check the
/// exit condition.
/// For loops that dynamically execute \p PeelCount iterations or less
/// this provides a benefit, since the peeled off iterations, which account
/// for the bulk of dynamic execution, can be further simplified by scalar
/// optimizations.
bool llvm::peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI,
                    ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
                    bool PreserveLCSSA) {
  assert(PeelCount > 0 && "Attempt to peel out zero iterations?");
  assert(canPeel(L) && "Attempt to peel a loop which is not peelable?");

  LoopBlocksDFS LoopBlocks(L);
  LoopBlocks.perform(LI);

  BasicBlock *Header = L->getHeader();
  BasicBlock *PreHeader = L->getLoopPreheader();
  BasicBlock *Latch = L->getLoopLatch();
  SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> ExitEdges;
  L->getExitEdges(ExitEdges);

  DenseMap<BasicBlock *, BasicBlock *> ExitIDom;
  if (DT) {
    // We'd like to determine the idom of the exit block after peeling one
    // iteration.
    // Let Exit be the exit block.
    // Let ExitingSet be the set of predecessors of Exit; they are the exiting
    // blocks.
    // Let Latch' and ExitingSet' be the copies after peeling.
    // We'd like to find idom'(Exit), the idom of Exit after peeling.
    // It is evident that idom'(Exit) will be the nearest common dominator of
    // ExitingSet and ExitingSet'.
    // idom(Exit) is the nearest common dominator of ExitingSet.
    // idom(Exit)' is the nearest common dominator of ExitingSet'.
    // Taking into account that we have a single Latch, Latch' will dominate
    // Header and idom(Exit).
    // So idom'(Exit) is the nearest common dominator of idom(Exit)' and
    // Latch'.
    // All these basic blocks are in the same loop, so what we find is
    // (nearest common dominator of idom(Exit) and Latch)'.
    // In the loop below we remember the nearest common dominator of idom(Exit)
    // and Latch to update the idom of Exit later.
    assert(L->hasDedicatedExits() && "No dedicated exits?");
    for (auto Edge : ExitEdges) {
      if (ExitIDom.count(Edge.second))
        continue;
      BasicBlock *BB = DT->findNearestCommonDominator(
          DT->getNode(Edge.second)->getIDom()->getBlock(), Latch);
      assert(L->contains(BB) && "IDom is not in a loop");
      ExitIDom[Edge.second] = BB;
    }
  }

  Function *F = Header->getParent();

  // Set up all the necessary basic blocks. It is convenient to split the
  // preheader into 3 parts - two blocks to anchor the peeled copy of the loop
  // body, and a new preheader for the "real" loop.

  // Peeling the first iteration transforms
  //
  // PreHeader:
  //   ...
  // Header:
  //   LoopBody
  //   If (cond) goto Header
  // Exit:
  //
  // into
  //
  // InsertTop:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot:
  // NewPreHeader:
  //   ...
  // Header:
  //   LoopBody
  //   If (cond) goto Header
  // Exit:
  //
  // Each following iteration will split the current bottom anchor in two,
  // and put the new copy of the loop body between these two blocks. That is,
  // after peeling another iteration from the example above, we'll split
  // InsertBot, and get:
  //
  // InsertTop:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot.next:
  // NewPreHeader:
  //   ...
  // Header:
  //   LoopBody
  //   If (cond) goto Header
  // Exit:

  BasicBlock *InsertTop = SplitEdge(PreHeader, Header, DT, LI);
  BasicBlock *InsertBot =
      SplitBlock(InsertTop, InsertTop->getTerminator(), DT, LI);
  BasicBlock *NewPreHeader =
      SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI);

  InsertTop->setName(Header->getName() + ".peel.begin");
  InsertBot->setName(Header->getName() + ".peel.next");
  NewPreHeader->setName(PreHeader->getName() + ".peel.newph");
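
  // At this point the control flow is:
  //   PreHeader -> InsertTop -> InsertBot -> NewPreHeader -> Header
  // and each peeled copy of the loop body will be placed between InsertTop
  // and InsertBot.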

  ValueToValueMapTy LVMap;

  // If we have branch weight information, we'll want to update it for the
  // newly created branches.
  BranchInst *LatchBR =
      cast<BranchInst>(cast<BasicBlock>(Latch)->getTerminator());
  uint64_t ExitWeight = 0, FallThroughWeight = 0;
  initBranchWeights(Header, LatchBR, ExitWeight, FallThroughWeight);

  // For each peeled-off iteration, make a copy of the loop.
  for (unsigned Iter = 0; Iter < PeelCount; ++Iter) {
    SmallVector<BasicBlock *, 8> NewBlocks;
    ValueToValueMapTy VMap;

    cloneLoopBlocks(L, Iter, InsertTop, InsertBot, ExitEdges, NewBlocks,
                    LoopBlocks, VMap, LVMap, DT, LI);

    // Remap to use values from the current iteration instead of the
    // previous one.
    remapInstructionsInBlocks(NewBlocks, VMap);

    if (DT) {
      // Latches of the cloned loops dominate the loop exit, so the idom of the
      // latter is the first cloned loop body, as the original PreHeader
      // dominates the original loop body.
      if (Iter == 0)
        for (auto Exit : ExitIDom)
          DT->changeImmediateDominator(Exit.first,
                                       cast<BasicBlock>(LVMap[Exit.second]));
#ifdef EXPENSIVE_CHECKS
      assert(DT->verify(DominatorTree::VerificationLevel::Fast));
#endif
    }

    auto *LatchBRCopy = cast<BranchInst>(VMap[LatchBR]);
    updateBranchWeights(InsertBot, LatchBRCopy, ExitWeight, FallThroughWeight);
    // Remove Loop metadata from the latch branch instruction
    // because it is not the Loop's latch branch anymore.
    LatchBRCopy->setMetadata(LLVMContext::MD_loop, nullptr);

    InsertTop = InsertBot;
    InsertBot = SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI);
    InsertBot->setName(Header->getName() + ".peel.next");

    F->getBasicBlockList().splice(InsertTop->getIterator(),
                                  F->getBasicBlockList(),
                                  NewBlocks[0]->getIterator(), F->end());
  }

  // Now adjust the phi nodes in the loop header to get their initial values
  // from the last peeled-off iteration instead of the preheader.
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    PHINode *PHI = cast<PHINode>(I);
    Value *NewVal = PHI->getIncomingValueForBlock(Latch);
    Instruction *LatchInst = dyn_cast<Instruction>(NewVal);
    if (LatchInst && L->contains(LatchInst))
      NewVal = LVMap[LatchInst];

    PHI->setIncomingValueForBlock(NewPreHeader, NewVal);
  }

  fixupBranchWeights(Header, LatchBR, ExitWeight, FallThroughWeight);

  // Update metadata for the count of peeled-off iterations.
  unsigned AlreadyPeeled = 0;
  if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData))
    AlreadyPeeled = *Peeled;
  addStringMetadataToLoop(L, PeeledCountMetaData, AlreadyPeeled + PeelCount);

  if (Loop *ParentLoop = L->getParentLoop())
    L = ParentLoop;

  // We modified the loop, update SE.
  SE->forgetTopmostLoop(L);

  // Finally, the DomTree must be correct.
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));

  // FIXME: Incrementally update loop-simplify
  simplifyLoop(L, DT, LI, SE, AC, nullptr, PreserveLCSSA);

  NumPeeled++;

  return true;
}