//===----------------- LoopRotationUtils.cpp -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities to convert a loop into a loop with bottom test.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopRotationUtils.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

#define DEBUG_TYPE "loop-rotate"

STATISTIC(NumNotRotatedDueToHeaderSize,
          "Number of loops not rotated due to the header size");
STATISTIC(NumInstrsHoisted,
          "Number of instructions hoisted into loop preheader");
STATISTIC(NumInstrsDuplicated,
          "Number of instructions cloned into loop preheader");
STATISTIC(NumRotated, "Number of loops rotated");

static cl::opt<bool>
    MultiRotate("loop-rotate-multi", cl::init(false), cl::Hidden,
                cl::desc("Allow loop rotation multiple times in order to reach "
                         "a better latch exit"));

namespace {
/// A simple loop rotation transformation.
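///
/// Illustrative sketch only (hypothetical IR names, not taken from any test
/// case): a top-tested loop of the form
///
///   header:                         ; entry test at the top
///     %i = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
///     %cmp = icmp slt i32 %i, %n
///     br i1 %cmp, label %body, label %exit
///   ...
///   latch:
///     %i.next = add i32 %i, 1
///     br label %header
///
/// is rotated so the exit test sits at the bottom: the header is cloned into
/// the preheader (acting as a guard for the first iteration) and the loop's
/// backedge branches on the condition instead, yielding a bottom-tested loop.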
class LoopRotate {
  const unsigned MaxHeaderSize;
  LoopInfo *LI;
  const TargetTransformInfo *TTI;
  AssumptionCache *AC;
  DominatorTree *DT;
  ScalarEvolution *SE;
  MemorySSAUpdater *MSSAU;
  const SimplifyQuery &SQ;
  bool RotationOnly;
  bool IsUtilMode;
  bool PrepareForLTO;

public:
  LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI,
             const TargetTransformInfo *TTI, AssumptionCache *AC,
             DominatorTree *DT, ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
             const SimplifyQuery &SQ, bool RotationOnly, bool IsUtilMode,
             bool PrepareForLTO)
      : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE),
        MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly),
        IsUtilMode(IsUtilMode), PrepareForLTO(PrepareForLTO) {}
  bool processLoop(Loop *L);

private:
  bool rotateLoop(Loop *L, bool SimplifiedLatch);
  bool simplifyLoopLatch(Loop *L);
};
} // end anonymous namespace

/// Insert (K, V) pair into the ValueToValueMap, and verify the key did not
/// previously exist in the map, and the value was inserted.
static void InsertNewValueIntoMap(ValueToValueMapTy &VM, Value *K, Value *V) {
  bool Inserted = VM.insert({K, V}).second;
  assert(Inserted);
  (void)Inserted;
}
/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
/// old header into the preheader. If there were uses of the values produced by
/// these instructions that were outside of the loop, we have to insert PHI
/// nodes to merge the two values. Do this now.
static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
                                            BasicBlock *OrigPreheader,
                                            ValueToValueMapTy &ValueMap,
                                            SmallVectorImpl<PHINode *> *InsertedPHIs) {
  // Remove PHI node entries that are no longer live.
  BasicBlock::iterator I, E = OrigHeader->end();
  for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));

  // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
  // as necessary.
  SSAUpdater SSA(InsertedPHIs);
  for (I = OrigHeader->begin(); I != E; ++I) {
    Value *OrigHeaderVal = &*I;

    // If there are no uses of the value (e.g. because it returns void), there
    // is nothing to rewrite.
    if (OrigHeaderVal->use_empty())
      continue;

    Value *OrigPreHeaderVal = ValueMap.lookup(OrigHeaderVal);

    // The value now exists in two versions: the initial value in the preheader
    // and the loop "next" value in the original header.
    SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
    SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
    SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);

    // Visit each use of the OrigHeader instruction.
    for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
                             UE = OrigHeaderVal->use_end();
         UI != UE;) {
      // Grab the use before incrementing the iterator.
      Use &U = *UI;

      // Increment the iterator before removing the use from the list.
      ++UI;

      // SSAUpdater can't handle a non-PHI use in the same block as an
      // earlier def. We can easily handle those cases manually.
      Instruction *UserInst = cast<Instruction>(U.getUser());
      if (!isa<PHINode>(UserInst)) {
        BasicBlock *UserBB = UserInst->getParent();

        // The original users in the OrigHeader are already using the
        // original definitions.
        if (UserBB == OrigHeader)
          continue;

        // Users in the OrigPreHeader need to use the value to which the
        // original definitions are mapped.
        if (UserBB == OrigPreheader) {
          U = OrigPreHeaderVal;
          continue;
        }
      }

      // Anything else can be handled by SSAUpdater.
      SSA.RewriteUse(U);
    }

    // Replace MetadataAsValue(ValueAsMetadata(OrigHeaderVal)) uses in debug
    // intrinsics.
    SmallVector<DbgValueInst *, 1> DbgValues;
    llvm::findDbgValues(DbgValues, OrigHeaderVal);
    for (auto &DbgValue : DbgValues) {
      // The original users in the OrigHeader are already using the original
      // definitions.
      BasicBlock *UserBB = DbgValue->getParent();
      if (UserBB == OrigHeader)
        continue;

      // Users in the OrigPreHeader need to use the value to which the
      // original definitions are mapped, and anything else can be handled by
      // the SSAUpdater. To avoid adding PHINodes, check if the value is
      // available in UserBB; if not, substitute undef.
      Value *NewVal;
      if (UserBB == OrigPreheader)
        NewVal = OrigPreHeaderVal;
      else if (SSA.HasValueForBlock(UserBB))
        NewVal = SSA.GetValueInMiddleOfBlock(UserBB);
      else
        NewVal = UndefValue::get(OrigHeaderVal->getType());
      DbgValue->replaceVariableLocationOp(OrigHeaderVal, NewVal);
    }
  }
}

// Assuming both header and latch are exiting, look for a phi which is only
// used outside the loop (via a LCSSA phi) in the exit from the header.
// This means that rotating the loop can remove the phi.
static bool profitableToRotateLoopExitingLatch(Loop *L) {
  BasicBlock *Header = L->getHeader();
  BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator());
  assert(BI && BI->isConditional() && "need header with conditional exit");
  BasicBlock *HeaderExit = BI->getSuccessor(0);
  if (L->contains(HeaderExit))
    HeaderExit = BI->getSuccessor(1);

  for (auto &Phi : Header->phis()) {
    // Skip this phi if it has any use inside the loop or via an exit other
    // than the header's exit block.
    if (llvm::any_of(Phi.users(), [HeaderExit](const User *U) {
          return cast<Instruction>(U)->getParent() != HeaderExit;
        }))
      continue;
    return true;
  }
  return false;
}

// Check that the latch exit is deoptimizing (which means it is very unlikely
// to be taken) and that there is another, non-deoptimizing exit from the loop.
// If we rotate the latch to that exit, our loop has a better chance of being
// fully canonical.
//
// It can give false positives in some rare cases.
static bool canRotateDeoptimizingLatchExit(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "need latch");
  BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
  // Need normal exiting latch.
  if (!BI || !BI->isConditional())
    return false;

  BasicBlock *Exit = BI->getSuccessor(1);
  if (L->contains(Exit))
    Exit = BI->getSuccessor(0);

  // Latch exit is non-deoptimizing, no need to rotate.
  if (!Exit->getPostdominatingDeoptimizeCall())
    return false;

  SmallVector<BasicBlock *, 4> Exits;
  L->getUniqueExitBlocks(Exits);
  if (!Exits.empty()) {
    // There is at least one non-deoptimizing exit.
    //
    // Note that BasicBlock::getPostdominatingDeoptimizeCall is not exact,
    // as it can conservatively return false for deoptimizing exits with
    // complex enough control flow down to the deoptimize call.
    //
    // That means here we can report success for a case where
    // all exits are deoptimizing but one of them has complex enough
    // control flow (e.g. with loops).
    //
    // That should be a very rare case and false positives for this function
    // have compile-time effect only.
    return any_of(Exits, [](const BasicBlock *BB) {
      return !BB->getPostdominatingDeoptimizeCall();
    });
  }
  return false;
}

/// Rotate loop \c L. Return true if the loop is rotated.
///
/// \param SimplifiedLatch is true if the latch was just folded into the final
/// loop exit. In this case we may want to rotate even though the new latch is
/// now an exiting branch. This rotation would have happened had the latch not
/// been simplified. However, if SimplifiedLatch is false, then we avoid
/// rotating loops in which the latch exits to avoid excessive or endless
/// rotation. LoopRotate should be repeatable and converge to a canonical
/// form. This property is satisfied because simplifying the loop latch can only
/// happen once across multiple invocations of the LoopRotate pass.
///
/// If -loop-rotate-multi is enabled we can do multiple rotations in one go
/// so as to reach a suitable (non-deoptimizing) exit.
bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
  // If the loop has only one block then there is not much to rotate.
  if (L->getBlocks().size() == 1)
    return false;

  bool Rotated = false;
  do {
    BasicBlock *OrigHeader = L->getHeader();
    BasicBlock *OrigLatch = L->getLoopLatch();

    BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
    if (!BI || BI->isUnconditional())
      return Rotated;

    // If the loop header is not one of the loop exiting blocks then
    // either this loop is already rotated or it is not
    // suitable for loop rotation transformations.
    if (!L->isLoopExiting(OrigHeader))
      return Rotated;

    // If the loop has no single latch block, it is not in a form we can
    // rotate; give up.
    if (!OrigLatch)
      return Rotated;

    // Rotate if either the loop latch does *not* exit the loop, or if the loop
    // latch was just simplified. Or if we think it will be profitable.
    if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch && IsUtilMode == false &&
        !profitableToRotateLoopExitingLatch(L) &&
        !canRotateDeoptimizingLatchExit(L))
      return Rotated;

    // Check the size of the original header and reject the loop if it is very
    // big or if we can't duplicate the blocks inside it.
    {
      SmallPtrSet<const Value *, 32> EphValues;
      CodeMetrics::collectEphemeralValues(L, AC, EphValues);

      CodeMetrics Metrics;
      Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues, PrepareForLTO);
      if (Metrics.notDuplicatable) {
        LLVM_DEBUG(
            dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
                   << " instructions: ";
            L->dump());
        return Rotated;
      }
      if (Metrics.convergent) {
        LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
                             "instructions: ";
                   L->dump());
        return Rotated;
      }
      if (Metrics.NumInsts > MaxHeaderSize) {
        LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains "
                          << Metrics.NumInsts
                          << " instructions, which is more than the threshold ("
                          << MaxHeaderSize << " instructions): ";
                   L->dump());
        ++NumNotRotatedDueToHeaderSize;
        return Rotated;
      }

      // When preparing for LTO, avoid rotating loops with calls that could be
      // inlined during the LTO stage.
      if (PrepareForLTO && Metrics.NumInlineCandidates > 0)
        return Rotated;
    }

    // Now, this loop is suitable for rotation.
    BasicBlock *OrigPreheader = L->getLoopPreheader();

    // If the loop could not be converted to canonical form, it must have an
    // indirectbr in it, just give up.
    if (!OrigPreheader || !L->hasDedicatedExits())
      return Rotated;

    // Anything ScalarEvolution may know about this loop or the PHI nodes
    // in its header will soon be invalidated. We should also invalidate
    // all outer loops because the insertion and deletion of blocks during
    // rotation may violate invariants related to their backedge-taken info.
    if (SE)
      SE->forgetTopmostLoop(L);

    LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
    if (MSSAU && VerifyMemorySSA)
      MSSAU->getMemorySSA()->verifyMemorySSA();

    // Find the new loop header. NewHeader is the Header's one and only
    // successor that is inside the loop. The Header's other successor is
    // outside the loop. Otherwise the loop is not suitable for rotation.
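    // For example (illustrative only), if the header terminator is
    //   br i1 %cmp, label %exit, label %body
    // then %body, the successor inside the loop, becomes NewHeader and %exit
    // becomes Exit; the swap below handles either successor order.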
    BasicBlock *Exit = BI->getSuccessor(0);
    BasicBlock *NewHeader = BI->getSuccessor(1);
    if (L->contains(Exit))
      std::swap(Exit, NewHeader);
    assert(NewHeader && "Unable to determine new loop header");
    assert(L->contains(NewHeader) && !L->contains(Exit) &&
           "Unable to determine loop header and exit blocks");

    // This code assumes that the new header has exactly one predecessor.
    // Remove any single-entry PHI nodes in it.
    assert(NewHeader->getSinglePredecessor() &&
           "New header doesn't have one pred!");
    FoldSingleEntryPHINodes(NewHeader);

    // Begin by walking OrigHeader and populating ValueMap with an entry for
    // each Instruction.
    BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
    ValueToValueMapTy ValueMap, ValueMapMSSA;

    // For PHI nodes, the value available in OldPreHeader is just the
    // incoming value from OldPreHeader.
    for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
      InsertNewValueIntoMap(ValueMap, PN,
                            PN->getIncomingValueForBlock(OrigPreheader));

    // For the rest of the instructions, either hoist to the OrigPreheader if
    // possible or create a clone in the OldPreHeader if not.
    Instruction *LoopEntryBranch = OrigPreheader->getTerminator();

    // Record all debug intrinsics preceding LoopEntryBranch to avoid
    // duplication.
    using DbgIntrinsicHash =
        std::pair<std::pair<hash_code, DILocalVariable *>, DIExpression *>;
    auto makeHash = [](DbgVariableIntrinsic *D) -> DbgIntrinsicHash {
      auto VarLocOps = D->location_ops();
      return {{hash_combine_range(VarLocOps.begin(), VarLocOps.end()),
               D->getVariable()},
              D->getExpression()};
    };
    SmallDenseSet<DbgIntrinsicHash, 8> DbgIntrinsics;
    for (auto I = std::next(OrigPreheader->rbegin()), E = OrigPreheader->rend();
         I != E; ++I) {
      if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&*I))
        DbgIntrinsics.insert(makeHash(DII));
      else
        break;
    }

    // Remember the local noalias scope declarations in the header. After the
    // rotation, they must be duplicated and the scope must be cloned. This
    // avoids unwanted interaction across iterations.
    SmallVector<NoAliasScopeDeclInst *, 6> NoAliasDeclInstructions;
    for (Instruction &I : *OrigHeader)
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        NoAliasDeclInstructions.push_back(Decl);

    while (I != E) {
      Instruction *Inst = &*I++;

      // If the instruction's operands are invariant and it doesn't read or
      // write memory, then it is safe to hoist. Doing this doesn't change the
      // order of execution in the preheader, but does prevent the instruction
      // from executing in each iteration of the loop. This means it is safe to
      // hoist something that might trap, but isn't safe to hoist something
      // that reads memory (without proving that the loop doesn't write).
      if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
          !Inst->mayWriteToMemory() && !Inst->isTerminator() &&
          !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) {
        Inst->moveBefore(LoopEntryBranch);
        ++NumInstrsHoisted;
        continue;
      }

      // Otherwise, create a duplicate of the instruction.
      Instruction *C = Inst->clone();
      ++NumInstrsDuplicated;

      // Eagerly remap the operands of the instruction.
      RemapInstruction(C, ValueMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

      // Avoid inserting the same intrinsic twice.
      if (auto *DII = dyn_cast<DbgVariableIntrinsic>(C))
        if (DbgIntrinsics.count(makeHash(DII))) {
          C->deleteValue();
          continue;
        }

      // With the operands remapped, see if the instruction constant folds or
      // is otherwise simplifiable. This commonly occurs because the entry from
      // PHI nodes allows icmps and other instructions to fold.
      Value *V = SimplifyInstruction(C, SQ);
      if (V && LI->replacementPreservesLCSSAForm(C, V)) {
        // If so, then delete the temporary instruction and stick the folded
        // value in the map.
        InsertNewValueIntoMap(ValueMap, Inst, V);
        if (!C->mayHaveSideEffects()) {
          C->deleteValue();
          C = nullptr;
        }
      } else {
        InsertNewValueIntoMap(ValueMap, Inst, C);
      }
      if (C) {
        // Otherwise, stick the new instruction into the new block!
        C->setName(Inst->getName());
        C->insertBefore(LoopEntryBranch);

        if (auto *II = dyn_cast<AssumeInst>(C))
          AC->registerAssumption(II);
        // MemorySSA cares whether the cloned instruction was inserted or not,
        // and not whether it can be remapped to a simplified value.
        if (MSSAU)
          InsertNewValueIntoMap(ValueMapMSSA, Inst, C);
      }
    }

    if (!NoAliasDeclInstructions.empty()) {
      // There are noalias scope declarations:
      // (general):
      //   Original:    OrigPre              { OrigHeader NewHeader ... Latch }
      //   after:      (OrigPre+OrigHeader') { NewHeader ... Latch OrigHeader }
      //
      // with D: llvm.experimental.noalias.scope.decl,
      //      U: !noalias or !alias.scope depending on D
      //       ... { D U1 U2 }   can transform into:
      // (0) : ... { D U1 U2 }        // no relevant rotation for this part
      // (1) : ... D' { U1 U2 D }     // D is part of OrigHeader
      // (2) : ... D' U1' { U2 D U1 } // D, U1 are part of OrigHeader
      //
      // We now want to transform:
      // (1) -> : ... D' { D U1 U2 D'' }
      // (2) -> : ... D' U1' { D U2 D'' U1'' }
      // D: original llvm.experimental.noalias.scope.decl
      // D', U1': duplicate with replaced scopes
      // D'', U1'': different duplicate with replaced scopes
      // This ensures a safe fallback to 'may_alias' introduced by the rotate,
      // as the U1'' and U1' scopes will not be compatible w.r.t. the local
      // restrict.

      // Clone the llvm.experimental.noalias.scope.decl again for the NewHeader.
      Instruction *NewHeaderInsertionPoint = &(*NewHeader->getFirstNonPHI());
      for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions) {
        LLVM_DEBUG(dbgs() << "  Cloning llvm.experimental.noalias.scope.decl:"
                          << *NAD << "\n");
        Instruction *NewNAD = NAD->clone();
        NewNAD->insertBefore(NewHeaderInsertionPoint);
      }

      // Scopes must now be duplicated, once for OrigHeader and once for
      // OrigPreHeader'.
      {
        auto &Context = NewHeader->getContext();

        SmallVector<MDNode *, 8> NoAliasDeclScopes;
        for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions)
          NoAliasDeclScopes.push_back(NAD->getScopeList());

        LLVM_DEBUG(dbgs() << "  Updating OrigHeader scopes\n");
        cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, {OrigHeader}, Context,
                                   "h.rot");
        LLVM_DEBUG(OrigHeader->dump());

        // Keep the compile time impact low by only adapting the inserted block
        // of instructions in the OrigPreHeader. This might result in slightly
        // more aliasing between these instructions and those that were already
        // present, but it will be much faster when the original PreHeader is
        // large.
        LLVM_DEBUG(dbgs() << "  Updating part of OrigPreheader scopes\n");
        auto *FirstDecl =
            cast<Instruction>(ValueMap[*NoAliasDeclInstructions.begin()]);
        auto *LastInst = &OrigPreheader->back();
        cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, FirstDecl, LastInst,
                                   Context, "pre.rot");
        LLVM_DEBUG(OrigPreheader->dump());

        LLVM_DEBUG(dbgs() << "  Updated NewHeader:\n");
        LLVM_DEBUG(NewHeader->dump());
      }
    }

    // Along with all the other instructions, we just cloned OrigHeader's
    // terminator into OrigPreHeader. Fix up the PHI nodes in each of
    // OrigHeader's successors by duplicating their incoming values for
    // OrigHeader.
    for (BasicBlock *SuccBB : successors(OrigHeader))
      for (BasicBlock::iterator BI = SuccBB->begin();
           PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
        PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);

    // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
    // OrigPreHeader's old terminator (the original branch into the loop), and
    // remove the corresponding incoming values from the PHI nodes in OrigHeader.
    LoopEntryBranch->eraseFromParent();

    // Update MemorySSA before the rewrite call below changes the 1:1
    // instruction:cloned_instruction_or_value mapping.
    if (MSSAU) {
      InsertNewValueIntoMap(ValueMapMSSA, OrigHeader, OrigPreheader);
      MSSAU->updateForClonedBlockIntoPred(OrigHeader, OrigPreheader,
                                          ValueMapMSSA);
    }

    SmallVector<PHINode *, 2> InsertedPHIs;
    // If there were any uses of instructions in the duplicated block outside
    // the loop, update them, inserting PHI nodes as required.
    RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap,
                                    &InsertedPHIs);

    // Attach dbg.value intrinsics to the new phis if the phi uses a value that
    // previously had debug metadata attached. This keeps the debug info
    // up-to-date in the loop body.
    if (!InsertedPHIs.empty())
      insertDebugValuesForPHIs(OrigHeader, InsertedPHIs);

    // NewHeader is now the header of the loop.
    L->moveToHeader(NewHeader);
    assert(L->getHeader() == NewHeader && "Latch block is our new header");

    // Inform DT about changes to the CFG.
    if (DT) {
      // The OrigPreheader branches to the NewHeader and Exit now. Also inform
      // the DT about the edge to OrigHeader that was removed.
      SmallVector<DominatorTree::UpdateType, 3> Updates;
      Updates.push_back({DominatorTree::Insert, OrigPreheader, Exit});
      Updates.push_back({DominatorTree::Insert, OrigPreheader, NewHeader});
      Updates.push_back({DominatorTree::Delete, OrigPreheader, OrigHeader});

      if (MSSAU) {
        MSSAU->applyUpdates(Updates, *DT, /*UpdateDT=*/true);
        if (VerifyMemorySSA)
          MSSAU->getMemorySSA()->verifyMemorySSA();
      } else {
        DT->applyUpdates(Updates);
      }
    }

    // At this point, we've finished our major CFG changes. As part of cloning
    // the loop into the preheader we've simplified instructions and the
    // duplicated conditional branch may now be branching on a constant. If it is
    // branching on a constant and if that constant means that we enter the loop,
    // then we fold away the cond branch to an uncond branch. This simplifies the
    // loop in cases important for nested loops, and it also means we don't have
    // to split as many edges.
    BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
    assert(PHBI->isConditional() && "Should be clone of BI condbr!");
    if (!isa<ConstantInt>(PHBI->getCondition()) ||
        PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero()) !=
            NewHeader) {
      // The conditional branch can't be folded, handle the general case.
      // Split edges as necessary to preserve LoopSimplify form.

      // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
      // thus is not a preheader anymore.
      // Split the edge to form a real preheader.
      BasicBlock *NewPH = SplitCriticalEdge(
          OrigPreheader, NewHeader,
          CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
      NewPH->setName(NewHeader->getName() + ".lr.ph");

      // Preserve canonical loop form, which means that 'Exit' should have only
      // one predecessor. Note that Exit could be an exit block for multiple
      // nested loops, causing both of the edges to now be critical and need to
      // be split.
      SmallVector<BasicBlock *, 4> ExitPreds(pred_begin(Exit), pred_end(Exit));
      bool SplitLatchEdge = false;
      for (BasicBlock *ExitPred : ExitPreds) {
        // We only need to split loop exit edges.
        Loop *PredLoop = LI->getLoopFor(ExitPred);
        if (!PredLoop || PredLoop->contains(Exit) ||
            ExitPred->getTerminator()->isIndirectTerminator())
          continue;
        SplitLatchEdge |= L->getLoopLatch() == ExitPred;
        BasicBlock *ExitSplit = SplitCriticalEdge(
            ExitPred, Exit,
            CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
        ExitSplit->moveBefore(Exit);
      }
      assert(SplitLatchEdge &&
             "Despite splitting all preds, failed to split latch exit?");
      (void)SplitLatchEdge;
    } else {
      // We can fold the conditional branch in the preheader; this makes things
      // simpler. The first step is to remove the extra edge to the Exit block.
      Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
      BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
      NewBI->setDebugLoc(PHBI->getDebugLoc());
      PHBI->eraseFromParent();

      // With our CFG finalized, update DomTree if it is available.
      if (DT) DT->deleteEdge(OrigPreheader, Exit);

      // Update MSSA too, if available.
      if (MSSAU)
        MSSAU->removeEdge(OrigPreheader, Exit);
    }

    assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
    assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");

    if (MSSAU && VerifyMemorySSA)
      MSSAU->getMemorySSA()->verifyMemorySSA();

    // Now that the CFG and DomTree are in a consistent state again, try to merge
    // the OrigHeader block into OrigLatch. This will succeed if they are
    // connected by an unconditional branch. This is just a cleanup so the
    // emitted code isn't too gross in this common case.
    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
    BasicBlock *PredBB = OrigHeader->getUniquePredecessor();
    bool DidMerge = MergeBlockIntoPredecessor(OrigHeader, &DTU, LI, MSSAU);
    if (DidMerge)
      RemoveRedundantDbgInstrs(PredBB);

    if (MSSAU && VerifyMemorySSA)
      MSSAU->getMemorySSA()->verifyMemorySSA();

    LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump());

    ++NumRotated;

    Rotated = true;
    SimplifiedLatch = false;

    // If the new latch is a deoptimizing exit, repeat the rotation where
    // possible. A deoptimizing latch exit is not the typical case, so we
    // simply loop around.
    // TODO: if this becomes a performance bottleneck, extend the rotation
    // algorithm to handle multiple rotations in one go.
  } while (MultiRotate && canRotateDeoptimizingLatchExit(L));

  return true;
}

/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End, Loop *L) {
  bool seenIncrement = false;
  bool MultiExitLoop = false;

  if (!L->getExitingBlock())
    MultiExitLoop = true;

  for (BasicBlock::iterator I = Begin; I != End; ++I) {

    if (!isSafeToSpeculativelyExecute(&*I))
      return false;

    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
      LLVM_FALLTHROUGH;
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      Value *IVOpnd =
          !isa<Constant>(I->getOperand(0))
              ? I->getOperand(0)
              : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr;
      if (!IVOpnd)
        return false;

      // If the increment operand is used outside of the loop, this speculation
      // could cause extra live range interference.
      if (MultiExitLoop) {
        for (User *UseI : IVOpnd->users()) {
          auto *UserInst = cast<Instruction>(UseI);
          if (!L->contains(UserInst))
            return false;
        }
      }

      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    }
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}

/// Fold the loop tail into the loop exit by speculating the loop tail
/// instructions. Typically, this is a single post-increment. In the case of a
/// simple 2-block loop, hoisting the increment can be much better than
/// duplicating the entire loop header. In the case of loops with early exits,
/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
/// canonical form so downstream passes can handle it.
///
/// I don't believe this invalidates SCEV.
bool LoopRotate::simplifyLoopLatch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || Latch->hasAddressTaken())
    return false;

  BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!Jmp || !Jmp->isUnconditional())
    return false;

  BasicBlock *LastExit = Latch->getSinglePredecessor();
  if (!LastExit || !L->isLoopExiting(LastExit))
    return false;

  BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
  if (!BI)
    return false;

  if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
    return false;

  LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
                    << LastExit->getName() << "\n");

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  MergeBlockIntoPredecessor(Latch, &DTU, LI, MSSAU, nullptr,
                            /*PredecessorWithTwoSuccessors=*/true);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  return true;
}

/// Rotate \c L, and return true if any modification was made.
bool LoopRotate::processLoop(Loop *L) {
  // Save the loop metadata.
  MDNode *LoopMD = L->getLoopID();

  bool SimplifiedLatch = false;

  // Simplify the loop latch before attempting to rotate the header
  // upward. Rotation may not be needed if the loop tail can be folded into the
  // loop exit.
  if (!RotationOnly)
    SimplifiedLatch = simplifyLoopLatch(L);

  bool MadeChange = rotateLoop(L, SimplifiedLatch);
  assert((!MadeChange || L->isLoopExiting(L->getLoopLatch())) &&
         "Loop latch should be exiting after loop-rotate.");

  // Restore the loop metadata.
  // NB! We presume LoopRotation DOESN'T ADD its own metadata.
  if ((MadeChange || SimplifiedLatch) && LoopMD)
    L->setLoopID(LoopMD);

  return MadeChange || SimplifiedLatch;
}


/// The utility to convert a loop into a loop with bottom test.
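///
/// A typical invocation from a pass might look like the following (a sketch
/// only; the argument values are illustrative, not prescriptive):
///   LoopRotation(L, LI, TTI, AC, DT, SE, MSSAU, SQ, /*RotationOnly=*/true,
///                /*Threshold=*/unsigned(-1), /*IsUtilMode=*/true,
///                /*PrepareForLTO=*/false);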
bool llvm::LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
                        AssumptionCache *AC, DominatorTree *DT,
                        ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
                        const SimplifyQuery &SQ, bool RotationOnly = true,
                        unsigned Threshold = unsigned(-1),
                        bool IsUtilMode = true, bool PrepareForLTO) {
  LoopRotate LR(Threshold, LI, TTI, AC, DT, SE, MSSAU, SQ, RotationOnly,
                IsUtilMode, PrepareForLTO);
  return LR.processLoop(L);
}