//===----------------- LoopRotationUtils.cpp -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities to convert a loop into a loop with bottom test.
//
//===----------------------------------------------------------------------===//
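//
// Conceptually, rotation turns a top-tested loop into a bottom-tested
// ("do-while") loop guarded by a copy of the original test. Schematic C
// sketch (illustrative only, not taken from this file):
//
//   while (cond) {           if (cond) {          // test cloned into guard
//     body;            =>      do {
//   }                            body;
//                              } while (cond);    // test now in the latch
//                            }
//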

#include "llvm/Transforms/Utils/LoopRotationUtils.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

#define DEBUG_TYPE "loop-rotate"

STATISTIC(NumNotRotatedDueToHeaderSize,
          "Number of loops not rotated due to the header size");
STATISTIC(NumInstrsHoisted,
          "Number of instructions hoisted into loop preheader");
STATISTIC(NumInstrsDuplicated,
          "Number of instructions cloned into loop preheader");
STATISTIC(NumRotated, "Number of loops rotated");

static cl::opt<bool>
    MultiRotate("loop-rotate-multi", cl::init(false), cl::Hidden,
                cl::desc("Allow loop rotation multiple times in order to reach "
                         "a better latch exit"));

namespace {
/// A simple loop rotation transformation.
class LoopRotate {
  const unsigned MaxHeaderSize;
  LoopInfo *LI;
  const TargetTransformInfo *TTI;
  AssumptionCache *AC;
  DominatorTree *DT;
  ScalarEvolution *SE;
  MemorySSAUpdater *MSSAU;
  const SimplifyQuery &SQ;
  bool RotationOnly;
  bool IsUtilMode;
  bool PrepareForLTO;

public:
  LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI,
             const TargetTransformInfo *TTI, AssumptionCache *AC,
             DominatorTree *DT, ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
             const SimplifyQuery &SQ, bool RotationOnly, bool IsUtilMode,
             bool PrepareForLTO)
      : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE),
        MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly),
        IsUtilMode(IsUtilMode), PrepareForLTO(PrepareForLTO) {}
  bool processLoop(Loop *L);

private:
  bool rotateLoop(Loop *L, bool SimplifiedLatch);
  bool simplifyLoopLatch(Loop *L);
};
} // end anonymous namespace

/// Insert (K, V) pair into the ValueToValueMap, asserting that the key did not
/// previously exist in the map and that the value was inserted.
static void InsertNewValueIntoMap(ValueToValueMapTy &VM, Value *K, Value *V) {
  bool Inserted = VM.insert({K, V}).second;
  assert(Inserted);
  (void)Inserted;
}

/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
/// old header into the preheader. If there were uses of the values produced by
/// these instructions that were outside of the loop, we have to insert PHI
/// nodes to merge the two values. Do this now.
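///
/// Schematically (illustrative names, not from this file): if the old header
/// defined %v and %v was used in an exit block, there are now two reaching
/// definitions: the clone in the preheader (reached when the preheader's
/// branch bypasses the loop) and %v in the rotated loop body, so the exit use
/// must be rewritten as phi [ %v.clone, %preheader ], [ %v, %loop ].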
static void RewriteUsesOfClonedInstructions(
    BasicBlock *OrigHeader, BasicBlock *OrigPreheader,
    ValueToValueMapTy &ValueMap, ScalarEvolution *SE,
    SmallVectorImpl<PHINode *> *InsertedPHIs) {
  // Remove PHI node entries that are no longer live.
  BasicBlock::iterator I, E = OrigHeader->end();
  for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));

  // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
  // as necessary.
  SSAUpdater SSA(InsertedPHIs);
  for (I = OrigHeader->begin(); I != E; ++I) {
    Value *OrigHeaderVal = &*I;

    // If there are no uses of the value (e.g. because it returns void), there
    // is nothing to rewrite.
    if (OrigHeaderVal->use_empty())
      continue;

    Value *OrigPreHeaderVal = ValueMap.lookup(OrigHeaderVal);

    // The value now exists in two versions: the initial value in the preheader
    // and the loop "next" value in the original header.
    SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
    // Force re-computation of OrigHeaderVal, as some users now need to use the
    // new PHI node.
    if (SE)
      SE->forgetValue(OrigHeaderVal);
    SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
    SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);

    // Visit each use of the OrigHeader instruction.
    for (Use &U : llvm::make_early_inc_range(OrigHeaderVal->uses())) {
      // SSAUpdater can't handle a non-PHI use in the same block as an
      // earlier def. We can easily handle those cases manually.
      Instruction *UserInst = cast<Instruction>(U.getUser());
      if (!isa<PHINode>(UserInst)) {
        BasicBlock *UserBB = UserInst->getParent();

        // The original users in the OrigHeader are already using the
        // original definitions.
        if (UserBB == OrigHeader)
          continue;

        // Users in the OrigPreHeader need to use the value to which the
        // original definitions are mapped.
        if (UserBB == OrigPreheader) {
          U = OrigPreHeaderVal;
          continue;
        }
      }

      // Anything else can be handled by SSAUpdater.
      SSA.RewriteUse(U);
    }

    // Replace MetadataAsValue(ValueAsMetadata(OrigHeaderVal)) uses in debug
    // intrinsics.
    SmallVector<DbgValueInst *, 1> DbgValues;
    llvm::findDbgValues(DbgValues, OrigHeaderVal);
    for (auto &DbgValue : DbgValues) {
      // The original users in the OrigHeader are already using the original
      // definitions.
      BasicBlock *UserBB = DbgValue->getParent();
      if (UserBB == OrigHeader)
        continue;

      // Users in the OrigPreHeader need to use the value to which the
      // original definitions are mapped and anything else can be handled by
      // the SSAUpdater. To avoid adding PHINodes, check if the value is
      // available in UserBB, if not substitute undef.
      Value *NewVal;
      if (UserBB == OrigPreheader)
        NewVal = OrigPreHeaderVal;
      else if (SSA.HasValueForBlock(UserBB))
        NewVal = SSA.GetValueInMiddleOfBlock(UserBB);
      else
        NewVal = UndefValue::get(OrigHeaderVal->getType());
      DbgValue->replaceVariableLocationOp(OrigHeaderVal, NewVal);
    }
  }
}

// Assuming both the header and the latch are exiting, look for a phi which is
// only used outside the loop (via an LCSSA phi) in the exit from the header.
// This means that rotating the loop can remove the phi.
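//
// For instance (schematic IR, illustrative only): a header phi
//   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
// whose sole use is an LCSSA phi in the header's exit block no longer needs
// to live in the loop header once the loop is rotated.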
static bool profitableToRotateLoopExitingLatch(Loop *L) {
  BasicBlock *Header = L->getHeader();
  BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator());
  assert(BI && BI->isConditional() && "need header with conditional exit");
  BasicBlock *HeaderExit = BI->getSuccessor(0);
  if (L->contains(HeaderExit))
    HeaderExit = BI->getSuccessor(1);

  for (auto &Phi : Header->phis()) {
    // Look for uses of this phi in the loop/via exits other than the header.
    if (llvm::any_of(Phi.users(), [HeaderExit](const User *U) {
          return cast<Instruction>(U)->getParent() != HeaderExit;
        }))
      continue;
    return true;
  }
  return false;
}

// Check that the latch exit is deoptimizing (i.e., very unlikely to be taken)
// and that there is another, non-deoptimizing exit from the loop. If we rotate
// the latch toward that exit, the loop has a better chance of reaching fully
// canonical form.
//
// This check can give false positives in some rare cases.
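//
// Schematically: if every path from the current latch exit leads to a call
// to @llvm.experimental.deoptimize (a cold path) while some other exit
// returns normally, rotating so that a non-deoptimizing exit becomes the
// latch exit keeps the common path in canonical rotated form.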
static bool canRotateDeoptimizingLatchExit(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "need latch");
  BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
  // Need normal exiting latch.
  if (!BI || !BI->isConditional())
    return false;

  BasicBlock *Exit = BI->getSuccessor(1);
  if (L->contains(Exit))
    Exit = BI->getSuccessor(0);

  // Latch exit is non-deoptimizing, no need to rotate.
  if (!Exit->getPostdominatingDeoptimizeCall())
    return false;

  SmallVector<BasicBlock *, 4> Exits;
  L->getUniqueExitBlocks(Exits);
  if (!Exits.empty()) {
    // There is at least one non-deoptimizing exit.
    //
    // Note that BasicBlock::getPostdominatingDeoptimizeCall is not exact:
    // it can conservatively return false for deoptimizing exits with
    // sufficiently complex control flow down to the deoptimize call.
    //
    // That means we can report success here in a case where all exits are
    // deoptimizing but one of them has complex enough control flow (e.g.
    // with loops).
    //
    // That should be a very rare case, and false positives for this function
    // have a compile-time effect only.
    return any_of(Exits, [](const BasicBlock *BB) {
      return !BB->getPostdominatingDeoptimizeCall();
    });
  }
  return false;
}

/// Rotate loop LP. Return true if the loop is rotated.
///
/// \param SimplifiedLatch is true if the latch was just folded into the final
/// loop exit. In this case we may want to rotate even though the new latch is
/// now an exiting branch. This rotation would have happened had the latch not
/// been simplified. However, if SimplifiedLatch is false, then we avoid
/// rotating loops in which the latch exits to avoid excessive or endless
/// rotation. LoopRotate should be repeatable and converge to a canonical
/// form. This property is satisfied because simplifying the loop latch can
/// only happen once across multiple invocations of the LoopRotate pass.
///
/// If -loop-rotate-multi is enabled we can do multiple rotations in one go
/// so as to reach a suitable (non-deoptimizing) exit.
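///
/// Schematically (for a simple two-block loop): the header's instructions and
/// its exiting branch are cloned into the preheader, the header's in-loop
/// successor becomes the new header, and the old header falls to the bottom
/// of the loop, where it is typically merged into the latch so the loop test
/// ends up at the bottom.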
bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
  // If the loop has only one block then there is not much to rotate.
  if (L->getBlocks().size() == 1)
    return false;

  bool Rotated = false;
  do {
    BasicBlock *OrigHeader = L->getHeader();
    BasicBlock *OrigLatch = L->getLoopLatch();

    BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
    if (!BI || BI->isUnconditional())
      return Rotated;

    // If the loop header is not one of the loop exiting blocks then
    // either this loop is already rotated or it is not
    // suitable for loop rotation transformations.
    if (!L->isLoopExiting(OrigHeader))
      return Rotated;

    // If the loop latch already contains a branch that leaves the loop then
    // the loop is already rotated.
    if (!OrigLatch)
      return Rotated;

    // Rotate if the loop latch does *not* exit the loop, if the loop latch
    // was just simplified, or if we think rotation will be profitable.
    if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch && !IsUtilMode &&
        !profitableToRotateLoopExitingLatch(L) &&
        !canRotateDeoptimizingLatchExit(L))
      return Rotated;

    // Check the size of the original header and reject the loop if it is
    // very big or if we can't duplicate blocks inside it.
    {
      SmallPtrSet<const Value *, 32> EphValues;
      CodeMetrics::collectEphemeralValues(L, AC, EphValues);

      CodeMetrics Metrics;
      Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues, PrepareForLTO);
      if (Metrics.notDuplicatable) {
        LLVM_DEBUG(
            dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
                   << " instructions: ";
            L->dump());
        return Rotated;
      }
      if (Metrics.convergent) {
        LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
                             "instructions: ";
                   L->dump());
        return Rotated;
      }
      if (!Metrics.NumInsts.isValid()) {
        LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains instructions"
                             " with invalid cost: ";
                   L->dump());
        return Rotated;
      }
      if (Metrics.NumInsts > MaxHeaderSize) {
        LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains "
                          << Metrics.NumInsts
                          << " instructions, which is more than the threshold ("
                          << MaxHeaderSize << " instructions): ";
                   L->dump());
        ++NumNotRotatedDueToHeaderSize;
        return Rotated;
      }

      // When preparing for LTO, avoid rotating loops with calls that could be
      // inlined during the LTO stage.
      if (PrepareForLTO && Metrics.NumInlineCandidates > 0)
        return Rotated;
    }

    // Now, this loop is suitable for rotation.
    BasicBlock *OrigPreheader = L->getLoopPreheader();

    // If the loop could not be converted to canonical form, it must have an
    // indirectbr in it, just give up.
    if (!OrigPreheader || !L->hasDedicatedExits())
      return Rotated;

    // Anything ScalarEvolution may know about this loop or the PHI nodes
    // in its header will soon be invalidated. We should also invalidate
    // all outer loops because insertion and deletion of blocks that happens
    // during the rotation may violate invariants related to backedge taken
    // infos in them.
    if (SE) {
      SE->forgetTopmostLoop(L);
      // We may hoist some instructions out of the loop. If they were cached
      // as "loop variant" or "loop computable", those cache entries must be
      // dropped. We also may fold basic blocks, so cached block dispositions
      // need to be dropped as well.
      SE->forgetBlockAndLoopDispositions();
    }

    LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
    if (MSSAU && VerifyMemorySSA)
      MSSAU->getMemorySSA()->verifyMemorySSA();

    // Find the new loop header. NewHeader is the header's one and only
    // successor that is inside the loop; the header's other successor is
    // outside the loop. Otherwise the loop is not suitable for rotation.
    BasicBlock *Exit = BI->getSuccessor(0);
    BasicBlock *NewHeader = BI->getSuccessor(1);
    if (L->contains(Exit))
      std::swap(Exit, NewHeader);
    assert(NewHeader && "Unable to determine new loop header");
    assert(L->contains(NewHeader) && !L->contains(Exit) &&
           "Unable to determine loop header and exit blocks");

    // This code assumes that the new header has exactly one predecessor.
    // Remove any single-entry PHI nodes in it.
    assert(NewHeader->getSinglePredecessor() &&
           "New header doesn't have one pred!");
    FoldSingleEntryPHINodes(NewHeader);

    // Begin by walking OrigHeader and populating ValueMap with an entry for
    // each Instruction.
    BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
    ValueToValueMapTy ValueMap, ValueMapMSSA;

    // For PHI nodes, the value available in OrigPreheader is just the
    // incoming value from OrigPreheader.
    for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
      InsertNewValueIntoMap(ValueMap, PN,
                            PN->getIncomingValueForBlock(OrigPreheader));

    // For the rest of the instructions, either hoist to the OrigPreheader if
    // possible or create a clone in the OrigPreheader if not.
    Instruction *LoopEntryBranch = OrigPreheader->getTerminator();

    // Record all debug intrinsics preceding LoopEntryBranch to avoid
    // duplication.
    using DbgIntrinsicHash =
        std::pair<std::pair<hash_code, DILocalVariable *>, DIExpression *>;
    auto makeHash = [](DbgVariableIntrinsic *D) -> DbgIntrinsicHash {
      auto VarLocOps = D->location_ops();
      return {{hash_combine_range(VarLocOps.begin(), VarLocOps.end()),
               D->getVariable()},
              D->getExpression()};
    };
    SmallDenseSet<DbgIntrinsicHash, 8> DbgIntrinsics;
    for (Instruction &I : llvm::drop_begin(llvm::reverse(*OrigPreheader))) {
      if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I))
        DbgIntrinsics.insert(makeHash(DII));
      else
        break;
    }

    // Remember the local noalias scope declarations in the header. After the
    // rotation, they must be duplicated and the scope must be cloned. This
    // avoids unwanted interaction across iterations.
    SmallVector<NoAliasScopeDeclInst *, 6> NoAliasDeclInstructions;
    for (Instruction &I : *OrigHeader)
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        NoAliasDeclInstructions.push_back(Decl);

    while (I != E) {
      Instruction *Inst = &*I++;

      // If the instruction's operands are invariant and it doesn't read or
      // write memory, then it is safe to hoist. Doing this doesn't change
      // the order of execution in the preheader, but does prevent the
      // instruction from executing in each iteration of the loop. This means
      // it is safe to hoist something that might trap, but isn't safe to
      // hoist something that reads memory (without proving that the loop
      // doesn't write).
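      //
      // For example (schematic, with loop-invariant %a, %b, %p):
      //   %sum = add i32 %a, %b       ; no memory access, hoisted below
      //   %v   = load i32, ptr %p     ; reads memory, cloned instead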
      if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
          !Inst->mayWriteToMemory() && !Inst->isTerminator() &&
          !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) {
        Inst->moveBefore(LoopEntryBranch);
        ++NumInstrsHoisted;
        continue;
      }

      // Otherwise, create a duplicate of the instruction.
      Instruction *C = Inst->clone();
      ++NumInstrsDuplicated;

      // Eagerly remap the operands of the instruction.
      RemapInstruction(C, ValueMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

      // Avoid inserting the same intrinsic twice.
      if (auto *DII = dyn_cast<DbgVariableIntrinsic>(C))
        if (DbgIntrinsics.count(makeHash(DII))) {
          C->deleteValue();
          continue;
        }

      // With the operands remapped, see if the instruction constant folds or
      // is otherwise simplifiable. This commonly occurs because the entry
      // from PHI nodes allows icmps and other instructions to fold.
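      //
      // E.g. (schematic): if a header phi's incoming value from the preheader
      // is "i32 0", a cloned "icmp slt i32 %phi, %n" remaps to
      // "icmp slt i32 0, %n", which may simplify further.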
      Value *V = simplifyInstruction(C, SQ);
      if (V && LI->replacementPreservesLCSSAForm(C, V)) {
        // If so, then delete the temporary instruction and stick the folded
        // value in the map.
        InsertNewValueIntoMap(ValueMap, Inst, V);
        if (!C->mayHaveSideEffects()) {
          C->deleteValue();
          C = nullptr;
        }
      } else {
        InsertNewValueIntoMap(ValueMap, Inst, C);
      }
      if (C) {
        // Otherwise, stick the new instruction into the new block!
        C->setName(Inst->getName());
        C->insertBefore(LoopEntryBranch);

        if (auto *II = dyn_cast<AssumeInst>(C))
          AC->registerAssumption(II);
        // MemorySSA cares whether the cloned instruction was inserted or not,
        // and not whether it can be remapped to a simplified value.
        if (MSSAU)
          InsertNewValueIntoMap(ValueMapMSSA, Inst, C);
      }
    }

    if (!NoAliasDeclInstructions.empty()) {
      // There are noalias scope declarations:
      // (general):
      // Original:    OrigPre              { OrigHeader NewHeader ... Latch }
      // after:      (OrigPre+OrigHeader') { NewHeader ... Latch OrigHeader }
      //
      // with D: llvm.experimental.noalias.scope.decl,
      //      U: !noalias or !alias.scope depending on D
      //       ... { D U1 U2 }   can transform into:
      // (0) : ... { D U1 U2 }        // no relevant rotation for this part
      // (1) : ... D' { U1 U2 D }     // D is part of OrigHeader
      // (2) : ... D' U1' { U2 D U1 } // D, U1 are part of OrigHeader
      //
      // We now want to transform:
      // (1) -> : ... D' { D U1 U2 D'' }
      // (2) -> : ... D' U1' { D U2 D'' U1'' }
      // D: original llvm.experimental.noalias.scope.decl
      // D', U1': duplicate with replaced scopes
      // D'', U1'': different duplicate with replaced scopes
      // This ensures a safe fallback to 'may_alias' introduced by the rotate,
      // as the U1'' and U1' scopes will not be compatible with respect to
      // the local restrict.

      // Clone the llvm.experimental.noalias.scope.decl again for the
      // NewHeader.
      Instruction *NewHeaderInsertionPoint = &(*NewHeader->getFirstNonPHI());
      for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions) {
        LLVM_DEBUG(dbgs() << "  Cloning llvm.experimental.noalias.scope.decl:"
                          << *NAD << "\n");
        Instruction *NewNAD = NAD->clone();
        NewNAD->insertBefore(NewHeaderInsertionPoint);
      }

      // Scopes must now be duplicated, once for OrigHeader and once for
      // OrigPreHeader'.
      {
        auto &Context = NewHeader->getContext();

        SmallVector<MDNode *, 8> NoAliasDeclScopes;
        for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions)
          NoAliasDeclScopes.push_back(NAD->getScopeList());

        LLVM_DEBUG(dbgs() << "  Updating OrigHeader scopes\n");
        cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, {OrigHeader}, Context,
                                   "h.rot");
        LLVM_DEBUG(OrigHeader->dump());

        // Keep the compile time impact low by only adapting the inserted block
        // of instructions in the OrigPreHeader. This might result in slightly
        // more aliasing between these instructions and those that were already
        // present, but it will be much faster when the original PreHeader is
        // large.
        LLVM_DEBUG(dbgs() << "  Updating part of OrigPreheader scopes\n");
        auto *FirstDecl =
            cast<Instruction>(ValueMap[*NoAliasDeclInstructions.begin()]);
        auto *LastInst = &OrigPreheader->back();
        cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, FirstDecl, LastInst,
                                   Context, "pre.rot");
        LLVM_DEBUG(OrigPreheader->dump());

        LLVM_DEBUG(dbgs() << "  Updated NewHeader:\n");
        LLVM_DEBUG(NewHeader->dump());
      }
    }

    // Along with all the other instructions, we just cloned OrigHeader's
    // terminator into OrigPreHeader. Fix up the PHI nodes in each of
    // OrigHeader's successors by duplicating their incoming values for
    // OrigHeader.
    for (BasicBlock *SuccBB : successors(OrigHeader))
      for (BasicBlock::iterator BI = SuccBB->begin();
           PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
        PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);

    // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
    // OrigPreHeader's old terminator (the original branch into the loop), and
    // remove the corresponding incoming values from the PHI nodes in
    // OrigHeader.
    LoopEntryBranch->eraseFromParent();

    // Update MemorySSA before the rewrite call below changes the 1:1
    // instruction:cloned_instruction_or_value mapping.
    if (MSSAU) {
      InsertNewValueIntoMap(ValueMapMSSA, OrigHeader, OrigPreheader);
      MSSAU->updateForClonedBlockIntoPred(OrigHeader, OrigPreheader,
                                          ValueMapMSSA);
    }

    SmallVector<PHINode *, 2> InsertedPHIs;
    // If there were any uses of instructions in the duplicated block outside
    // the loop, update them, inserting PHI nodes as required.
    RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap, SE,
                                    &InsertedPHIs);

    // Attach dbg.value intrinsics to the new phis if that phi uses a value
    // that previously had debug metadata attached. This keeps the debug info
    // up-to-date in the loop body.
    if (!InsertedPHIs.empty())
      insertDebugValuesForPHIs(OrigHeader, InsertedPHIs);

    // NewHeader is now the header of the loop.
    L->moveToHeader(NewHeader);
    assert(L->getHeader() == NewHeader && "Latch block is our new header");

    // Inform DT about changes to the CFG.
    if (DT) {
      // The OrigPreheader now branches to NewHeader and Exit; inform the DT
      // about the inserted edges and the deleted edge to OrigHeader.
      SmallVector<DominatorTree::UpdateType, 3> Updates;
      Updates.push_back({DominatorTree::Insert, OrigPreheader, Exit});
      Updates.push_back({DominatorTree::Insert, OrigPreheader, NewHeader});
      Updates.push_back({DominatorTree::Delete, OrigPreheader, OrigHeader});

      if (MSSAU) {
        MSSAU->applyUpdates(Updates, *DT, /*UpdateDT=*/true);
        if (VerifyMemorySSA)
          MSSAU->getMemorySSA()->verifyMemorySSA();
      } else {
        DT->applyUpdates(Updates);
      }
    }

    // At this point, we've finished our major CFG changes. As part of cloning
    // the loop into the preheader we've simplified instructions and the
    // duplicated conditional branch may now be branching on a constant. If it
    // is branching on a constant and if that constant means that we enter the
    // loop, then we fold away the cond branch to an uncond branch. This
    // simplifies the loop in cases important for nested loops, and it also
    // means we don't have to split as many edges.
    BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
    assert(PHBI->isConditional() && "Should be clone of BI condbr!");
    if (!isa<ConstantInt>(PHBI->getCondition()) ||
        PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero()) !=
            NewHeader) {
      // The conditional branch can't be folded, handle the general case.
      // Split edges as necessary to preserve LoopSimplify form.

      // Right now OrigPreHeader has two successors, NewHeader and ExitBlock,
      // and thus is not a preheader anymore.
      // Split the edge to form a real preheader.
      BasicBlock *NewPH = SplitCriticalEdge(
          OrigPreheader, NewHeader,
          CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
      NewPH->setName(NewHeader->getName() + ".lr.ph");

      // Preserve canonical loop form, which means that 'Exit' should have only
      // one predecessor. Note that Exit could be an exit block for multiple
      // nested loops, causing both of the edges to now be critical and need to
      // be split.
      SmallVector<BasicBlock *, 4> ExitPreds(predecessors(Exit));
      bool SplitLatchEdge = false;
      for (BasicBlock *ExitPred : ExitPreds) {
        // We only need to split loop exit edges.
        Loop *PredLoop = LI->getLoopFor(ExitPred);
        if (!PredLoop || PredLoop->contains(Exit) ||
            isa<IndirectBrInst>(ExitPred->getTerminator()))
          continue;
        SplitLatchEdge |= L->getLoopLatch() == ExitPred;
        BasicBlock *ExitSplit = SplitCriticalEdge(
            ExitPred, Exit,
            CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
        ExitSplit->moveBefore(Exit);
      }
      assert(SplitLatchEdge &&
             "Despite splitting all preds, failed to split latch exit?");
      (void)SplitLatchEdge;
    } else {
      // We can fold the conditional branch in the preheader, this makes things
      // simpler. The first step is to remove the extra edge to the Exit block.
      Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
      BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
      NewBI->setDebugLoc(PHBI->getDebugLoc());
      PHBI->eraseFromParent();

      // With our CFG finalized, update DomTree if it is available.
      if (DT) DT->deleteEdge(OrigPreheader, Exit);

      // Update MSSA too, if available.
      if (MSSAU)
        MSSAU->removeEdge(OrigPreheader, Exit);
    }

    assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
    assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");

    if (MSSAU && VerifyMemorySSA)
      MSSAU->getMemorySSA()->verifyMemorySSA();

    // Now that the CFG and DomTree are in a consistent state again, try to
    // merge the OrigHeader block into OrigLatch. This will succeed if they are
    // connected by an unconditional branch. This is just a cleanup so the
    // emitted code isn't too gross in this common case.
    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
    BasicBlock *PredBB = OrigHeader->getUniquePredecessor();
    bool DidMerge = MergeBlockIntoPredecessor(OrigHeader, &DTU, LI, MSSAU);
    if (DidMerge)
      RemoveRedundantDbgInstrs(PredBB);

    if (MSSAU && VerifyMemorySSA)
      MSSAU->getMemorySSA()->verifyMemorySSA();

    LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump());

    ++NumRotated;

    Rotated = true;
    SimplifiedLatch = false;

    // Check whether the new latch is a deoptimizing exit, and then repeat the
    // rotation if possible. A deoptimizing latch exit is not a typical case,
    // so we just loop over it.
    // TODO: if this becomes a performance bottleneck, extend the rotation
    // algorithm to handle multiple rotations in one go.
  } while (MultiRotate && canRotateDeoptimizingLatchExit(L));

  return true;
}


/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
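///
/// For example (schematic IR): a latch tail consisting only of
///   %i.next = add nsw i32 %i, 1
///   %w = zext i32 %i.next to i64
/// is considered cheap enough to speculate into the exiting block.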
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End, Loop *L) {
  bool seenIncrement = false;
  bool MultiExitLoop = false;

  if (!L->getExitingBlock())
    MultiExitLoop = true;

  for (BasicBlock::iterator I = Begin; I != End; ++I) {

    if (!isSafeToSpeculativelyExecute(&*I))
      return false;

    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
      [[fallthrough]];
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      Value *IVOpnd =
          !isa<Constant>(I->getOperand(0))
              ? I->getOperand(0)
              : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr;
      if (!IVOpnd)
        return false;

      // If increment operand is used outside of the loop, this speculation
      // could cause extra live range interference.
      if (MultiExitLoop) {
        for (User *UseI : IVOpnd->users()) {
          auto *UserInst = cast<Instruction>(UseI);
          if (!L->contains(UserInst))
            return false;
        }
      }

      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    }
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}

/// Fold the loop tail into the loop exit by speculating the loop tail
/// instructions. Typically, this is a single post-increment. In the case of a
/// simple 2-block loop, hoisting the increment can be much better than
/// duplicating the entire loop header. In the case of loops with early exits,
/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
/// canonical form so downstream passes can handle it.
///
/// I don't believe this invalidates SCEV.
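///
/// Schematically: an exiting block ending in "br i1 %cond, %latch, %exit",
/// followed by a latch that contains only speculatable instructions (e.g. a
/// post-increment) and an unconditional branch back to the header, is merged
/// into a single exiting latch.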
bool LoopRotate::simplifyLoopLatch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || Latch->hasAddressTaken())
    return false;

  BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!Jmp || !Jmp->isUnconditional())
    return false;

  BasicBlock *LastExit = Latch->getSinglePredecessor();
  if (!LastExit || !L->isLoopExiting(LastExit))
    return false;

  BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
  if (!BI)
    return false;

  if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
    return false;

  LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
                    << LastExit->getName() << "\n");

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  MergeBlockIntoPredecessor(Latch, &DTU, LI, MSSAU, nullptr,
                            /*PredecessorWithTwoSuccessors=*/true);

  if (SE) {
    // Merging blocks may remove blocks referenced in the block disposition
    // cache. Clear the cache.
    SE->forgetBlockAndLoopDispositions();
  }

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  return true;
}

/// Rotate \c L, and return true if any modification was made.
bool LoopRotate::processLoop(Loop *L) {
  // Save the loop metadata.
  MDNode *LoopMD = L->getLoopID();

  bool SimplifiedLatch = false;

  // Simplify the loop latch before attempting to rotate the header
  // upward. Rotation may not be needed if the loop tail can be folded into the
  // loop exit.
  if (!RotationOnly)
    SimplifiedLatch = simplifyLoopLatch(L);

  bool MadeChange = rotateLoop(L, SimplifiedLatch);
  assert((!MadeChange || L->isLoopExiting(L->getLoopLatch())) &&
         "Loop latch should be exiting after loop-rotate.");

  // Restore the loop metadata.
  // NB! We presume LoopRotation DOESN'T ADD its own metadata.
  if ((MadeChange || SimplifiedLatch) && LoopMD)
    L->setLoopID(LoopMD);

  return MadeChange || SimplifiedLatch;
}

/// The utility to convert a loop into a loop with bottom test.
bool llvm::LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
                        AssumptionCache *AC, DominatorTree *DT,
                        ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
                        const SimplifyQuery &SQ, bool RotationOnly = true,
                        unsigned Threshold = unsigned(-1),
                        bool IsUtilMode = true, bool PrepareForLTO) {
  LoopRotate LR(Threshold, LI, TTI, AC, DT, SE, MSSAU, SQ, RotationOnly,
                IsUtilMode, PrepareForLTO);
  return LR.processLoop(L);
}