//===- ShrinkWrap.cpp - Compute safe point for prolog/epilog insertion ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass looks for safe points where the prologue and epilogue can be
// inserted.
// The safe point for the prologue (resp. epilogue) is called Save
// (resp. Restore).
// A point is safe for the prologue (resp. epilogue) if and only if
// 1) it dominates (resp. post-dominates) all the frame-related operations and
// 2) between two executions of the Save (resp. Restore) point there is an
// execution of the Restore (resp. Save) point.
//
// For instance, the following points are safe:
// for (int i = 0; i < 10; ++i) {
//   Save
//   ...
//   Restore
// }
// Indeed, the execution looks like Save -> Restore -> Save -> Restore ...
// And the following points are not:
// for (int i = 0; i < 10; ++i) {
//   Save
//   ...
// }
// for (int i = 0; i < 10; ++i) {
//   ...
//   Restore
// }
// Indeed, the execution looks like Save -> Save -> ... -> Restore -> Restore.
//
// This pass also ensures that the safe points are 3) cheaper than the regular
// entry and exit blocks.
//
// Property #1 is ensured via the use of MachineDominatorTree and
// MachinePostDominatorTree.
// Property #2 is ensured via property #1 and MachineLoopInfo, i.e., both
// points must be in the same loop.
// Property #3 is ensured via the MachineBlockFrequencyInfo.
//
// If this pass finds points matching all these properties, then
// MachineFrameInfo is updated with this information.
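//
// As a hedged illustration (hypothetical source, not produced by this pass),
// consider a function with a cheap early exit:
//
//   int f(int *p) {
//     if (!p)        // no CSR or stack use on this path
//       return 0;
//     return g(p);   // the call clobbers CSRs and needs the frame
//   }
//
// Without shrink-wrapping, the prologue and epilogue bracket the whole
// function, so the early-exit path pays for the stack setup. With
// shrink-wrapping, Save and Restore are placed around the block containing
// the call, and the early-exit path skips the frame setup entirely.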
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <memory>

using namespace llvm;

#define DEBUG_TYPE "shrink-wrap"

STATISTIC(NumFunc, "Number of functions");
STATISTIC(NumCandidates, "Number of shrink-wrapping candidates");
STATISTIC(NumCandidatesDropped,
          "Number of shrink-wrapping candidates dropped because of frequency");

static cl::opt<cl::boolOrDefault>
EnableShrinkWrapOpt("enable-shrink-wrap", cl::Hidden,
                    cl::desc("enable the shrink-wrapping pass"));
static cl::opt<bool> EnablePostShrinkWrapOpt(
    "enable-shrink-wrap-region-split", cl::init(true), cl::Hidden,
    cl::desc("enable splitting of the restore block if possible"));

namespace {

/// Class to determine where the safe points to insert the
/// prologue and epilogue are.
/// Unlike the paper from Fred C. Chow, PLDI'88, that introduces the
/// shrink-wrapping term for prologue/epilogue placement, this pass
/// does not rely on expensive data-flow analysis. Instead we use the
/// dominance properties and loop information to decide which points
/// are safe for such insertion.
class ShrinkWrap : public MachineFunctionPass {
  /// Hold callee-saved information.
  RegisterClassInfo RCI;
  MachineDominatorTree *MDT = nullptr;
  MachinePostDominatorTree *MPDT = nullptr;

  /// Current safe point found for the prologue.
  /// The prologue will be inserted before the first instruction
  /// in this basic block.
  MachineBasicBlock *Save = nullptr;

  /// Current safe point found for the epilogue.
  /// The epilogue will be inserted before the first terminator instruction
  /// in this basic block.
  MachineBasicBlock *Restore = nullptr;

  /// Hold the basic block frequency information.
  /// Used to check the profitability of the new points.
  MachineBlockFrequencyInfo *MBFI = nullptr;

  /// Hold the loop information. Used to determine if Save and Restore
  /// are in the same loop.
  MachineLoopInfo *MLI = nullptr;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  /// Frequency of the Entry block.
  BlockFrequency EntryFreq;

  /// Current opcode for frame setup.
  unsigned FrameSetupOpcode = ~0u;

  /// Current opcode for frame destroy.
  unsigned FrameDestroyOpcode = ~0u;
  /// Stack pointer register, used by llvm.stacksave/llvm.stackrestore.
  Register SP;

  /// Entry block.
  const MachineBasicBlock *Entry = nullptr;

  using SetOfRegs = SmallSetVector<unsigned, 16>;

  /// Registers that need to be saved for the current function.
  mutable SetOfRegs CurrentCSRs;

  /// Current MachineFunction.
  MachineFunction *MachineFunc = nullptr;

  /// Is `true` for the block numbers where we assume possible stack accesses
  /// or computation of stack-relative addresses on any CFG path including the
  /// block itself. Is `false` for basic blocks where we can guarantee the
  /// opposite. False positives won't lead to incorrect analysis results,
  /// therefore this conservative approximation is sound.
  BitVector StackAddressUsedBlockInfo;

  /// Check if \p MI uses or defines a callee-saved register or
  /// a frame index. If this is the case, this means \p MI must happen
  /// after Save and before Restore.
  bool useOrDefCSROrFI(const MachineInstr &MI, RegScavenger *RS,
                       bool StackAddressUsed) const;

  const SetOfRegs &getCurrentCSRs(RegScavenger *RS) const {
    if (CurrentCSRs.empty()) {
      BitVector SavedRegs;
      const TargetFrameLowering *TFI =
          MachineFunc->getSubtarget().getFrameLowering();

      TFI->determineCalleeSaves(*MachineFunc, SavedRegs, RS);

      for (int Reg = SavedRegs.find_first(); Reg != -1;
           Reg = SavedRegs.find_next(Reg))
        CurrentCSRs.insert((unsigned)Reg);
    }
    return CurrentCSRs;
  }

  /// Update the Save and Restore points such that \p MBB is in
  /// the region that is dominated by Save and post-dominated by Restore,
  /// and Save and Restore still match the safe point definition.
  /// Such points may not exist; Save and/or Restore may be null after
  /// this call.
  void updateSaveRestorePoints(MachineBasicBlock &MBB, RegScavenger *RS);

  // Try to find safe points based on dominance and block frequency without
  // any change in IR.
  bool performShrinkWrapping(
      const ReversePostOrderTraversal<MachineBasicBlock *> &RPOT,
      RegScavenger *RS);

  /// This function tries to split the restore point if doing so can shrink the
  /// save point further. \return True if the restore point is split.
  bool postShrinkWrapping(bool HasCandidate, MachineFunction &MF,
                          RegScavenger *RS);

  /// This function analyzes if the restore point can be split to create a new
  /// restore point. It collects
  /// 1. DirtyPreds: preds of the current restore that are reachable from
  /// blocks with a use or def of CSRs/FI.
  /// 2. CleanPreds: preds of the current restore that are not DirtyPreds.
  /// Both sets must be non-empty to consider a restore point split.
  bool checkIfRestoreSplittable(
      const MachineBasicBlock *CurRestore,
      const DenseSet<const MachineBasicBlock *> &ReachableByDirty,
      SmallVectorImpl<MachineBasicBlock *> &DirtyPreds,
      SmallVectorImpl<MachineBasicBlock *> &CleanPreds,
      const TargetInstrInfo *TII, RegScavenger *RS);

  /// Initialize the pass for \p MF.
  void init(MachineFunction &MF) {
    RCI.runOnMachineFunction(MF);
    MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
    MPDT = &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
    Save = nullptr;
    Restore = nullptr;
    MBFI = &getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI();
    MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
    ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
    EntryFreq = MBFI->getEntryFreq();
    const TargetSubtargetInfo &Subtarget = MF.getSubtarget();
    const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
    FrameSetupOpcode = TII.getCallFrameSetupOpcode();
    FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
    SP = Subtarget.getTargetLowering()->getStackPointerRegisterToSaveRestore();
    Entry = &MF.front();
    CurrentCSRs.clear();
    MachineFunc = &MF;

    ++NumFunc;
  }

  /// Check whether or not Save and Restore points are still interesting for
  /// shrink-wrapping.
  bool ArePointsInteresting() const { return Save != Entry && Save && Restore; }

  /// Check if shrink wrapping is enabled for this target and function.
  static bool isShrinkWrapEnabled(const MachineFunction &MF);

public:
  static char ID;

  ShrinkWrap() : MachineFunctionPass(ID) {
    initializeShrinkWrapPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<MachineBlockFrequencyInfoWrapperPass>();
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
    AU.addRequired<MachineLoopInfoWrapperPass>();
    AU.addRequired<MachineOptimizationRemarkEmitterPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
      MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override { return "Shrink Wrapping analysis"; }

  /// Perform the shrink-wrapping analysis and update
  /// the MachineFrameInfo attached to \p MF with the results.
  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end anonymous namespace

char ShrinkWrap::ID = 0;

char &llvm::ShrinkWrapID = ShrinkWrap::ID;

INITIALIZE_PASS_BEGIN(ShrinkWrap, DEBUG_TYPE, "Shrink Wrap Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(ShrinkWrap, DEBUG_TYPE, "Shrink Wrap Pass", false, false)

bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI, RegScavenger *RS,
                                 bool StackAddressUsed) const {
  /// Check if \p Op is known to access an address not on the function's stack.
  /// At the moment, accesses where the underlying object is a global, function
  /// argument, or jump table are considered non-stack accesses. Note that the
  /// caller's stack may get accessed when passing an argument via the stack,
  /// but not the stack of the current function.
  ///
  auto IsKnownNonStackPtr = [](MachineMemOperand *Op) {
    if (Op->getValue()) {
      const Value *UO = getUnderlyingObject(Op->getValue());
      if (!UO)
        return false;
      if (auto *Arg = dyn_cast<Argument>(UO))
        return !Arg->hasPassPointeeByValueCopyAttr();
      return isa<GlobalValue>(UO);
    }
    if (const PseudoSourceValue *PSV = Op->getPseudoValue())
      return PSV->isJumpTable();
    return false;
  };
  // Load/store operations may access the stack indirectly when we previously
  // computed an address to a stack location.
  if (StackAddressUsed && MI.mayLoadOrStore() &&
      (MI.isCall() || MI.hasUnmodeledSideEffects() || MI.memoperands_empty() ||
       !all_of(MI.memoperands(), IsKnownNonStackPtr)))
    return true;

  if (MI.getOpcode() == FrameSetupOpcode ||
      MI.getOpcode() == FrameDestroyOpcode) {
    LLVM_DEBUG(dbgs() << "Frame instruction: " << MI << '\n');
    return true;
  }
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  for (const MachineOperand &MO : MI.operands()) {
    bool UseOrDefCSR = false;
    if (MO.isReg()) {
      // Ignore instructions like DBG_VALUE which don't read/def the register.
      if (!MO.isDef() && !MO.readsReg())
        continue;
      Register PhysReg = MO.getReg();
      if (!PhysReg)
        continue;
      assert(PhysReg.isPhysical() && "Unallocated register?!");
      // The stack pointer is not normally described as a callee-saved register
      // in calling convention definitions, so we need to watch for it
      // separately. An SP mentioned by a call instruction can be ignored,
      // though, as it's harmless and we do not want to effectively disable
      // tail calls by forcing the restore point to post-dominate them.
      // PPC's LR is also not normally described as a callee-saved register in
      // calling convention definitions, so we need to watch for it, too. An LR
      // mentioned implicitly by a return (or "branch to link register")
      // instruction can be ignored, otherwise we may pessimize shrink-wrapping.
      UseOrDefCSR =
          (!MI.isCall() && PhysReg == SP) ||
          RCI.getLastCalleeSavedAlias(PhysReg) ||
          (!MI.isReturn() && TRI->isNonallocatableRegisterCalleeSave(PhysReg));
    } else if (MO.isRegMask()) {
      // Check if this regmask clobbers any of the CSRs.
      for (unsigned Reg : getCurrentCSRs(RS)) {
        if (MO.clobbersPhysReg(Reg)) {
          UseOrDefCSR = true;
          break;
        }
      }
    }
    // Skip FrameIndex operands in DBG_VALUE instructions.
    if (UseOrDefCSR || (MO.isFI() && !MI.isDebugValue())) {
      LLVM_DEBUG(dbgs() << "Use or define CSR(" << UseOrDefCSR << ") or FI("
                        << MO.isFI() << "): " << MI << '\n');
      return true;
    }
  }
  return false;
}

/// Helper function to find the immediate (post) dominator.
template <typename ListOfBBs, typename DominanceAnalysis>
static MachineBasicBlock *FindIDom(MachineBasicBlock &Block, ListOfBBs BBs,
                                   DominanceAnalysis &Dom, bool Strict = true) {
  MachineBasicBlock *IDom = Dom.findNearestCommonDominator(iterator_range(BBs));
  if (Strict && IDom == &Block)
    return nullptr;
  return IDom;
}
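
// Illustrative note on the calls made later in this file: for instance,
// FindIDom<>(*Save, Save->predecessors(), *MDT) returns the nearest common
// dominator of Save's predecessors; with Strict at its default, a result
// equal to Save itself yields nullptr, so callers either hoist Save one
// dominance level up or learn that no such hoisting is possible.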

static bool isAnalyzableBB(const TargetInstrInfo &TII,
                           MachineBasicBlock &Entry) {
  // Check if the block is analyzable.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  return !TII.analyzeBranch(Entry, TBB, FBB, Cond);
}

/// Determines if any predecessor of MBB is on a path from a block that has a
/// use or def of CSRs/FI to MBB.
/// ReachableByDirty: All blocks reachable from a block that has a use or def
/// of CSRs/FI.
static bool
hasDirtyPred(const DenseSet<const MachineBasicBlock *> &ReachableByDirty,
             const MachineBasicBlock &MBB) {
  for (const MachineBasicBlock *PredBB : MBB.predecessors())
    if (ReachableByDirty.count(PredBB))
      return true;
  return false;
}

/// Derives the list of all the basic blocks reachable from MBB.
static void markAllReachable(DenseSet<const MachineBasicBlock *> &Visited,
                             const MachineBasicBlock &MBB) {
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB.successors());
  Visited.insert(&MBB);
  while (!Worklist.empty()) {
    MachineBasicBlock *SuccMBB = Worklist.pop_back_val();
    if (!Visited.insert(SuccMBB).second)
      continue;
    Worklist.append(SuccMBB->succ_begin(), SuccMBB->succ_end());
  }
}

/// Collect blocks reachable by use or def of CSRs/FI.
static void collectBlocksReachableByDirty(
    const DenseSet<const MachineBasicBlock *> &DirtyBBs,
    DenseSet<const MachineBasicBlock *> &ReachableByDirty) {
  for (const MachineBasicBlock *MBB : DirtyBBs) {
    if (ReachableByDirty.count(MBB))
      continue;
    // Mark MBB and all blocks reachable from it.
    markAllReachable(ReachableByDirty, *MBB);
  }
}

/// \return true if there is a clean path from SavePoint to the original
/// Restore.
static bool
isSaveReachableThroughClean(const MachineBasicBlock *SavePoint,
                            ArrayRef<MachineBasicBlock *> CleanPreds) {
  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(CleanPreds);
  while (!Worklist.empty()) {
    MachineBasicBlock *CleanBB = Worklist.pop_back_val();
    if (CleanBB == SavePoint)
      return true;
    if (!Visited.insert(CleanBB).second || !CleanBB->pred_size())
      continue;
    Worklist.append(CleanBB->pred_begin(), CleanBB->pred_end());
  }
  return false;
}

/// This function updates the branches post restore point split.
///
/// Restore point has been split.
/// Old restore point: MBB
/// New restore point: NMBB
/// Any basic block (say BBToUpdate) that previously fell through to MBB
/// should
/// 1. Fall through to NMBB iff NMBB is inserted immediately above MBB in the
/// block layout, OR
/// 2. Branch unconditionally to NMBB iff NMBB is inserted at any other place.
static void updateTerminator(MachineBasicBlock *BBToUpdate,
                             MachineBasicBlock *NMBB,
                             const TargetInstrInfo *TII) {
  DebugLoc DL = BBToUpdate->findBranchDebugLoc();
  // If NMBB isn't the new layout successor for BBToUpdate, insert an
  // unconditional branch to it.
  if (!BBToUpdate->isLayoutSuccessor(NMBB))
    TII->insertUnconditionalBranch(*BBToUpdate, NMBB, DL);
}

/// This function splits the restore point and returns the new restore
/// point/BB.
///
/// DirtyPreds: Predecessors of \p MBB that are ReachableByDirty
///
/// Decision has been made to split the restore point.
/// Old restore point: \p MBB
/// New restore point: NMBB (the returned block)
/// This function makes the necessary block layout changes so that
/// 1. NMBB points to \p MBB unconditionally
/// 2. All DirtyPreds that previously pointed to \p MBB point to NMBB
static MachineBasicBlock *
tryToSplitRestore(MachineBasicBlock *MBB,
                  ArrayRef<MachineBasicBlock *> DirtyPreds,
                  const TargetInstrInfo *TII) {
  MachineFunction *MF = MBB->getParent();

  // Get the list of DirtyPreds that have a fallthrough to MBB
  // before the block layout change. This is just to ensure that if NMBB is
  // inserted after MBB, then we create an unconditional branch from
  // DirtyPred/CleanPred to NMBB.
  SmallPtrSet<MachineBasicBlock *, 8> MBBFallthrough;
  for (MachineBasicBlock *BB : DirtyPreds)
    if (BB->getFallThrough(false) == MBB)
      MBBFallthrough.insert(BB);

  MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
  // Insert this block at the end of the function. Inserting in between may
  // interfere with control flow optimizer decisions.
  MF->insert(MF->end(), NMBB);

  for (const MachineBasicBlock::RegisterMaskPair &LI : MBB->liveins())
    NMBB->addLiveIn(LI.PhysReg);

  TII->insertUnconditionalBranch(*NMBB, MBB, DebugLoc());

  // After splitting, all predecessors of the restore point should be dirty
  // blocks.
  for (MachineBasicBlock *SuccBB : DirtyPreds)
    SuccBB->ReplaceUsesOfBlockWith(MBB, NMBB);

  NMBB->addSuccessor(MBB);

  for (MachineBasicBlock *BBToUpdate : MBBFallthrough)
    updateTerminator(BBToUpdate, NMBB, TII);

  return NMBB;
}

/// This function undoes the restore point split done earlier.
///
/// DirtyPreds: All predecessors of \p NMBB that are ReachableByDirty.
///
/// Restore point was split and the change needs to be rolled back. Make
/// necessary changes to reset the restore point from \p NMBB to \p MBB.
static void rollbackRestoreSplit(MachineFunction &MF, MachineBasicBlock *NMBB,
                                 MachineBasicBlock *MBB,
                                 ArrayRef<MachineBasicBlock *> DirtyPreds,
                                 const TargetInstrInfo *TII) {
  // For a BB, if NMBB is its fallthrough in the current layout, then in the
  // new layout BB should either a. fall through to MBB, OR b. branch
  // unconditionally to MBB.
  SmallPtrSet<MachineBasicBlock *, 8> NMBBFallthrough;
  for (MachineBasicBlock *BB : DirtyPreds)
    if (BB->getFallThrough(false) == NMBB)
      NMBBFallthrough.insert(BB);

  NMBB->removeSuccessor(MBB);
  for (MachineBasicBlock *SuccBB : DirtyPreds)
    SuccBB->ReplaceUsesOfBlockWith(NMBB, MBB);

  NMBB->erase(NMBB->begin(), NMBB->end());
  NMBB->eraseFromParent();

  for (MachineBasicBlock *BBToUpdate : NMBBFallthrough)
    updateTerminator(BBToUpdate, MBB, TII);
}

// A block is deemed fit for a restore point split iff there exist
// 1. DirtyPreds - preds of CurRestore reachable from a use or def of CSRs/FI
// 2. CleanPreds - preds of CurRestore that aren't DirtyPreds
bool ShrinkWrap::checkIfRestoreSplittable(
    const MachineBasicBlock *CurRestore,
    const DenseSet<const MachineBasicBlock *> &ReachableByDirty,
    SmallVectorImpl<MachineBasicBlock *> &DirtyPreds,
    SmallVectorImpl<MachineBasicBlock *> &CleanPreds,
    const TargetInstrInfo *TII, RegScavenger *RS) {
  for (const MachineInstr &MI : *CurRestore)
    if (useOrDefCSROrFI(MI, RS, /*StackAddressUsed=*/true))
      return false;

  for (MachineBasicBlock *PredBB : CurRestore->predecessors()) {
    if (!isAnalyzableBB(*TII, *PredBB))
      return false;

    if (ReachableByDirty.count(PredBB))
      DirtyPreds.push_back(PredBB);
    else
      CleanPreds.push_back(PredBB);
  }

  return !(CleanPreds.empty() || DirtyPreds.empty());
}

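// As a hedged illustration of a splittable restore point (hypothetical CFG,
// not tied to any target):
//
//            Entry
//           /     \
//      Clean       Dirty (has a use/def of CSRs/FI)
//           \     /
//           Restore
//
// Dirty is the only DirtyPred and Clean the only CleanPred, so the restore
// point qualifies for a split: tryToSplitRestore interposes a new block NMBB
// on the Dirty -> Restore edge, which becomes the new restore point and lets
// postShrinkWrapping sink the save point from Entry towards Dirty.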
bool ShrinkWrap::postShrinkWrapping(bool HasCandidate, MachineFunction &MF,
                                    RegScavenger *RS) {
  if (!EnablePostShrinkWrapOpt)
    return false;

  MachineBasicBlock *InitSave = nullptr;
  MachineBasicBlock *InitRestore = nullptr;

  if (HasCandidate) {
    InitSave = Save;
    InitRestore = Restore;
  } else {
    InitRestore = nullptr;
    InitSave = &MF.front();
    for (MachineBasicBlock &MBB : MF) {
      if (MBB.isEHFuncletEntry())
        return false;
      if (MBB.isReturnBlock()) {
        // Do not support multiple restore points.
        if (InitRestore)
          return false;
        InitRestore = &MBB;
      }
    }
  }

  if (!InitSave || !InitRestore || InitRestore == InitSave ||
      !MDT->dominates(InitSave, InitRestore) ||
      !MPDT->dominates(InitRestore, InitSave))
    return false;

  // Bail out of the optimization if any basic block is the target of an
  // INLINEASM_BR instruction.
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isInlineAsmBrIndirectTarget())
      return false;

  DenseSet<const MachineBasicBlock *> DirtyBBs;
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHPad()) {
      DirtyBBs.insert(&MBB);
      continue;
    }
    for (const MachineInstr &MI : MBB)
      if (useOrDefCSROrFI(MI, RS, /*StackAddressUsed=*/true)) {
        DirtyBBs.insert(&MBB);
        break;
      }
  }

  // Find blocks reachable from the use or def of CSRs/FI.
  DenseSet<const MachineBasicBlock *> ReachableByDirty;
  collectBlocksReachableByDirty(DirtyBBs, ReachableByDirty);

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<MachineBasicBlock *, 2> DirtyPreds;
  SmallVector<MachineBasicBlock *, 2> CleanPreds;
  if (!checkIfRestoreSplittable(InitRestore, ReachableByDirty, DirtyPreds,
                                CleanPreds, TII, RS))
    return false;

  // Try to find a new save point that dominates all dirty blocks.
  MachineBasicBlock *NewSave =
      FindIDom<>(**DirtyPreds.begin(), DirtyPreds, *MDT, false);

  while (NewSave && (hasDirtyPred(ReachableByDirty, *NewSave) ||
                     EntryFreq < MBFI->getBlockFreq(NewSave) ||
                     /* The entry frequency has been observed to be greater
                        than a loop block's frequency in some cases, so also
                        check the loop info. */
                     MLI->getLoopFor(NewSave)))
    NewSave = FindIDom<>(**NewSave->pred_begin(), NewSave->predecessors(), *MDT,
                         false);

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!NewSave || NewSave == InitSave ||
      isSaveReachableThroughClean(NewSave, CleanPreds) ||
      !TFI->canUseAsPrologue(*NewSave))
    return false;

  // Now we know that splitting a restore point can isolate the restore point
  // from clean blocks and doing so can shrink the save point.
  MachineBasicBlock *NewRestore =
      tryToSplitRestore(InitRestore, DirtyPreds, TII);

  // Make sure the new restore point is valid as an epilogue for the target.
  if (!TFI->canUseAsEpilogue(*NewRestore)) {
    rollbackRestoreSplit(MF, NewRestore, InitRestore, DirtyPreds, TII);
    return false;
  }

  Save = NewSave;
  Restore = NewRestore;

  MDT->recalculate(MF);
  MPDT->recalculate(MF);

  assert((MDT->dominates(Save, Restore) && MPDT->dominates(Restore, Save)) &&
         "Incorrect save or restore point due to dominance relations");
  assert((!MLI->getLoopFor(Save) && !MLI->getLoopFor(Restore)) &&
         "Unexpected save or restore point in a loop");
  assert((EntryFreq >= MBFI->getBlockFreq(Save) &&
          EntryFreq >= MBFI->getBlockFreq(Restore)) &&
         "Incorrect save or restore point based on block frequency");
  return true;
}

void ShrinkWrap::updateSaveRestorePoints(MachineBasicBlock &MBB,
                                         RegScavenger *RS) {
  // Get rid of the easy cases first.
  if (!Save)
    Save = &MBB;
  else
    Save = MDT->findNearestCommonDominator(Save, &MBB);
  assert(Save);

  if (!Restore)
    Restore = &MBB;
  else if (MPDT->getNode(&MBB)) // If the block is not in the post dom tree, it
                                // means the block never returns. If that's the
                                // case, we don't want to call
                                // `findNearestCommonDominator`, which will
                                // return `Restore`.
    Restore = MPDT->findNearestCommonDominator(Restore, &MBB);
  else
    Restore = nullptr; // Abort, we can't find a restore point in this case.

  // Make sure we would be able to insert the restore code before the
  // terminator.
  if (Restore == &MBB) {
    for (const MachineInstr &Terminator : MBB.terminators()) {
      if (!useOrDefCSROrFI(Terminator, RS, /*StackAddressUsed=*/true))
        continue;
      // One of the terminators needs to happen before the restore point.
      if (MBB.succ_empty()) {
        Restore = nullptr; // Abort, we can't find a restore point in this case.
        break;
      }
      // Look for a restore point that post-dominates all the successors.
      // The immediate post-dominator is what we are looking for.
      Restore = FindIDom<>(*Restore, Restore->successors(), *MPDT);
      break;
    }
  }

  if (!Restore) {
    LLVM_DEBUG(
        dbgs() << "Restore point needs to span several blocks\n");
    return;
  }

  // Make sure Save and Restore are suitable for shrink-wrapping:
  // 1. all paths from Save need to lead to Restore before exiting.
  // 2. all paths to Restore need to go through Save from Entry.
  // We achieve that by making sure that:
  // A. Save dominates Restore.
  // B. Restore post-dominates Save.
  // C. Save and Restore are in the same loop.
  bool SaveDominatesRestore = false;
  bool RestorePostDominatesSave = false;
  while (Restore &&
         (!(SaveDominatesRestore = MDT->dominates(Save, Restore)) ||
          !(RestorePostDominatesSave = MPDT->dominates(Restore, Save)) ||
          // Post-dominance is not enough in loops to ensure that all uses/defs
          // are after the prologue and before the epilogue at runtime.
          // E.g.,
          // while(1) {
          //  Save
          //  Restore
          //   if (...)
          //     break;
          //  use/def CSRs
          // }
          // All the uses/defs of CSRs are dominated by Save and post-dominated
          // by Restore. However, the CSR uses are still reachable after
          // Restore and before Save are executed.
          //
          // For now, just push the restore/save points outside of loops.
          // FIXME: Refine the criteria to still find interesting cases
          // for loops.
          MLI->getLoopFor(Save) || MLI->getLoopFor(Restore))) {
    // Fix (A).
    if (!SaveDominatesRestore) {
      Save = MDT->findNearestCommonDominator(Save, Restore);
      continue;
    }
    // Fix (B).
    if (!RestorePostDominatesSave)
      Restore = MPDT->findNearestCommonDominator(Restore, Save);

    // Fix (C).
    if (Restore && (MLI->getLoopFor(Save) || MLI->getLoopFor(Restore))) {
      if (MLI->getLoopDepth(Save) > MLI->getLoopDepth(Restore)) {
        // Push Save outside of this loop if the immediate dominator is
        // different from the save block; if it is not, bail out.
        Save = FindIDom<>(*Save, Save->predecessors(), *MDT);
        if (!Save)
          break;
      } else {
        // If the loop does not exit, there is no point in looking
        // for a post-dominator outside the loop.
        SmallVector<MachineBasicBlock*, 4> ExitBlocks;
        MLI->getLoopFor(Restore)->getExitingBlocks(ExitBlocks);
        // Push Restore outside of this loop.
        // Look for the immediate post-dominator of the loop exits.
        MachineBasicBlock *IPdom = Restore;
        for (MachineBasicBlock *LoopExitBB: ExitBlocks) {
          IPdom = FindIDom<>(*IPdom, LoopExitBB->successors(), *MPDT);
          if (!IPdom)
            break;
        }
        // If the immediate post-dominator is not in a less nested loop,
        // then we are stuck in a program with an infinite loop.
        // In that case, we will not find a safe point, hence, bail out.
        if (IPdom && MLI->getLoopDepth(IPdom) < MLI->getLoopDepth(Restore))
          Restore = IPdom;
        else {
          Restore = nullptr;
          break;
        }
      }
    }
  }
}
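
// Worked illustration (hypothetical diamond CFG): if both the 'then' and
// 'else' blocks of an if/else use a CSR, the first call to
// updateSaveRestorePoints sets Save = Restore = 'then'. The second call
// computes Save = nearest common dominator('then', 'else') = the branch
// block, and Restore = nearest common post-dominator = the join block;
// properties (A) and (B) then hold and the fix-up loop does not need to run.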

static bool giveUpWithRemarks(MachineOptimizationRemarkEmitter *ORE,
                              StringRef RemarkName, StringRef RemarkMessage,
                              const DiagnosticLocation &Loc,
                              const MachineBasicBlock *MBB) {
  ORE->emit([&]() {
    return MachineOptimizationRemarkMissed(DEBUG_TYPE, RemarkName, Loc, MBB)
           << RemarkMessage;
  });

  LLVM_DEBUG(dbgs() << RemarkMessage << '\n');
  return false;
}

bool ShrinkWrap::performShrinkWrapping(
    const ReversePostOrderTraversal<MachineBasicBlock *> &RPOT,
    RegScavenger *RS) {
  for (MachineBasicBlock *MBB : RPOT) {
    LLVM_DEBUG(dbgs() << "Look into: " << printMBBReference(*MBB) << '\n');

    if (MBB->isEHFuncletEntry())
      return giveUpWithRemarks(ORE, "UnsupportedEHFunclets",
                               "EH Funclets are not supported yet.",
                               MBB->front().getDebugLoc(), MBB);

    if (MBB->isEHPad() || MBB->isInlineAsmBrIndirectTarget()) {
      // Push the prologue and epilogue outside of the region that may throw (or
      // jump out via inlineasm_br), by making sure that all the landing pads
      // are at least at the boundary of the save and restore points.  The
      // problem is that a basic block can jump out from the middle in these
      // cases, which we do not handle.
      updateSaveRestorePoints(*MBB, RS);
      if (!ArePointsInteresting()) {
        LLVM_DEBUG(dbgs() << "EHPad/inlineasm_br prevents shrink-wrapping\n");
        return false;
      }
      continue;
    }

    bool StackAddressUsed = false;
    // Check if we found any stack accesses in the predecessors. We are not
    // doing a full dataflow analysis here to keep things simple but just
    // rely on a reverse post-order traversal (RPOT) to guarantee predecessors
    // are already processed except for loops (and accept the conservative
    // result for loops).
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (StackAddressUsedBlockInfo.test(Pred->getNumber())) {
        StackAddressUsed = true;
        break;
      }
    }

    for (const MachineInstr &MI : *MBB) {
      if (useOrDefCSROrFI(MI, RS, StackAddressUsed)) {
        // Save (resp. restore) point must dominate (resp. post-dominate)
        // MI. Look for the proper basic block for those.
        updateSaveRestorePoints(*MBB, RS);
        // If we are at a point where we cannot improve the placement of
        // save/restore instructions, just give up.
        if (!ArePointsInteresting()) {
          LLVM_DEBUG(dbgs() << "No Shrink wrap candidate found\n");
          return false;
        }
        // No need to look for other instructions, this basic block
        // will already be part of the handled region.
        StackAddressUsed = true;
        break;
      }
    }
    StackAddressUsedBlockInfo[MBB->getNumber()] = StackAddressUsed;
  }
  if (!ArePointsInteresting()) {
    // If the points are not interesting at this point, then they must be null
    // because it means we did not encounter any frame/CSR related code.
    // Otherwise, we would have returned from the previous loop.
    assert(!Save && !Restore && "We miss a shrink-wrap opportunity?!");
    LLVM_DEBUG(dbgs() << "Nothing to shrink-wrap\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "\n ** Results **\nFrequency of the Entry: "
                    << EntryFreq.getFrequency() << '\n');

  const TargetFrameLowering *TFI =
      MachineFunc->getSubtarget().getFrameLowering();
  do {
    LLVM_DEBUG(dbgs() << "Shrink wrap candidates (#, Name, Freq):\nSave: "
                      << printMBBReference(*Save) << ' '
                      << printBlockFreq(*MBFI, *Save)
                      << "\nRestore: " << printMBBReference(*Restore) << ' '
                      << printBlockFreq(*MBFI, *Restore) << '\n');

    bool IsSaveCheap, TargetCanUseSaveAsPrologue = false;
    if (((IsSaveCheap = EntryFreq >= MBFI->getBlockFreq(Save)) &&
         EntryFreq >= MBFI->getBlockFreq(Restore)) &&
        ((TargetCanUseSaveAsPrologue = TFI->canUseAsPrologue(*Save)) &&
         TFI->canUseAsEpilogue(*Restore)))
      break;
    LLVM_DEBUG(
        dbgs() << "New points are too expensive or invalid for the target\n");
    MachineBasicBlock *NewBB;
    if (!IsSaveCheap || !TargetCanUseSaveAsPrologue) {
      Save = FindIDom<>(*Save, Save->predecessors(), *MDT);
      if (!Save)
        break;
      NewBB = Save;
    } else {
      // Restore is expensive.
      Restore = FindIDom<>(*Restore, Restore->successors(), *MPDT);
      if (!Restore)
        break;
      NewBB = Restore;
    }
    updateSaveRestorePoints(*NewBB, RS);
  } while (Save && Restore);

  if (!ArePointsInteresting()) {
    ++NumCandidatesDropped;
    return false;
  }
  return true;
}

bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()) || MF.empty() || !isShrinkWrapEnabled(MF))
    return false;

  LLVM_DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');

  init(MF);

  ReversePostOrderTraversal<MachineBasicBlock *> RPOT(&*MF.begin());
  if (containsIrreducibleCFG<MachineBasicBlock *>(RPOT, *MLI)) {
    // If MF is irreducible, a block may be in a loop without
    // MachineLoopInfo reporting it. I.e., we may use the
    // post-dominance property in loops, which leads to incorrect
    // results. Moreover, we may miss that the prologue and
    // epilogue are not in the same loop, leading to unbalanced
    // construction/deconstruction of the stack frame.
    return giveUpWithRemarks(ORE, "UnsupportedIrreducibleCFG",
                             "Irreducible CFGs are not supported yet.",
                             MF.getFunction().getSubprogram(), &MF.front());
  }

  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  std::unique_ptr<RegScavenger> RS(
      TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr);

  bool Changed = false;

  // Initially, conservatively assume that stack addresses can be used in each
  // basic block and change the state only for those basic blocks for which we
  // were able to prove the opposite.
  StackAddressUsedBlockInfo.resize(MF.getNumBlockIDs(), true);
  bool HasCandidate = performShrinkWrapping(RPOT, RS.get());
  StackAddressUsedBlockInfo.clear();
  Changed = postShrinkWrapping(HasCandidate, MF, RS.get());
  if (!HasCandidate && !Changed)
    return false;
  if (!ArePointsInteresting())
    return Changed;

  LLVM_DEBUG(dbgs() << "Final shrink wrap candidates:\nSave: "
                    << printMBBReference(*Save) << ' '
                    << "\nRestore: " << printMBBReference(*Restore) << '\n');

  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setSavePoint(Save);
  MFI.setRestorePoint(Restore);
  ++NumCandidates;
  return Changed;
}

bool ShrinkWrap::isShrinkWrapEnabled(const MachineFunction &MF) {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  switch (EnableShrinkWrapOpt) {
  case cl::BOU_UNSET:
    return TFI->enableShrinkWrapping(MF) &&
           // Windows with CFI has some limitations that make it impossible
           // to use shrink-wrapping.
           !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
           // Sanitizers look at the value of the stack at the location
           // of the crash. Since a crash can happen anywhere, the
           // frame must be lowered before anything else happens for the
           // sanitizers to be able to get a correct stack frame.
           !(MF.getFunction().hasFnAttribute(Attribute::SanitizeAddress) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeThread) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeMemory) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeType) ||
             MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress));
  // If EnableShrinkWrap is set, it takes precedence over whatever the
  // target sets. The rationale is that we assume we want to test
  // something related to shrink-wrapping.
  case cl::BOU_TRUE:
    return true;
  case cl::BOU_FALSE:
    return false;
  }
  llvm_unreachable("Invalid shrink-wrapping state");
}