xref: /llvm-project/llvm/lib/CodeGen/MachineSink.cpp (revision 9fecb4f9071740f6c1e665940583e9dceae2beb5)
1 //===- MachineSink.cpp - Sinking for machine instructions -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass moves instructions into successor blocks when possible, so that
10 // they aren't executed on paths where their results aren't needed.
11 //
12 // This pass is not intended to be a replacement or a complete alternative
13 // for an LLVM-IR-level sinking pass. It is only designed to sink simple
14 // constructs that are not exposed before lowering and instruction selection.
15 //
16 //===----------------------------------------------------------------------===//
17 
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/DepthFirstIterator.h"
20 #include "llvm/ADT/MapVector.h"
21 #include "llvm/ADT/PointerIntPair.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/CFG.h"
28 #include "llvm/Analysis/ProfileSummaryInfo.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
31 #include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
32 #include "llvm/CodeGen/MachineCycleAnalysis.h"
33 #include "llvm/CodeGen/MachineDomTreeUpdater.h"
34 #include "llvm/CodeGen/MachineDominators.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineFunctionPass.h"
37 #include "llvm/CodeGen/MachineInstr.h"
38 #include "llvm/CodeGen/MachineLoopInfo.h"
39 #include "llvm/CodeGen/MachineOperand.h"
40 #include "llvm/CodeGen/MachinePostDominators.h"
41 #include "llvm/CodeGen/MachineRegisterInfo.h"
42 #include "llvm/CodeGen/MachineSizeOpts.h"
43 #include "llvm/CodeGen/RegisterClassInfo.h"
44 #include "llvm/CodeGen/RegisterPressure.h"
45 #include "llvm/CodeGen/TargetInstrInfo.h"
46 #include "llvm/CodeGen/TargetPassConfig.h"
47 #include "llvm/CodeGen/TargetRegisterInfo.h"
48 #include "llvm/CodeGen/TargetSchedule.h"
49 #include "llvm/CodeGen/TargetSubtargetInfo.h"
50 #include "llvm/IR/BasicBlock.h"
51 #include "llvm/IR/DebugInfoMetadata.h"
52 #include "llvm/IR/LLVMContext.h"
53 #include "llvm/InitializePasses.h"
54 #include "llvm/Pass.h"
55 #include "llvm/Support/BranchProbability.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Debug.h"
58 #include "llvm/Support/raw_ostream.h"
59 #include <cassert>
60 #include <cstdint>
61 #include <utility>
62 #include <vector>
63 
64 using namespace llvm;
65 
66 #define DEBUG_TYPE "machine-sink"
67 
68 static cl::opt<bool>
69     SplitEdges("machine-sink-split",
70                cl::desc("Split critical edges during machine sinking"),
71                cl::init(true), cl::Hidden);
72 
73 static cl::opt<bool> UseBlockFreqInfo(
74     "machine-sink-bfi",
75     cl::desc("Use block frequency info to find successors to sink"),
76     cl::init(true), cl::Hidden);
77 
78 static cl::opt<unsigned> SplitEdgeProbabilityThreshold(
79     "machine-sink-split-probability-threshold",
80     cl::desc(
81         "Percentage threshold for splitting single-instruction critical edge. "
82         "If the branch probability is higher than this threshold, we allow "
83         "speculative execution of up to 1 instruction to avoid branching to "
84         "the split critical edge"),
85     cl::init(40), cl::Hidden);
86 
87 static cl::opt<unsigned> SinkLoadInstsPerBlockThreshold(
88     "machine-sink-load-instrs-threshold",
89     cl::desc("Do not try to find an aliasing store for a load if there is an "
90              "in-path block whose instruction count exceeds this threshold."),
91     cl::init(2000), cl::Hidden);
92 
93 static cl::opt<unsigned> SinkLoadBlocksThreshold(
94     "machine-sink-load-blocks-threshold",
95     cl::desc("Do not try to find an aliasing store for a load if the number of "
96              "blocks on the straight-line path exceeds this threshold."),
97     cl::init(20), cl::Hidden);
98 
99 static cl::opt<bool>
100     SinkInstsIntoCycle("sink-insts-to-avoid-spills",
101                        cl::desc("Sink instructions into cycles to avoid "
102                                 "register spills"),
103                        cl::init(false), cl::Hidden);
104 
105 static cl::opt<unsigned> SinkIntoCycleLimit(
106     "machine-sink-cycle-limit",
107     cl::desc(
108         "The maximum number of instructions considered for cycle sinking."),
109     cl::init(50), cl::Hidden);
110 
111 STATISTIC(NumSunk, "Number of machine instructions sunk");
112 STATISTIC(NumCycleSunk, "Number of machine instructions sunk into a cycle");
113 STATISTIC(NumSplit, "Number of critical edges split");
114 STATISTIC(NumCoalesces, "Number of copies coalesced");
115 STATISTIC(NumPostRACopySink, "Number of copies sunk after RA");
116 
117 using RegSubRegPair = TargetInstrInfo::RegSubRegPair;
118 
119 namespace {
120 
121 class MachineSinking : public MachineFunctionPass {
122   const TargetSubtargetInfo *STI = nullptr;
123   const TargetInstrInfo *TII = nullptr;
124   const TargetRegisterInfo *TRI = nullptr;
125   MachineRegisterInfo *MRI = nullptr;      // Machine register information
126   MachineDominatorTree *DT = nullptr;      // Machine dominator tree
127   MachinePostDominatorTree *PDT = nullptr; // Machine post dominator tree
128   MachineCycleInfo *CI = nullptr;
129   ProfileSummaryInfo *PSI = nullptr;
130   MachineBlockFrequencyInfo *MBFI = nullptr;
131   const MachineBranchProbabilityInfo *MBPI = nullptr;
132   AliasAnalysis *AA = nullptr;
133   RegisterClassInfo RegClassInfo;
134   TargetSchedModel SchedModel;
135 
136   // Remember which edges have been considered for breaking.
137   SmallSet<std::pair<MachineBasicBlock *, MachineBasicBlock *>, 8>
138       CEBCandidates;
139   // Remember a register that wanted to be sunk into the same block along
140   // a different critical edge.
141   // {register to sink, sink-to block} -> the first sink-from block.
142   // We record the first sink-from block because splitting that (critical)
143   // edge was deferred until we see another register that is going to sink
144   // into the same block.
145   DenseMap<std::pair<Register, MachineBasicBlock *>, MachineBasicBlock *>
146       CEMergeCandidates;
147   // Remember which edges we are about to split.
148   // This is different from CEBCandidates since the edges in
149   // ToSplit will actually be split.
150   SetVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> ToSplit;
151 
152   DenseSet<Register> RegsToClearKillFlags;
153 
154   using AllSuccsCache =
155       SmallDenseMap<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>;
156 
157   /// DBG_VALUE pointer and flag. The flag is true if this DBG_VALUE is
158   /// post-dominated by another DBG_VALUE of the same variable location.
159   /// This is necessary to detect sequences such as:
160   ///     %0 = someinst
161   ///     DBG_VALUE %0, !123, !DIExpression()
162   ///     %1 = anotherinst
163   ///     DBG_VALUE %1, !123, !DIExpression()
164   /// Where if %0 were to sink, the DBG_VALUE should not sink with it, as that
165   /// would re-order assignments.
166   using SeenDbgUser = PointerIntPair<MachineInstr *, 1>;
167 
168   using SinkItem = std::pair<MachineInstr *, MachineBasicBlock *>;
169 
170   /// Record of DBG_VALUE uses of vregs in a block, so that we can identify
171   /// debug instructions to sink.
172   SmallDenseMap<unsigned, TinyPtrVector<SeenDbgUser>> SeenDbgUsers;
173 
174   /// Record of debug variables that have had their locations set in the
175   /// current block.
176   DenseSet<DebugVariable> SeenDbgVars;
177 
178   DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>, bool>
179       HasStoreCache;
180 
181   DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>,
182            SmallVector<MachineInstr *>>
183       StoreInstrCache;
184 
185   /// Cached per-BB register pressure.
186   DenseMap<const MachineBasicBlock *, std::vector<unsigned>>
187       CachedRegisterPressure;
188 
189   bool EnableSinkAndFold;
190 
191 public:
192   static char ID; // Pass identification
193 
194   MachineSinking() : MachineFunctionPass(ID) {
195     initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
196   }
197 
198   bool runOnMachineFunction(MachineFunction &MF) override;
199 
200   void getAnalysisUsage(AnalysisUsage &AU) const override {
201     MachineFunctionPass::getAnalysisUsage(AU);
202     AU.addRequired<AAResultsWrapperPass>();
203     AU.addRequired<MachineDominatorTreeWrapperPass>();
204     AU.addRequired<MachinePostDominatorTreeWrapperPass>();
205     AU.addRequired<MachineCycleInfoWrapperPass>();
206     AU.addRequired<MachineBranchProbabilityInfoWrapperPass>();
207     AU.addPreserved<MachineCycleInfoWrapperPass>();
208     AU.addPreserved<MachineLoopInfoWrapperPass>();
209     AU.addRequired<ProfileSummaryInfoWrapperPass>();
210     if (UseBlockFreqInfo)
211       AU.addRequired<MachineBlockFrequencyInfoWrapperPass>();
212     AU.addRequired<TargetPassConfig>();
213   }
214 
215   void releaseMemory() override {
216     CEBCandidates.clear();
217     CEMergeCandidates.clear();
218   }
219 
220 private:
221   bool ProcessBlock(MachineBasicBlock &MBB);
222   void ProcessDbgInst(MachineInstr &MI);
223   bool isLegalToBreakCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
224                                   MachineBasicBlock *To, bool BreakPHIEdge);
225   bool isWorthBreakingCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
226                                    MachineBasicBlock *To,
227                                    MachineBasicBlock *&DeferredFromBlock);
228 
229   bool hasStoreBetween(MachineBasicBlock *From, MachineBasicBlock *To,
230                        MachineInstr &MI);
231 
232   /// Postpone the splitting of the given critical
233   /// edge (\p From, \p To).
234   ///
235   /// We do not split the edges on the fly. Indeed, this invalidates
236   /// the dominance information and thus triggers a lot of updates
237   /// of that information underneath.
238   /// Instead, we postpone all the splits until after each iteration of
239   /// the main loop. That way, the information is at least valid
240   /// for the lifetime of an iteration.
241   ///
242   /// \return True if the edge is added to ToSplit, false otherwise.
243   /// False can be returned if, for instance, this is not profitable.
244   bool PostponeSplitCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
245                                  MachineBasicBlock *To, bool BreakPHIEdge);
246   bool SinkInstruction(MachineInstr &MI, bool &SawStore,
247                        AllSuccsCache &AllSuccessors);
248 
249   /// If we sink a COPY inst, some debug users of its destination may no
250   /// longer be dominated by the COPY, and will eventually be dropped.
251   /// This is easily rectified by forwarding the non-dominated debug uses
252   /// to the copy source.
253   void SalvageUnsunkDebugUsersOfCopy(MachineInstr &,
254                                      MachineBasicBlock *TargetBlock);
255   bool AllUsesDominatedByBlock(Register Reg, MachineBasicBlock *MBB,
256                                MachineBasicBlock *DefMBB, bool &BreakPHIEdge,
257                                bool &LocalUse) const;
258   MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
259                                       bool &BreakPHIEdge,
260                                       AllSuccsCache &AllSuccessors);
261 
262   void FindCycleSinkCandidates(MachineCycle *Cycle, MachineBasicBlock *BB,
263                                SmallVectorImpl<MachineInstr *> &Candidates);
264 
265   bool
266   aggressivelySinkIntoCycle(MachineCycle *Cycle, MachineInstr &I,
267                             DenseMap<SinkItem, MachineInstr *> &SunkInstrs);
268 
269   bool isProfitableToSinkTo(Register Reg, MachineInstr &MI,
270                             MachineBasicBlock *MBB,
271                             MachineBasicBlock *SuccToSinkTo,
272                             AllSuccsCache &AllSuccessors);
273 
274   bool PerformTrivialForwardCoalescing(MachineInstr &MI,
275                                        MachineBasicBlock *MBB);
276 
277   bool PerformSinkAndFold(MachineInstr &MI, MachineBasicBlock *MBB);
278 
279   SmallVector<MachineBasicBlock *, 4> &
280   GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
281                          AllSuccsCache &AllSuccessors) const;
282 
283   std::vector<unsigned> &getBBRegisterPressure(const MachineBasicBlock &MBB,
284                                                bool UseCache = true);
285 
286   bool registerPressureSetExceedsLimit(unsigned NRegs,
287                                        const TargetRegisterClass *RC,
288                                        const MachineBasicBlock &MBB);
289 
290   bool registerPressureExceedsLimit(const MachineBasicBlock &MBB);
291 };
292 
293 } // end anonymous namespace
294 
295 char MachineSinking::ID = 0;
296 
297 char &llvm::MachineSinkingID = MachineSinking::ID;
298 
299 INITIALIZE_PASS_BEGIN(MachineSinking, DEBUG_TYPE, "Machine code sinking", false,
300                       false)
301 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
302 INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfoWrapperPass)
303 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
304 INITIALIZE_PASS_DEPENDENCY(MachineCycleInfoWrapperPass)
305 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
306 INITIALIZE_PASS_END(MachineSinking, DEBUG_TYPE, "Machine code sinking", false,
307                     false)
308 
309 /// Return true if a target defined block prologue instruction interferes
310 /// with a sink candidate.
311 static bool blockPrologueInterferes(const MachineBasicBlock *BB,
312                                     MachineBasicBlock::const_iterator End,
313                                     const MachineInstr &MI,
314                                     const TargetRegisterInfo *TRI,
315                                     const TargetInstrInfo *TII,
316                                     const MachineRegisterInfo *MRI) {
317   for (MachineBasicBlock::const_iterator PI = BB->getFirstNonPHI(); PI != End;
318        ++PI) {
319     // Only check target-defined prologue instructions
320     if (!TII->isBasicBlockPrologue(*PI))
321       continue;
322     for (auto &MO : MI.operands()) {
323       if (!MO.isReg())
324         continue;
325       Register Reg = MO.getReg();
326       if (!Reg)
327         continue;
328       if (MO.isUse()) {
329         if (Reg.isPhysical() &&
330             (TII->isIgnorableUse(MO) || (MRI && MRI->isConstantPhysReg(Reg))))
331           continue;
332         if (PI->modifiesRegister(Reg, TRI))
333           return true;
334       } else {
335         if (PI->readsRegister(Reg, TRI))
336           return true;
337         // Check for interference with non-dead defs
338         auto *DefOp = PI->findRegisterDefOperand(Reg, TRI, false, true);
339         if (DefOp && !DefOp->isDead())
340           return true;
341       }
342     }
343   }
344 
345   return false;
346 }
347 
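/// Replace a trivial forwarding COPY: if \p MI copies between virtual
/// registers of the same class, and the source register has this COPY as its
/// only non-debug use and is not itself defined by a copy-like instruction,
/// rewrite all uses of the destination to the source and erase the COPY.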
348 bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
349                                                      MachineBasicBlock *MBB) {
350   if (!MI.isCopy())
351     return false;
352 
353   Register SrcReg = MI.getOperand(1).getReg();
354   Register DstReg = MI.getOperand(0).getReg();
355   if (!SrcReg.isVirtual() || !DstReg.isVirtual() ||
356       !MRI->hasOneNonDBGUse(SrcReg))
357     return false;
358 
359   const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
360   const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
361   if (SRC != DRC)
362     return false;
363 
364   MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
365   if (DefMI->isCopyLike())
366     return false;
367   LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);
368   LLVM_DEBUG(dbgs() << "*** to: " << MI);
369   MRI->replaceRegWith(DstReg, SrcReg);
370   MI.eraseFromParent();
371 
372   // Conservatively, clear any kill flags, since it's possible that they are no
373   // longer correct.
374   MRI->clearKillFlags(SrcReg);
375 
376   ++NumCoalesces;
377   return true;
378 }
379 
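/// Sink \p MI by folding it into its users: follow chains of COPYs from its
/// single virtual register def, and either rematerialize MI in place of a
/// terminal COPY into a physical register, or fold MI into the addressing
/// mode of a load/store user. On success the intermediate COPYs and MI itself
/// are erased.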
380 bool MachineSinking::PerformSinkAndFold(MachineInstr &MI,
381                                         MachineBasicBlock *MBB) {
382   if (MI.isCopy() || MI.mayLoadOrStore() ||
383       MI.getOpcode() == TargetOpcode::REG_SEQUENCE)
384     return false;
385 
386   // Don't sink instructions that the target prefers not to sink.
387   if (!TII->shouldSink(MI))
388     return false;
389 
390   // Check if it's safe to move the instruction.
391   bool SawStore = true;
392   if (!MI.isSafeToMove(SawStore))
393     return false;
394 
395   // Convergent operations may not be made control-dependent on additional
396   // values.
397   if (MI.isConvergent())
398     return false;
399 
400   // Don't sink defs/uses of hard registers or if the instruction defines more
401   // than one register.
402   // Don't sink more than two register uses; this covers most of the cases and
403   // greatly simplifies the register pressure checks.
404   Register DefReg;
405   Register UsedRegA, UsedRegB;
406   for (const MachineOperand &MO : MI.operands()) {
407     if (MO.isImm() || MO.isRegMask() || MO.isRegLiveOut() || MO.isMetadata() ||
408         MO.isMCSymbol() || MO.isDbgInstrRef() || MO.isCFIIndex() ||
409         MO.isIntrinsicID() || MO.isPredicate() || MO.isShuffleMask())
410       continue;
411     if (!MO.isReg())
412       return false;
413 
414     Register Reg = MO.getReg();
415     if (Reg == 0)
416       continue;
417 
418     if (Reg.isVirtual()) {
419       if (MO.isDef()) {
420         if (DefReg)
421           return false;
422         DefReg = Reg;
423         continue;
424       }
425 
426       if (UsedRegA == 0)
427         UsedRegA = Reg;
428       else if (UsedRegB == 0)
429         UsedRegB = Reg;
430       else
431         return false;
432       continue;
433     }
434 
435     if (Reg.isPhysical() && MO.isUse() &&
436         (MRI->isConstantPhysReg(Reg) || TII->isIgnorableUse(MO)))
437       continue;
438 
439     return false;
440   }
441 
442   // Scan uses of the destination register. Every use, except the last, must be
443   // a copy, with a chain of copies terminating with either a copy into a hard
444   // register, or a load/store instruction where the use is part of the
445   // address (*not* the stored value).
446   using SinkInfo = std::pair<MachineInstr *, ExtAddrMode>;
447   SmallVector<SinkInfo> SinkInto;
448   SmallVector<Register> Worklist;
449 
450   const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
451   const TargetRegisterClass *RCA =
452       UsedRegA == 0 ? nullptr : MRI->getRegClass(UsedRegA);
453   const TargetRegisterClass *RCB =
454       UsedRegB == 0 ? nullptr : MRI->getRegClass(UsedRegB);
455 
456   Worklist.push_back(DefReg);
457   while (!Worklist.empty()) {
458     Register Reg = Worklist.pop_back_val();
459 
460     for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
461       ExtAddrMode MaybeAM;
462       MachineInstr &UseInst = *MO.getParent();
463       if (UseInst.isCopy()) {
464         Register DstReg;
465         if (const MachineOperand &O = UseInst.getOperand(0); O.isReg())
466           DstReg = O.getReg();
467         if (DstReg == 0)
468           return false;
469         if (DstReg.isVirtual()) {
470           Worklist.push_back(DstReg);
471           continue;
472         }
473         // If we are going to replace a copy, the original instruction must be
474         // as cheap as a copy.
475         if (!TII->isAsCheapAsAMove(MI))
476           return false;
477         // The hard register must be in the register class of the original
478         // instruction's destination register.
479         if (!RC->contains(DstReg))
480           return false;
481       } else if (UseInst.mayLoadOrStore()) {
482         ExtAddrMode AM;
483         if (!TII->canFoldIntoAddrMode(UseInst, Reg, MI, AM))
484           return false;
485         MaybeAM = AM;
486       } else {
487         return false;
488       }
489 
490       if (UseInst.getParent() != MI.getParent()) {
491         // If the register class of the register we are replacing is a superset
492         // of any of the register classes of the operands of the materialized
493         // instruction, don't consider that live range extended.
494         const TargetRegisterClass *RCS = MRI->getRegClass(Reg);
495         if (RCA && RCA->hasSuperClassEq(RCS))
496           RCA = nullptr;
497         else if (RCB && RCB->hasSuperClassEq(RCS))
498           RCB = nullptr;
499         if (RCA || RCB) {
500           if (RCA == nullptr) {
501             RCA = RCB;
502             RCB = nullptr;
503           }
504 
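          // NRegs counts the live ranges extended into this block; when both
          // extended operands share a register class, a single pressure query
          // for two registers of that class covers them both.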
505           unsigned NRegs = !!RCA + !!RCB;
506           if (RCA == RCB)
507             RCB = nullptr;
508 
509           // Check we don't exceed register pressure at the destination.
510           const MachineBasicBlock &MBB = *UseInst.getParent();
511           if (RCB == nullptr) {
512             if (registerPressureSetExceedsLimit(NRegs, RCA, MBB))
513               return false;
514           } else if (registerPressureSetExceedsLimit(1, RCA, MBB) ||
515                      registerPressureSetExceedsLimit(1, RCB, MBB)) {
516             return false;
517           }
518         }
519       }
520 
521       SinkInto.emplace_back(&UseInst, MaybeAM);
522     }
523   }
524 
525   if (SinkInto.empty())
526     return false;
527 
528   // Now we know we can fold the instruction into all its users.
529   for (auto &[SinkDst, MaybeAM] : SinkInto) {
530     MachineInstr *New = nullptr;
531     LLVM_DEBUG(dbgs() << "Sinking copy of"; MI.dump(); dbgs() << "into";
532                SinkDst->dump());
533     if (SinkDst->isCopy()) {
534       // TODO: After performing the sink-and-fold, the original instruction is
535       // deleted. Its value is still available (in a hard register), so if there
536       // are debug instructions which refer to the (now deleted) virtual
537       // register they could be updated to refer to the hard register, in
538       // principle. However, it's not clear how to do that, moreover in some
539       // cases the debug instructions may need to be replicated proportionally
540       // to the number of the COPY instructions replaced and in some extreme
541       // cases we can end up with a quadratic increase in the number of debug
542       // instructions.
543 
544       // Sink a copy of the instruction, replacing a COPY instruction.
545       MachineBasicBlock::iterator InsertPt = SinkDst->getIterator();
546       Register DstReg = SinkDst->getOperand(0).getReg();
547       TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0, MI, *TRI);
548       New = &*std::prev(InsertPt);
549       if (!New->getDebugLoc())
550         New->setDebugLoc(SinkDst->getDebugLoc());
551 
552       // The operand registers of the "sunk" instruction have their live range
553       // extended and their kill flags may no longer be correct. Conservatively
554       // clear the kill flags.
555       if (UsedRegA)
556         MRI->clearKillFlags(UsedRegA);
557       if (UsedRegB)
558         MRI->clearKillFlags(UsedRegB);
559     } else {
560       // Fold instruction into the addressing mode of a memory instruction.
561       New = TII->emitLdStWithAddr(*SinkDst, MaybeAM);
562 
563       // The registers of the addressing mode may have their live range extended
564       // and their kill flags may no longer be correct. Conservatively clear the
565       // kill flags.
566       if (Register R = MaybeAM.BaseReg; R.isValid() && R.isVirtual())
567         MRI->clearKillFlags(R);
568       if (Register R = MaybeAM.ScaledReg; R.isValid() && R.isVirtual())
569         MRI->clearKillFlags(R);
570     }
571     LLVM_DEBUG(dbgs() << "yielding"; New->dump());
572     // Clear the StoreInstrCache, since we may invalidate it by erasing.
573     if (SinkDst->mayStore() && !SinkDst->hasOrderedMemoryRef())
574       StoreInstrCache.clear();
575     SinkDst->eraseFromParent();
576   }
577 
578   // Collect operands that need to be cleaned up because the registers no longer
579   // exist (in COPYs and debug instructions). We cannot delete instructions or
580   // clear operands while traversing register uses.
581   SmallVector<MachineOperand *> Cleanup;
582   Worklist.push_back(DefReg);
583   while (!Worklist.empty()) {
584     Register Reg = Worklist.pop_back_val();
585     for (MachineOperand &MO : MRI->use_operands(Reg)) {
586       MachineInstr *U = MO.getParent();
587       assert((U->isCopy() || U->isDebugInstr()) &&
588              "Only debug uses and copies must remain");
589       if (U->isCopy())
590         Worklist.push_back(U->getOperand(0).getReg());
591       Cleanup.push_back(&MO);
592     }
593   }
594 
595   // Delete the dead COPYs and clear operands in debug instructions
596   for (MachineOperand *MO : Cleanup) {
597     MachineInstr *I = MO->getParent();
598     if (I->isCopy()) {
599       I->eraseFromParent();
600     } else {
601       MO->setReg(0);
602       MO->setSubReg(0);
603     }
604   }
605 
606   MI.eraseFromParent();
607   return true;
608 }
609 
610 /// AllUsesDominatedByBlock - Return true if all uses of the specified register
611 /// occur in blocks dominated by the specified block. If any use is in the
612 /// definition block, then return false since it is never legal to move a def
613 /// after its uses.
614 bool MachineSinking::AllUsesDominatedByBlock(Register Reg,
615                                              MachineBasicBlock *MBB,
616                                              MachineBasicBlock *DefMBB,
617                                              bool &BreakPHIEdge,
618                                              bool &LocalUse) const {
619   assert(Reg.isVirtual() && "Only makes sense for vregs");
620 
621   // Ignore debug uses because debug info doesn't affect the code.
622   if (MRI->use_nodbg_empty(Reg))
623     return true;
624 
625   // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
626   // into and they are all PHI nodes. In this case, machine-sink must break
627   // the critical edge first. e.g.
628   //
629   // %bb.1:
630   //   Predecessors according to CFG: %bb.0
631   //     ...
632   //     %def = DEC64_32r %x, implicit-def dead %eflags
633   //     ...
634   //     JE_4 <%bb.37>, implicit %eflags
635   //   Successors according to CFG: %bb.37 %bb.2
636   //
637   // %bb.2:
638   //     %p = PHI %y, %bb.0, %def, %bb.1
639   if (all_of(MRI->use_nodbg_operands(Reg), [&](MachineOperand &MO) {
640         MachineInstr *UseInst = MO.getParent();
641         unsigned OpNo = MO.getOperandNo();
642         MachineBasicBlock *UseBlock = UseInst->getParent();
643         return UseBlock == MBB && UseInst->isPHI() &&
644                UseInst->getOperand(OpNo + 1).getMBB() == DefMBB;
645       })) {
646     BreakPHIEdge = true;
647     return true;
648   }
649 
650   for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
651     // Determine the block of the use.
652     MachineInstr *UseInst = MO.getParent();
653     unsigned OpNo = &MO - &UseInst->getOperand(0);
654     MachineBasicBlock *UseBlock = UseInst->getParent();
655     if (UseInst->isPHI()) {
656       // PHI nodes use the operand in the predecessor block, not the block with
657       // the PHI.
658       UseBlock = UseInst->getOperand(OpNo + 1).getMBB();
659     } else if (UseBlock == DefMBB) {
660       LocalUse = true;
661       return false;
662     }
663 
664     // Check that it dominates.
665     if (!DT->dominates(MBB, UseBlock))
666       return false;
667   }
668 
669   return true;
670 }
671 
672 /// Return true if this machine instruction loads from global offset table or
673 /// constant pool.
674 static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI) {
675   assert(MI.mayLoad() && "Expected MI that loads!");
676 
677   // If we lost memory operands, conservatively assume that the instruction
678   // reads from everything.
679   if (MI.memoperands_empty())
680     return true;
681 
682   for (MachineMemOperand *MemOp : MI.memoperands())
683     if (const PseudoSourceValue *PSV = MemOp->getPseudoValue())
684       if (PSV->isGOT() || PSV->isConstantPool())
685         return true;
686 
687   return false;
688 }
689 
690 void MachineSinking::FindCycleSinkCandidates(
691     MachineCycle *Cycle, MachineBasicBlock *BB,
692     SmallVectorImpl<MachineInstr *> &Candidates) {
693   for (auto &MI : *BB) {
694     LLVM_DEBUG(dbgs() << "CycleSink: Analysing candidate: " << MI);
695     if (MI.isMetaInstruction()) {
696       LLVM_DEBUG(dbgs() << "CycleSink: not sinking meta instruction\n");
697       continue;
698     }
699     if (!TII->shouldSink(MI)) {
700       LLVM_DEBUG(dbgs() << "CycleSink: Instruction not a candidate for this "
701                            "target\n");
702       continue;
703     }
704     if (!isCycleInvariant(Cycle, MI)) {
705       LLVM_DEBUG(dbgs() << "CycleSink: Instruction is not cycle invariant\n");
706       continue;
707     }
708     bool DontMoveAcrossStore = true;
709     if (!MI.isSafeToMove(DontMoveAcrossStore)) {
710       LLVM_DEBUG(dbgs() << "CycleSink: Instruction not safe to move.\n");
711       continue;
712     }
713     if (MI.mayLoad() && !mayLoadFromGOTOrConstantPool(MI)) {
714       LLVM_DEBUG(dbgs() << "CycleSink: Not sinking load that isn't from GOT "
715                            "or constant pool\n");
715       continue;
716     }
717     if (MI.isConvergent())
718       continue;
719 
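    // Only consider instructions whose first operand defines a register that
    // has a single definition.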
720     const MachineOperand &MO = MI.getOperand(0);
721     if (!MO.isReg() || !MO.getReg() || !MO.isDef())
722       continue;
723     if (!MRI->hasOneDef(MO.getReg()))
724       continue;
725 
726     LLVM_DEBUG(dbgs() << "CycleSink: Instruction added as candidate.\n");
727     Candidates.push_back(&MI);
728   }
729 }
730 
731 bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
732   if (skipFunction(MF.getFunction()))
733     return false;
734 
735   LLVM_DEBUG(dbgs() << "******** Machine Sinking ********\n");
736 
737   STI = &MF.getSubtarget();
738   TII = STI->getInstrInfo();
739   TRI = STI->getRegisterInfo();
740   MRI = &MF.getRegInfo();
741   DT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
742   PDT = &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
743   CI = &getAnalysis<MachineCycleInfoWrapperPass>().getCycleInfo();
744   PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
745   MBFI = UseBlockFreqInfo
746              ? &getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI()
747              : nullptr;
748   MBPI = &getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI();
749   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
750   RegClassInfo.runOnMachineFunction(MF);
751   TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
752   EnableSinkAndFold = PassConfig->getEnableSinkAndFold();
753 
754   bool EverMadeChange = false;
755 
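  // Iterate to a fixed point: sinking one instruction can expose further
  // sinking opportunities, and edges marked for splitting are only split
  // after each pass over the blocks.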
756   while (true) {
757     bool MadeChange = false;
758 
759     // Process all basic blocks.
760     CEBCandidates.clear();
761     CEMergeCandidates.clear();
762     ToSplit.clear();
763     for (auto &MBB : MF)
764       MadeChange |= ProcessBlock(MBB);
765 
766     // If we have anything we marked as toSplit, split it now.
767     MachineDomTreeUpdater MDTU(DT, PDT,
768                                MachineDomTreeUpdater::UpdateStrategy::Lazy);
769     for (const auto &Pair : ToSplit) {
770       auto NewSucc =
771           Pair.first->SplitCriticalEdge(Pair.second, *this, nullptr, &MDTU);
772       if (NewSucc != nullptr) {
773         LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "
774                           << printMBBReference(*Pair.first) << " -- "
775                           << printMBBReference(*NewSucc) << " -- "
776                           << printMBBReference(*Pair.second) << '\n');
777         if (MBFI)
778           MBFI->onEdgeSplit(*Pair.first, *NewSucc, *MBPI);
779 
780         MadeChange = true;
781         ++NumSplit;
782         CI->splitCriticalEdge(Pair.first, Pair.second, NewSucc);
783       } else
784         LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");
785     }
786     // If this iteration over the code changed anything, keep iterating.
787     if (!MadeChange)
788       break;
789     EverMadeChange = true;
790   }
791 
792   if (SinkInstsIntoCycle) {
793     SmallVector<MachineCycle *, 8> Cycles(CI->toplevel_cycles());
794     SchedModel.init(STI);
795     bool HasHighPressure;
796 
797     DenseMap<SinkItem, MachineInstr *> SunkInstrs;
798 
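    // Sink in stages of increasing aggressiveness: first only a limited
    // number of COPYs, then any instruction the target reports as having a
    // low def latency, then every remaining candidate. Stop once no cycle
    // preheader still exceeds the register pressure limits.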
799     enum CycleSinkStage { COPY, LOW_LATENCY, AGGRESSIVE, END };
800     for (unsigned Stage = CycleSinkStage::COPY; Stage != CycleSinkStage::END;
801          ++Stage, SunkInstrs.clear()) {
802       HasHighPressure = false;
803 
804       for (auto *Cycle : Cycles) {
805         MachineBasicBlock *Preheader = Cycle->getCyclePreheader();
806         if (!Preheader) {
807           LLVM_DEBUG(dbgs() << "CycleSink: Can't find preheader\n");
808           continue;
809         }
810         SmallVector<MachineInstr *, 8> Candidates;
811         FindCycleSinkCandidates(Cycle, Preheader, Candidates);
812 
813         unsigned i = 0;
814 
815         // Walk the candidates in reverse order so that we start with the use
816         // of a def-use chain, if there is any.
817         // TODO: Sort the candidates using a cost-model.
818         for (MachineInstr *I : llvm::reverse(Candidates)) {
819           // CycleSinkStage::COPY: Sink a limited number of copies
820           if (Stage == CycleSinkStage::COPY) {
821             if (i++ == SinkIntoCycleLimit) {
822               LLVM_DEBUG(dbgs()
823                          << "CycleSink:   Reached limit of instructions to "
824                             "be analyzed.\n");
825               break;
826             }
827 
828             if (!I->isCopy())
829               continue;
830           }
831 
832           // CycleSinkStage::LOW_LATENCY: sink an unlimited number of
833           // instructions which the target specifies as low-latency.
834           if (Stage == CycleSinkStage::LOW_LATENCY &&
835               !TII->hasLowDefLatency(SchedModel, *I, 0))
836             continue;
837 
838           if (!aggressivelySinkIntoCycle(Cycle, *I, SunkInstrs))
839             break;
840           EverMadeChange = true;
841           ++NumCycleSunk;
842         }
843 
844         // Recalculate the pressure after sinking
845         if (!HasHighPressure)
846           HasHighPressure = registerPressureExceedsLimit(*Preheader);
847       }
848       if (!HasHighPressure)
849         break;
850     }
851   }
852 
853   HasStoreCache.clear();
854   StoreInstrCache.clear();
855 
856   // Now clear any kill flags for recorded registers.
857   for (auto I : RegsToClearKillFlags)
858     MRI->clearKillFlags(I);
859   RegsToClearKillFlags.clear();
860 
861   return EverMadeChange;
862 }
863 
864 bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
865   if ((!EnableSinkAndFold && MBB.succ_size() <= 1) || MBB.empty())
866     return false;
867 
868   // Don't bother sinking code out of unreachable blocks. In addition to being
869   // unprofitable, it can also lead to infinite looping, because in an
870   // unreachable cycle there may be nowhere to stop.
871   if (!DT->isReachableFromEntry(&MBB))
872     return false;
873 
874   bool MadeChange = false;
875 
876   // Cache all successors, sorted by frequency info and cycle depth.
877   AllSuccsCache AllSuccessors;
878 
879   // Walk the basic block bottom-up.  Remember if we saw a store.
880   MachineBasicBlock::iterator I = MBB.end();
881   --I;
882   bool ProcessedBegin, SawStore = false;
883   do {
884     MachineInstr &MI = *I; // The instruction to sink.
885 
886     // Predecrement I (if it's not begin) so that it isn't invalidated by
887     // sinking.
888     ProcessedBegin = I == MBB.begin();
889     if (!ProcessedBegin)
890       --I;
891 
892     if (MI.isDebugOrPseudoInstr() || MI.isFakeUse()) {
893       if (MI.isDebugValue())
894         ProcessDbgInst(MI);
895       continue;
896     }
897 
898     if (EnableSinkAndFold && PerformSinkAndFold(MI, &MBB)) {
899       MadeChange = true;
900       continue;
901     }
902 
903     // Can't sink anything out of a block that has fewer than two successors.
904     if (MBB.succ_size() <= 1)
905       continue;
906 
907     if (PerformTrivialForwardCoalescing(MI, &MBB)) {
908       MadeChange = true;
909       continue;
910     }
911 
912     if (SinkInstruction(MI, SawStore, AllSuccessors)) {
913       ++NumSunk;
914       MadeChange = true;
915     }
916 
917     // If we just processed the first instruction in the block, we're done.
918   } while (!ProcessedBegin);
919 
920   SeenDbgUsers.clear();
921   SeenDbgVars.clear();
922   // Clear the cache so BB register pressure is recalculated after sinking.
923   CachedRegisterPressure.clear();
924   return MadeChange;
925 }
926 
927 void MachineSinking::ProcessDbgInst(MachineInstr &MI) {
928   // When we see a DBG_VALUE for a register, record any vreg it reads, so that
929   // we know what to sink if the vreg def sinks.
930   assert(MI.isDebugValue() && "Expected DBG_VALUE for processing");
931 
932   DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
933                     MI.getDebugLoc()->getInlinedAt());
934   bool SeenBefore = SeenDbgVars.contains(Var);
935 
936   for (MachineOperand &MO : MI.debug_operands()) {
937     if (MO.isReg() && MO.getReg().isVirtual())
938       SeenDbgUsers[MO.getReg()].push_back(SeenDbgUser(&MI, SeenBefore));
939   }
940 
941   // Record the variable for any DBG_VALUE, to avoid re-ordering any of them.
942   SeenDbgVars.insert(Var);
943 }
944 
945 bool MachineSinking::isWorthBreakingCriticalEdge(
946     MachineInstr &MI, MachineBasicBlock *From, MachineBasicBlock *To,
947     MachineBasicBlock *&DeferredFromBlock) {
948   // FIXME: Need much better heuristics.
949 
950   // If the pass has already considered breaking this edge (during this pass
951   // through the function), then let's go ahead and break it. This means
952   // sinking multiple "cheap" instructions into the same block.
953   if (!CEBCandidates.insert(std::make_pair(From, To)).second)
954     return true;
955 
956   if (!MI.isCopy() && !TII->isAsCheapAsAMove(MI))
957     return true;
958 
959   // Check and record the register and the destination block we want to sink
960   // into. Note that we want to do this before the branch probability check
961   // below, because we want to record the initial candidate even if it is on a
962   // hot edge, so that other candidates that are not on hot edges can be sunk
963   // as well.
964   for (const auto &MO : MI.all_defs()) {
965     Register Reg = MO.getReg();
966     if (!Reg)
967       continue;
968     Register SrcReg = Reg.isVirtual() ? TRI->lookThruCopyLike(Reg, MRI) : Reg;
969     auto Key = std::make_pair(SrcReg, To);
970     auto Res = CEMergeCandidates.try_emplace(Key, From);
971     // We already wanted to sink the same register into the same block;
972     // consider it to be profitable.
973     if (!Res.second) {
974       // Return the source block that was previously held off.
975       DeferredFromBlock = Res.first->second;
976       return true;
977     }
978   }
979 
980   if (From->isSuccessor(To) &&
981       MBPI->getEdgeProbability(From, To) <=
982           BranchProbability(SplitEdgeProbabilityThreshold, 100))
983     return true;
984 
985   // MI is cheap, so we probably don't want to break the critical edge for it.
986   // However, if this would allow some definitions of its source operands
987   // to be sunk then it's probably worth it.
988   for (const MachineOperand &MO : MI.all_uses()) {
989     Register Reg = MO.getReg();
990     if (Reg == 0)
991       continue;
992 
993     // We don't move live definitions of physical registers,
994     // so sinking their uses won't enable any opportunities.
995     if (Reg.isPhysical())
996       continue;
997 
998     // If this instruction is the only user of a virtual register,
999     // check if breaking the edge will enable sinking
1000     // both this instruction and the defining instruction.
1001     if (MRI->hasOneNonDBGUse(Reg)) {
1002       // If the definition resides in the same MBB,
1003       // claim it's likely we can sink these together.
1004       // If the definition resides elsewhere, we aren't
1005       // blocking it from being sunk, so don't break the edge.
1006       MachineInstr *DefMI = MRI->getVRegDef(Reg);
1007       if (DefMI->getParent() == MI.getParent())
1008         return true;
1009     }
1010   }
1011 
1012   // Let the target decide if it's worth breaking this
1013   // critical edge for a "cheap" instruction.
1014   return TII->shouldBreakCriticalEdgeToSink(MI);
1015 }
1016 
1017 bool MachineSinking::isLegalToBreakCriticalEdge(MachineInstr &MI,
1018                                                 MachineBasicBlock *FromBB,
1019                                                 MachineBasicBlock *ToBB,
1020                                                 bool BreakPHIEdge) {
1021   // Avoid breaking a back edge. From == To means a backedge of a single-BB cycle.
1022   if (!SplitEdges || FromBB == ToBB || !FromBB->isSuccessor(ToBB))
1023     return false;
1024 
1025   MachineCycle *FromCycle = CI->getCycle(FromBB);
1026   MachineCycle *ToCycle = CI->getCycle(ToBB);
1027 
1028   // Check for backedges of more "complex" cycles.
1029   if (FromCycle == ToCycle && FromCycle &&
1030       (!FromCycle->isReducible() || FromCycle->getHeader() == ToBB))
1031     return false;
1032 
1033   // It's not always legal to break critical edges and sink the computation
1034   // to the edge.
1035   //
1036   // %bb.1:
1037   // v1024
1038   // Beq %bb.3
1039   // <fallthrough>
1040   // %bb.2:
1041   // ... no uses of v1024
1042   // <fallthrough>
1043   // %bb.3:
1044   // ...
1045   //       = v1024
1046   //
1047   // If %bb.1 -> %bb.3 edge is broken and computation of v1024 is inserted:
1048   //
1049   // %bb.1:
1050   // ...
1051   // Bne %bb.2
1052   // %bb.4:
1053   // v1024 =
1054   // B %bb.3
1055   // %bb.2:
1056   // ... no uses of v1024
1057   // <fallthrough>
1058   // %bb.3:
1059   // ...
1060   //       = v1024
1061   //
1062   // This is incorrect since v1024 is not computed along the %bb.1->%bb.2->%bb.3
1063   // flow. We need to ensure that the new basic block the computation is sunk
1064   // to dominates all the uses.
1065   // It's only legal to break the critical edge and sink the computation to the
1066   // new block if all the predecessors of "To", except for "From", are
1067   // not dominated by "From". Given the SSA property, this means these
1068   // predecessors are dominated by "To".
1069   //
1070   // There is no need to do this check if all the uses are PHI nodes. PHI
1071   // sources are only defined on the specific predecessor edges.
1072   if (!BreakPHIEdge) {
1073     for (MachineBasicBlock *Pred : ToBB->predecessors())
1074       if (Pred != FromBB && !DT->dominates(ToBB, Pred))
1075         return false;
1076   }
1077 
1078   return true;
1079 }
1080 
1081 bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,
1082                                                MachineBasicBlock *FromBB,
1083                                                MachineBasicBlock *ToBB,
1084                                                bool BreakPHIEdge) {
1085   bool Status = false;
1086   MachineBasicBlock *DeferredFromBB = nullptr;
1087   if (isWorthBreakingCriticalEdge(MI, FromBB, ToBB, DeferredFromBB)) {
1088     // If there is a DeferredFromBB, we consider FromBB only if _both_
1089     // of them are legal to split.
1090     if ((!DeferredFromBB ||
1091          ToSplit.count(std::make_pair(DeferredFromBB, ToBB)) ||
1092          isLegalToBreakCriticalEdge(MI, DeferredFromBB, ToBB, BreakPHIEdge)) &&
1093         isLegalToBreakCriticalEdge(MI, FromBB, ToBB, BreakPHIEdge)) {
1094       ToSplit.insert(std::make_pair(FromBB, ToBB));
1095       if (DeferredFromBB)
1096         ToSplit.insert(std::make_pair(DeferredFromBB, ToBB));
1097       Status = true;
1098     }
1099   }
1100 
1101   return Status;
1102 }
1103 
1104 std::vector<unsigned> &
1105 MachineSinking::getBBRegisterPressure(const MachineBasicBlock &MBB,
1106                                       bool UseCache) {
1107   // Currently, to save compile time, an MBB's register pressure will not change
1108   // in one ProcessBlock iteration because of CachedRegisterPressure, but the
1109   // MBB's register pressure does change after sinking any instruction into it.
1110   // FIXME: need an accurate and cheap register pressure estimation model here.
1111 
1112   auto RP = CachedRegisterPressure.find(&MBB);
1113   if (UseCache && RP != CachedRegisterPressure.end())
1114     return RP->second;
1115 
1116   RegionPressure Pressure;
1117   RegPressureTracker RPTracker(Pressure);
1118 
1119   // Initialize the register pressure tracker.
1120   RPTracker.init(MBB.getParent(), &RegClassInfo, nullptr, &MBB, MBB.end(),
1121                  /*TrackLaneMasks*/ false, /*TrackUntiedDefs=*/true);
1122 
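  // Walk the block bottom-up, feeding each instruction to the tracker; the
  // maximum pressure recorded for each register pressure set is what gets
  // cached and compared against the limits.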
1123   for (MachineBasicBlock::const_iterator MII = MBB.instr_end(),
1124                                          MIE = MBB.instr_begin();
1125        MII != MIE; --MII) {
1126     const MachineInstr &MI = *std::prev(MII);
1127     if (MI.isDebugInstr() || MI.isPseudoProbe())
1128       continue;
1129     RegisterOperands RegOpers;
1130     RegOpers.collect(MI, *TRI, *MRI, false, false);
1131     RPTracker.recedeSkipDebugValues();
1132     assert(&*RPTracker.getPos() == &MI && "RPTracker sync error!");
1133     RPTracker.recede(RegOpers);
1134   }
1135 
1136   RPTracker.closeRegion();
1137 
1138   if (RP != CachedRegisterPressure.end()) {
1139     CachedRegisterPressure[&MBB] = RPTracker.getPressure().MaxSetPressure;
1140     return CachedRegisterPressure[&MBB];
1141   }
1142 
1143   auto It = CachedRegisterPressure.insert(
1144       std::make_pair(&MBB, RPTracker.getPressure().MaxSetPressure));
1145   return It.first->second;
1146 }
1147 
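/// Return true if adding \p NRegs registers of class \p RC to \p MBB would
/// bring any of RC's register pressure sets up to or above its limit.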
1148 bool MachineSinking::registerPressureSetExceedsLimit(
1149     unsigned NRegs, const TargetRegisterClass *RC,
1150     const MachineBasicBlock &MBB) {
1151   unsigned Weight = NRegs * TRI->getRegClassWeight(RC).RegWeight;
1152   const int *PS = TRI->getRegClassPressureSets(RC);
1153   std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(MBB);
1154   for (; *PS != -1; PS++)
1155     if (Weight + BBRegisterPressure[*PS] >=
1156         RegClassInfo.getRegPressureSetLimit(*PS))
1157       return true;
1158   return false;
1159 }
1160 
1161 // Recalculate RP and check if any pressure set exceeds the set limit.
1162 bool MachineSinking::registerPressureExceedsLimit(
1163     const MachineBasicBlock &MBB) {
1164   std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(MBB, false);
1165 
1166   for (unsigned PS = 0; PS < BBRegisterPressure.size(); ++PS) {
1167     if (BBRegisterPressure[PS] >=
1168         TRI->getRegPressureSetLimit(*MBB.getParent(), PS)) {
1169       return true;
1170     }
1171   }
1172 
1173   return false;
1174 }
1175 
1176 /// isProfitableToSinkTo - Return true if it is profitable to sink MI.
1177 bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI,
1178                                           MachineBasicBlock *MBB,
1179                                           MachineBasicBlock *SuccToSinkTo,
1180                                           AllSuccsCache &AllSuccessors) {
1181   assert(SuccToSinkTo && "Invalid SinkTo Candidate BB");
1182 
1183   if (MBB == SuccToSinkTo)
1184     return false;
1185 
1186   // It is profitable if SuccToSinkTo does not post dominate current block.
1187   // It is profitable if SuccToSinkTo does not post-dominate the current block.
1188     return true;
1189 
1190   // It is profitable to sink an instruction from a deeper cycle to a shallower
1191   // cycle, even if the latter post-dominates the former (PR21115).
1192   if (CI->getCycleDepth(MBB) > CI->getCycleDepth(SuccToSinkTo))
1193     return true;
1194 
1195   // Check if the only uses in the post-dominated block are PHI instructions.
1196   bool NonPHIUse = false;
1197   for (MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg)) {
1198     MachineBasicBlock *UseBlock = UseInst.getParent();
1199     if (UseBlock == SuccToSinkTo && !UseInst.isPHI())
1200       NonPHIUse = true;
1201   }
1202   if (!NonPHIUse)
1203     return true;
1204 
1205   // Even if SuccToSinkTo post-dominates MBB, it may still be profitable if MI
1206   // can be further profitably sunk into another block in the next round.
1207   bool BreakPHIEdge = false;
1208   // FIXME - If finding successor is compile time expensive then cache results.
1209   if (MachineBasicBlock *MBB2 =
1210           FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
1211     return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);
1212 
1213   MachineCycle *MCycle = CI->getCycle(MBB);
1214 
1215   // If the instruction is not inside a cycle, it is not profitable to sink MI
1216   // to the post-dominating block SuccToSinkTo.
1217   if (!MCycle)
1218     return false;
1219 
1220   // If this instruction is inside a cycle and sinking it shortens the live
1221   // ranges of more registers, it is still profitable.
1222   for (const MachineOperand &MO : MI.operands()) {
1223     // Ignore non-register operands.
1224     if (!MO.isReg())
1225       continue;
1226     Register Reg = MO.getReg();
1227     if (Reg == 0)
1228       continue;
1229 
1230     if (Reg.isPhysical()) {
1231       // Don't handle non-constant and non-ignorable physical register uses.
1232       if (MO.isUse() && !MRI->isConstantPhysReg(Reg) &&
1233           !TII->isIgnorableUse(MO))
1234         return false;
1235       continue;
1236     }
1237 
1238     // Users for the defs are all dominated by SuccToSinkTo.
1239     if (MO.isDef()) {
1240       // This def register's live range is shortened after sinking.
1241       bool LocalUse = false;
1242       if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB, BreakPHIEdge,
1243                                    LocalUse))
1244         return false;
1245     } else {
1246       MachineInstr *DefMI = MRI->getVRegDef(Reg);
1247       if (!DefMI)
1248         continue;
1249       MachineCycle *Cycle = CI->getCycle(DefMI->getParent());
1250       // DefMI is defined outside of the cycle. There should be no live range
1251       // impact for this operand. A definition outside of the cycle means:
1252       // 1: the definition is outside of the cycle.
1253       // 2: the definition is in this cycle, but it is a PHI in the cycle header.
1254       if (Cycle != MCycle || (DefMI->isPHI() && Cycle && Cycle->isReducible() &&
1255                               Cycle->getHeader() == DefMI->getParent()))
1256         continue;
1257       // The DefMI is defined inside the cycle.
1258       // If sinking this operand makes some register pressure set exceed its
1259       // limit, it is not profitable.
1260       if (registerPressureSetExceedsLimit(1, MRI->getRegClass(Reg),
1261                                           *SuccToSinkTo)) {
1262         LLVM_DEBUG(dbgs() << "register pressure exceeds limit, not profitable.");
1263         return false;
1264       }
1265     }
1266   }
1267 
1268   // If MI is in the cycle and all its operands are live across the whole cycle,
1269   // or if sinking it does not make any register pressure set exceed its limit,
1270   // it is profitable to sink MI.
1271   return true;
1272 }
1273 
1274 /// Get the sorted sequence of successors for this MachineBasicBlock, possibly
1275 /// computing it if it was not already cached.
1276 SmallVector<MachineBasicBlock *, 4> &
1277 MachineSinking::GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
1278                                        AllSuccsCache &AllSuccessors) const {
1279   // Do we have the sorted successors in cache ?
1280   auto Succs = AllSuccessors.find(MBB);
1281   if (Succs != AllSuccessors.end())
1282     return Succs->second;
1283 
1284   SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->successors());
1285 
1286   // Handle cases where sinking can happen but where the sink point isn't a
1287   // successor. For example:
1288   //
1289   //   x = computation
1290   //   if () {} else {}
1291   //   use x
1292   //
1293   for (MachineDomTreeNode *DTChild : DT->getNode(MBB)->children()) {
1294     // DomTree children of MBB that have MBB as immediate dominator are added.
1295     if (DTChild->getIDom()->getBlock() == MI.getParent() &&
1296         // Skip MBBs already added to the AllSuccs vector above.
1297         !MBB->isSuccessor(DTChild->getBlock()))
1298       AllSuccs.push_back(DTChild->getBlock());
1299   }
1300 
1301   // Sort Successors according to their cycle depth or block frequency info.
1302   llvm::stable_sort(
1303       AllSuccs, [&](const MachineBasicBlock *L, const MachineBasicBlock *R) {
1304         uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
1305         uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
1306         if (llvm::shouldOptimizeForSize(MBB, PSI, MBFI) ||
1307             (!LHSFreq && !RHSFreq))
1308           return CI->getCycleDepth(L) < CI->getCycleDepth(R);
1309         return LHSFreq < RHSFreq;
1310       });
1311 
1312   auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));
1313 
1314   return it.first->second;
1315 }
1316 
1317 /// FindSuccToSinkTo - Find a successor to sink this instruction to.
1318 MachineBasicBlock *
1319 MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
1320                                  bool &BreakPHIEdge,
1321                                  AllSuccsCache &AllSuccessors) {
1322   assert(MBB && "Invalid MachineBasicBlock!");
1323 
1324   // Loop over all the operands of the specified instruction. If there is
1325   // anything we can't handle, bail out.
1326 
1327   // SuccToSinkTo - This is the successor to sink this instruction to, once we
1328   // decide.
1329   MachineBasicBlock *SuccToSinkTo = nullptr;
1330   for (const MachineOperand &MO : MI.operands()) {
1331     if (!MO.isReg())
1332       continue; // Ignore non-register operands.
1333 
1334     Register Reg = MO.getReg();
1335     if (Reg == 0)
1336       continue;
1337 
1338     if (Reg.isPhysical()) {
1339       if (MO.isUse()) {
1340         // If the physreg has no defs anywhere, it's just an ambient register
1341         // and we can freely move its uses. Alternatively, if it's allocatable,
1342         // it could get allocated to something with a def during allocation.
1343         if (!MRI->isConstantPhysReg(Reg) && !TII->isIgnorableUse(MO))
1344           return nullptr;
1345       } else if (!MO.isDead()) {
1346         // A def that isn't dead. We can't move it.
1347         return nullptr;
1348       }
1349     } else {
1350       // Virtual register uses are always safe to sink.
1351       if (MO.isUse())
1352         continue;
1353 
1354       // If it's not safe to move defs of the register class, then abort.
1355       if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
1356         return nullptr;
1357 
1358       // Virtual register defs can only be sunk if all their uses are in blocks
1359       // dominated by one of the successors.
1360       if (SuccToSinkTo) {
1361         // If a previous operand picked a block to sink to, then this operand
1362         // must be sinkable to the same block.
1363         bool LocalUse = false;
1364         if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB, BreakPHIEdge,
1365                                      LocalUse))
1366           return nullptr;
1367 
1368         continue;
1369       }
1370 
1371       // Otherwise, we should look at all the successors and decide which one
1372       // we should sink to. If we have reliable block frequency information
1373       // (frequency != 0) available, give successors with smaller frequencies
1374       // higher priority, otherwise prioritize smaller cycle depths.
1375       for (MachineBasicBlock *SuccBlock :
1376            GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
1377         bool LocalUse = false;
1378         if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB, BreakPHIEdge,
1379                                     LocalUse)) {
1380           SuccToSinkTo = SuccBlock;
1381           break;
1382         }
1383         if (LocalUse)
1384           // Def is used locally, it's never safe to move this def.
1385           return nullptr;
1386       }
1387 
1388       // If we couldn't find a block to sink to, ignore this instruction.
1389       if (!SuccToSinkTo)
1390         return nullptr;
1391       if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
1392         return nullptr;
1393     }
1394   }
1395 
1396   // It is not possible to sink an instruction into its own block.  This can
1397   // happen with cycles.
1398   if (MBB == SuccToSinkTo)
1399     return nullptr;
1400 
1401   // It's not safe to sink instructions to an EH landing pad. Control flow into
1402   // a landing pad is implicitly defined.
1403   if (SuccToSinkTo && SuccToSinkTo->isEHPad())
1404     return nullptr;
1405 
1406   // It ought to be okay to sink instructions into an INLINEASM_BR target, but
1407   // only if we make sure that MI occurs _before_ an INLINEASM_BR instruction in
1408   // the source block (which this code does not yet do). So for now, forbid
1409   // doing so.
1410   if (SuccToSinkTo && SuccToSinkTo->isInlineAsmBrIndirectTarget())
1411     return nullptr;
1412 
1413   if (SuccToSinkTo && !TII->isSafeToSink(MI, SuccToSinkTo, CI))
1414     return nullptr;
1415 
1416   return SuccToSinkTo;
1417 }
1418 
1419 /// Return true if MI is likely to be usable as a memory operation by the
1420 /// implicit null check optimization.
1421 ///
1422 /// This is a "best effort" heuristic, and should not be relied upon for
1423 /// correctness.  This returning true does not guarantee that the implicit null
1424 /// check optimization is legal over MI, and this returning false does not
1425 /// guarantee MI cannot possibly be used to do a null check.
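///
/// Illustrative shape of the pattern being checked for (hypothetical MIR; the
/// !make.implicit metadata actually lives on the IR terminator of the
/// predecessor's IR block):
///   bb.0:
///     Bcc (cmp %base, 0), ...     ; conditional branch on a null test of %base
///   bb.1:                         ; sole predecessor is bb.0
///     %v = LOAD %base, 0          ; MI: a non-predicable load from %base
/// Sinking such a load out of bb.1 would prevent it from later being used as
/// the faulting access of an implicit null check.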
1426 static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
1427                                              const TargetInstrInfo *TII,
1428                                              const TargetRegisterInfo *TRI) {
1429   using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;
1430 
1431   auto *MBB = MI.getParent();
1432   if (MBB->pred_size() != 1)
1433     return false;
1434 
1435   auto *PredMBB = *MBB->pred_begin();
1436   auto *PredBB = PredMBB->getBasicBlock();
1437 
1438   // Frontends that don't use implicit null checks have no reason to emit
1439   // branches with make.implicit metadata, and this function should always
1440   // return false for them.
1441   if (!PredBB ||
1442       !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
1443     return false;
1444 
1445   const MachineOperand *BaseOp;
1446   int64_t Offset;
1447   bool OffsetIsScalable;
1448   if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, TRI))
1449     return false;
1450 
1451   if (!BaseOp->isReg())
1452     return false;
1453 
1454   if (!(MI.mayLoad() && !MI.isPredicable()))
1455     return false;
1456 
1457   MachineBranchPredicate MBP;
1458   if (TII->analyzeBranchPredicate(*PredMBB, MBP, false))
1459     return false;
1460 
1461   return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
1462          (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
1463           MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
1464          MBP.LHS.getReg() == BaseOp->getReg();
1465 }
1466 
1467 /// If the sunk instruction is a copy, try to forward the copy instead of
1468 /// leaving an 'undef' DBG_VALUE in the original location. Don't do this if
1469 /// there's any subregister weirdness involved. Returns true if copy
1470 /// propagation occurred.
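///
/// As a rough illustration (hypothetical vregs): if the sunk instruction is
///   %1 = COPY %0
/// and a DBG_VALUE of %1 would be left behind, rewriting the DBG_VALUE to
/// refer to %0 keeps the variable location alive instead of dropping it to
/// undef.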
1471 static bool attemptDebugCopyProp(MachineInstr &SinkInst, MachineInstr &DbgMI,
1472                                  Register Reg) {
1473   const MachineRegisterInfo &MRI = SinkInst.getMF()->getRegInfo();
1474   const TargetInstrInfo &TII = *SinkInst.getMF()->getSubtarget().getInstrInfo();
1475 
1476   // Check whether the DBG_VALUE's operands can be rewritten from the sunk
1477   // copy's destination register to its source. If they can't, the caller will
1478   // set the original DBG_VALUE to undef.
1479 
1480   const MachineOperand *SrcMO = nullptr, *DstMO = nullptr;
1481   auto CopyOperands = TII.isCopyInstr(SinkInst);
1482   if (!CopyOperands)
1483     return false;
1484   SrcMO = CopyOperands->Source;
1485   DstMO = CopyOperands->Destination;
1486 
1487   // Check validity of forwarding this copy.
1488   bool PostRA = MRI.getNumVirtRegs() == 0;
1489 
1490   // Trying to forward between physical and virtual registers is too hard.
1491   if (Reg.isVirtual() != SrcMO->getReg().isVirtual())
1492     return false;
1493 
1494   // Only try virtual register copy-forwarding before regalloc, and physical
1495   // register copy-forwarding after regalloc.
1496   bool arePhysRegs = !Reg.isVirtual();
1497   if (arePhysRegs != PostRA)
1498     return false;
1499 
1500   // Pre-regalloc, only forward if all subregisters agree (or there are no
1501   // subregs at all). More analysis might recover some forwardable copies.
1502   if (!PostRA)
1503     for (auto &DbgMO : DbgMI.getDebugOperandsForReg(Reg))
1504       if (DbgMO.getSubReg() != SrcMO->getSubReg() ||
1505           DbgMO.getSubReg() != DstMO->getSubReg())
1506         return false;
1507 
1508   // Post-regalloc, we may be sinking a DBG_VALUE of a sub or super-register
1509   // of this copy. Only forward the copy if the DBG_VALUE operand exactly
1510   // matches the copy destination.
1511   if (PostRA && Reg != DstMO->getReg())
1512     return false;
1513 
1514   for (auto &DbgMO : DbgMI.getDebugOperandsForReg(Reg)) {
1515     DbgMO.setReg(SrcMO->getReg());
1516     DbgMO.setSubReg(SrcMO->getSubReg());
1517   }
1518   return true;
1519 }
1520 
1521 using MIRegs = std::pair<MachineInstr *, SmallVector<unsigned, 2>>;
1522 /// Sink an instruction and its associated debug instructions.
1523 static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo,
1524                         MachineBasicBlock::iterator InsertPos,
1525                         ArrayRef<MIRegs> DbgValuesToSink) {
1526   // If we cannot find a location to use (merge with), then we erase the debug
1527   // location to prevent debug-info driven tools from potentially reporting
1528   // wrong location information.
1529   if (!SuccToSinkTo.empty() && InsertPos != SuccToSinkTo.end())
1530     MI.setDebugLoc(DILocation::getMergedLocation(MI.getDebugLoc(),
1531                                                  InsertPos->getDebugLoc()));
1532   else
1533     MI.setDebugLoc(DebugLoc());
1534 
1535   // Move the instruction.
1536   MachineBasicBlock *ParentBlock = MI.getParent();
1537   SuccToSinkTo.splice(InsertPos, ParentBlock, MI,
1538                       ++MachineBasicBlock::iterator(MI));
1539 
1540   // Sink a copy of debug users to the insert position. Mark the original
1541   // DBG_VALUE location as 'undef', indicating that any earlier variable
1542   // location should be terminated as we've optimised away the value at this
1543   // point.
1544   for (const auto &DbgValueToSink : DbgValuesToSink) {
1545     MachineInstr *DbgMI = DbgValueToSink.first;
1546     MachineInstr *NewDbgMI = DbgMI->getMF()->CloneMachineInstr(DbgMI);
1547     SuccToSinkTo.insert(InsertPos, NewDbgMI);
1548 
1549     bool PropagatedAllSunkOps = true;
1550     for (unsigned Reg : DbgValueToSink.second) {
1551       if (DbgMI->hasDebugOperandForReg(Reg)) {
1552         if (!attemptDebugCopyProp(MI, *DbgMI, Reg)) {
1553           PropagatedAllSunkOps = false;
1554           break;
1555         }
1556       }
1557     }
1558     if (!PropagatedAllSunkOps)
1559       DbgMI->setDebugValueUndef();
1560   }
1561 }
1562 
1563 /// hasStoreBetween - check if there is a store between straight-line blocks
1564 /// From and To.
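///
/// Results are cached: block pairs with a definite answer are recorded in
/// HasStoreCache, and the stores seen on the way are recorded in
/// StoreInstrCache so that later queries only have to redo the alias checks
/// against a different instruction.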
1565 bool MachineSinking::hasStoreBetween(MachineBasicBlock *From,
1566                                      MachineBasicBlock *To, MachineInstr &MI) {
1567   // Make sure From and To are on a straight line, which means From dominates
1568   // To and To post-dominates From.
1569   if (!DT->dominates(From, To) || !PDT->dominates(To, From))
1570     return true;
1571 
1572   auto BlockPair = std::make_pair(From, To);
1573 
1574   // Has this block pair been queried before, and does it have a definite
1575   // cached result?
1576   if (auto It = HasStoreCache.find(BlockPair); It != HasStoreCache.end())
1577     return It->second;
1578 
1579   if (auto It = StoreInstrCache.find(BlockPair); It != StoreInstrCache.end())
1580     return llvm::any_of(It->second, [&](MachineInstr *I) {
1581       return I->mayAlias(AA, MI, false);
1582     });
1583 
1584   bool SawStore = false;
1585   bool HasAliasedStore = false;
1586   DenseSet<MachineBasicBlock *> HandledBlocks;
1587   DenseSet<MachineBasicBlock *> HandledDomBlocks;
1588   // Go through all blocks reachable from From.
1589   for (MachineBasicBlock *BB : depth_first(From)) {
1590     // We insert the instruction at the start of block To, so no need to worry
1591     // about stores inside To.
1592     // Stores in block From have already been considered on entry to
1593     // SinkInstruction.
1594     if (BB == To || BB == From)
1595       continue;
1596 
1597     // We already handled this BB in a previous iteration.
1598     if (HandledBlocks.count(BB))
1599       continue;
1600 
1601     HandledBlocks.insert(BB);
1602     // To post-dominates BB, so BB must lie on a path from From to To.
1603     if (PDT->dominates(To, BB)) {
1604       if (!HandledDomBlocks.count(BB))
1605         HandledDomBlocks.insert(BB);
1606 
1607       // If this BB is too big, or the number of blocks on the straight line
1608       // between From and To is too big, stop searching to save compile time.
1609       if (BB->sizeWithoutDebugLargerThan(SinkLoadInstsPerBlockThreshold) ||
1610           HandledDomBlocks.size() > SinkLoadBlocksThreshold) {
1611         for (auto *DomBB : HandledDomBlocks) {
1612           if (DomBB != BB && DT->dominates(DomBB, BB))
1613             HasStoreCache[std::make_pair(DomBB, To)] = true;
1614           else if (DomBB != BB && DT->dominates(BB, DomBB))
1615             HasStoreCache[std::make_pair(From, DomBB)] = true;
1616         }
1617         HasStoreCache[BlockPair] = true;
1618         return true;
1619       }
1620 
1621       for (MachineInstr &I : *BB) {
1622         // Treat as alias conservatively for a call or an ordered memory
1623         // operation.
1624         if (I.isCall() || I.hasOrderedMemoryRef()) {
1625           for (auto *DomBB : HandledDomBlocks) {
1626             if (DomBB != BB && DT->dominates(DomBB, BB))
1627               HasStoreCache[std::make_pair(DomBB, To)] = true;
1628             else if (DomBB != BB && DT->dominates(BB, DomBB))
1629               HasStoreCache[std::make_pair(From, DomBB)] = true;
1630           }
1631           HasStoreCache[BlockPair] = true;
1632           return true;
1633         }
1634 
1635         if (I.mayStore()) {
1636           SawStore = true;
1637           // We may still be able to sink MI if none of the stores in between
1638           // alias MI.
1639           // Cache all store instructions, so that we don't need to walk all
1640           // blocks reachable from From again for the next load instruction.
1641           if (I.mayAlias(AA, MI, false))
1642             HasAliasedStore = true;
1643           StoreInstrCache[BlockPair].push_back(&I);
1644         }
1645       }
1646     }
1647   }
1648   // If there is no store at all, cache the result.
1649   if (!SawStore)
1650     HasStoreCache[BlockPair] = false;
1651   return HasAliasedStore;
1652 }
1653 
1654 /// Aggressively sink instructions into cycles. This tries to sink all
1655 /// instructions in the top-most preheaders to reduce register pressure.
1656 /// In particular, it will sink into multiple successor blocks without limits
1657 /// based on the amount of sinking, or the type of ops being sunk (so long as
1658 /// they are safe to sink).
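///
/// For each suitable in-cycle use of the single def of I, a clone of I is
/// inserted at the top of the user's block (reusing a clone already recorded
/// in SunkInstrs where possible) and the use is rewritten to the clone's
/// result; I itself is erased once all of its uses have been replaced and it
/// is dead.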
1659 bool MachineSinking::aggressivelySinkIntoCycle(
1660     MachineCycle *Cycle, MachineInstr &I,
1661     DenseMap<SinkItem, MachineInstr *> &SunkInstrs) {
1662   // TODO: support instructions with multiple defs
1663   if (I.getNumDefs() > 1)
1664     return false;
1665 
1666   LLVM_DEBUG(dbgs() << "AggressiveCycleSink: Finding sink block for: " << I);
1667   assert(Cycle->getCyclePreheader() && "Cycle sink needs a preheader block");
1668   SmallVector<std::pair<RegSubRegPair, MachineInstr *>> Uses;
1669 
1670   MachineOperand &DefMO = I.getOperand(0);
1671   for (MachineInstr &MI : MRI->use_instructions(DefMO.getReg())) {
1672     Uses.push_back({{DefMO.getReg(), DefMO.getSubReg()}, &MI});
1673   }
1674 
1675   for (std::pair<RegSubRegPair, MachineInstr *> Entry : Uses) {
1676     MachineInstr *MI = Entry.second;
1677     LLVM_DEBUG(dbgs() << "AggressiveCycleSink:   Analysing use: " << MI);
1678     if (MI->isPHI()) {
1679       LLVM_DEBUG(
1680           dbgs() << "AggressiveCycleSink:   Not attempting to sink for PHI.\n");
1681       continue;
1682     }
1683     // We cannot sink before the prologue
1684     if (MI->isPosition() || TII->isBasicBlockPrologue(*MI)) {
1685       LLVM_DEBUG(dbgs() << "AggressiveCycleSink:   Use is BasicBlock prologue, "
1686                            "can't sink.\n");
1687       continue;
1688     }
1689     if (!Cycle->contains(MI->getParent())) {
1690       LLVM_DEBUG(
1691           dbgs() << "AggressiveCycleSink:   Use not in cycle, can't sink.\n");
1692       continue;
1693     }
1694 
1695     MachineBasicBlock *SinkBlock = MI->getParent();
1696     MachineInstr *NewMI = nullptr;
1697     SinkItem MapEntry(&I, SinkBlock);
1698 
1699     auto SI = SunkInstrs.find(MapEntry);
1700 
1701     // Check for the case in which we have already sunk a copy of this
1702     // instruction into the user block.
1703     if (SI != SunkInstrs.end()) {
1704       LLVM_DEBUG(dbgs() << "AggressiveCycleSink:   Already sunk to block: "
1705                         << printMBBReference(*SinkBlock) << "\n");
1706       NewMI = SI->second;
1707     }
1708 
1709     // Create a copy of the instruction in the use block.
1710     if (!NewMI) {
1711       LLVM_DEBUG(dbgs() << "AggressiveCycleSink: Sinking instruction to block: "
1712                         << printMBBReference(*SinkBlock) << "\n");
1713 
1714       NewMI = I.getMF()->CloneMachineInstr(&I);
1715       if (DefMO.getReg().isVirtual()) {
1716         const TargetRegisterClass *TRC = MRI->getRegClass(DefMO.getReg());
1717         Register DestReg = MRI->createVirtualRegister(TRC);
1718         NewMI->substituteRegister(DefMO.getReg(), DestReg, DefMO.getSubReg(),
1719                                   *TRI);
1720       }
1721       SinkBlock->insert(SinkBlock->SkipPHIsAndLabels(SinkBlock->begin()),
1722                         NewMI);
1723       SunkInstrs.insert({MapEntry, NewMI});
1724     }
1725 
1726     // Conservatively clear any kill flags on uses of the sunk instruction.
1727     for (MachineOperand &MO : NewMI->all_uses()) {
1728       assert(MO.isReg() && MO.isUse());
1729       RegsToClearKillFlags.insert(MO.getReg());
1730     }
1731 
1732     // The cloned instruction lives in a different basic block, so do not
1733     // retain its debug location.
1734     assert(!NewMI->isDebugInstr() && "Should not sink debug inst");
1735     NewMI->setDebugLoc(DebugLoc());
1736 
1737     // Replace the use with the newly created virtual register.
1738     RegSubRegPair &UseReg = Entry.first;
1739     MI->substituteRegister(UseReg.Reg, NewMI->getOperand(0).getReg(),
1740                            UseReg.SubReg, *TRI);
1741   }
1742   // If we have replaced all uses, then delete the dead instruction
1743   if (I.isDead(*MRI))
1744     I.eraseFromParent();
1745   return true;
1746 }
1747 
1748 /// SinkInstruction - Determine whether it is safe to sink the specified machine
1749 /// instruction out of its current block into a successor.
1750 bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
1751                                      AllSuccsCache &AllSuccessors) {
1752   // Don't sink instructions that the target prefers not to sink.
1753   if (!TII->shouldSink(MI))
1754     return false;
1755 
1756   // Check if it's safe to move the instruction.
1757   if (!MI.isSafeToMove(SawStore))
1758     return false;
1759 
1760   // Convergent operations may not be made control-dependent on additional
1761   // values.
1762   if (MI.isConvergent())
1763     return false;
1764 
1765   // Don't break implicit null checks.  This is a performance heuristic, and not
1766   // required for correctness.
1767   if (SinkingPreventsImplicitNullCheck(MI, TII, TRI))
1768     return false;
1769 
1770   // FIXME: This should include support for sinking instructions within the
1771   // block they are currently in to shorten the live ranges.  We often get
1772   // instructions sunk into the top of a large block, but it would be better to
1773   // also sink them down before their first use in the block.  This xform has to
1774   // be careful not to *increase* register pressure though, e.g. sinking
1775   // "x = y + z" down if it kills y and z would increase the live ranges of y
1776   // and z and only shrink the live range of x.
1777 
1778   bool BreakPHIEdge = false;
1779   MachineBasicBlock *ParentBlock = MI.getParent();
1780   MachineBasicBlock *SuccToSinkTo =
1781       FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);
1782 
1783   // Bail out if we couldn't find a successor to sink to.
1784   if (!SuccToSinkTo)
1785     return false;
1786 
1787   // If the instruction to move defines a dead physical register which is live
1788   // when leaving the basic block, don't move it because it could turn into a
1789   // "zombie" define of that preg. E.g., EFLAGS.
1790   for (const MachineOperand &MO : MI.all_defs()) {
1791     Register Reg = MO.getReg();
1792     if (Reg == 0 || !Reg.isPhysical())
1793       continue;
1794     if (SuccToSinkTo->isLiveIn(Reg))
1795       return false;
1796   }
1797 
1798   LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo);
1799 
1800   // If the block has multiple predecessors, this is a critical edge.
1801   // Decide if we can sink along it or need to break the edge.
1802   if (SuccToSinkTo->pred_size() > 1) {
1803     // We cannot sink a load across a critical edge - there may be stores in
1804     // other code paths.
1805     bool TryBreak = false;
1806     bool Store =
1807         MI.mayLoad() ? hasStoreBetween(ParentBlock, SuccToSinkTo, MI) : true;
1808     if (!MI.isSafeToMove(Store)) {
1809       LLVM_DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
1810       TryBreak = true;
1811     }
1812 
1813     // We don't want to sink across a critical edge if we don't dominate the
1814     // successor. We could be introducing calculations into new code paths.
1815     if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
1816       LLVM_DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
1817       TryBreak = true;
1818     }
1819 
1820     // Don't sink instructions into a cycle.
1821     if (!TryBreak && CI->getCycle(SuccToSinkTo) &&
1822         (!CI->getCycle(SuccToSinkTo)->isReducible() ||
1823          CI->getCycle(SuccToSinkTo)->getHeader() == SuccToSinkTo)) {
1824       LLVM_DEBUG(dbgs() << " *** NOTE: cycle header found\n");
1825       TryBreak = true;
1826     }
1827 
1828     // Otherwise we are OK with sinking along a critical edge.
1829     if (!TryBreak)
1830       LLVM_DEBUG(dbgs() << "Sinking along critical edge.\n");
1831     else {
1832       // Mark this edge as to be split.
1833       // If the edge can actually be split, the next iteration of the main loop
1834       // will sink MI in the newly created block.
1835       bool Status = PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo,
1836                                               BreakPHIEdge);
1837       if (!Status)
1838         LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
1839                              "break critical edge\n");
1840       // The instruction will not be sunk this time.
1841       return false;
1842     }
1843   }
1844 
1845   if (BreakPHIEdge) {
1846     // BreakPHIEdge is true if all the uses are in the successor MBB being
1847     // sunk into and they are all PHI nodes. In this case, machine-sink must
1848     // break the critical edge first.
1849     bool Status =
1850         PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
1851     if (!Status)
1852       LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
1853                            "break critical edge\n");
1854     // The instruction will not be sunk this time.
1855     return false;
1856   }
1857 
1858   // Determine where to insert the instruction. Skip PHI nodes.
1859   MachineBasicBlock::iterator InsertPos =
1860       SuccToSinkTo->SkipPHIsAndLabels(SuccToSinkTo->begin());
1861   if (blockPrologueInterferes(SuccToSinkTo, InsertPos, MI, TRI, TII, MRI)) {
1862     LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n");
1863     return false;
1864   }
1865 
1866   // Collect debug users of any vreg that this inst defines.
1867   SmallVector<MIRegs, 4> DbgUsersToSink;
1868   for (auto &MO : MI.all_defs()) {
1869     if (!MO.getReg().isVirtual())
1870       continue;
1871     auto It = SeenDbgUsers.find(MO.getReg());
1872     if (It == SeenDbgUsers.end())
1873       continue;
1874 
1875     // Sink any users that don't pass any other DBG_VALUEs for this variable.
1876     auto &Users = It->second;
1877     for (auto &User : Users) {
1878       MachineInstr *DbgMI = User.getPointer();
1879       if (User.getInt()) {
1880         // This DBG_VALUE would re-order assignments. If we can't copy-propagate
1881         // it, it can't be recovered. Set it undef.
1882         if (!attemptDebugCopyProp(MI, *DbgMI, MO.getReg()))
1883           DbgMI->setDebugValueUndef();
1884       } else {
1885         DbgUsersToSink.push_back(
1886             {DbgMI, SmallVector<unsigned, 2>(1, MO.getReg())});
1887       }
1888     }
1889   }
1890 
1891   // After sinking, some debug users may not be dominated any more. If possible,
1892   // copy-propagate their operands. As it's expensive, don't do this if there's
1893   // no debuginfo in the program.
1894   if (MI.getMF()->getFunction().getSubprogram() && MI.isCopy())
1895     SalvageUnsunkDebugUsersOfCopy(MI, SuccToSinkTo);
1896 
1897   performSink(MI, *SuccToSinkTo, InsertPos, DbgUsersToSink);
1898 
1899   // Conservatively, clear any kill flags, since it's possible that they are no
1900   // longer correct.
1901   // Note that we have to clear the kill flags for any register this instruction
1902   // uses as we may sink over another instruction which currently kills the
1903   // used registers.
1904   for (MachineOperand &MO : MI.all_uses())
1905     RegsToClearKillFlags.insert(MO.getReg()); // Remember to clear kill flags.
1906 
1907   return true;
1908 }
1909 
1910 void MachineSinking::SalvageUnsunkDebugUsersOfCopy(
1911     MachineInstr &MI, MachineBasicBlock *TargetBlock) {
1912   assert(MI.isCopy());
1913   assert(MI.getOperand(1).isReg());
1914 
1915   // Enumerate all users of vreg operands that are def'd. Skip those that will
1916   // be sunk. For the rest, if they are not dominated by the block we will sink
1917   // MI into, propagate the copy source to them.
1918   SmallVector<MachineInstr *, 4> DbgDefUsers;
1919   SmallVector<Register, 4> DbgUseRegs;
1920   const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1921   for (auto &MO : MI.all_defs()) {
1922     if (!MO.getReg().isVirtual())
1923       continue;
1924     DbgUseRegs.push_back(MO.getReg());
1925     for (auto &User : MRI.use_instructions(MO.getReg())) {
1926       if (!User.isDebugValue() || DT->dominates(TargetBlock, User.getParent()))
1927         continue;
1928 
1929       // If in the same block, it will either be sunk or be use-before-def.
1930       if (User.getParent() == MI.getParent())
1931         continue;
1932 
1933       assert(User.hasDebugOperandForReg(MO.getReg()) &&
1934              "DBG_VALUE user of vreg, but has no operand for it?");
1935       DbgDefUsers.push_back(&User);
1936     }
1937   }
1938 
1939   // Point the users of this copy that are no longer dominated, at the source
1940   // of the copy.
1941   for (auto *User : DbgDefUsers) {
1942     for (auto &Reg : DbgUseRegs) {
1943       for (auto &DbgOp : User->getDebugOperandsForReg(Reg)) {
1944         DbgOp.setReg(MI.getOperand(1).getReg());
1945         DbgOp.setSubReg(MI.getOperand(1).getSubReg());
1946       }
1947     }
1948   }
1949 }
1950 
1951 //===----------------------------------------------------------------------===//
1952 // This pass is not intended to be a replacement or a complete alternative
1953 // for the pre-ra machine sink pass. It is only designed to sink COPY
1954 // instructions which should be handled after RA.
1955 //
1956 // This pass sinks COPY instructions into a successor block, if the COPY is not
1957 // used in the current block and the COPY is live-in to a single successor
1958 // (i.e., doesn't require the COPY to be duplicated).  This avoids executing the
1959 // copy on paths where its result isn't needed.  This also exposes
1960 // additional opportunities for dead copy elimination and shrink wrapping.
1961 //
1962 // These copies were either not handled by or are inserted after the MachineSink
1963 // pass. As an example of the former case, the MachineSink pass cannot sink
1964 // COPY instructions with allocatable source registers; for AArch64 these kinds
1965 // of copy instructions are frequently used to move function parameters (PhysReg)
1966 // into virtual registers in the entry block.
1967 //
1968 // For the machine IR below, this pass will sink %w19 in the entry block into
1969 // its successor (%bb.1) because %w19 is live-in only in %bb.1.
1970 // %bb.0:
1971 //   %wzr = SUBSWri %w1, 1
1972 //   %w19 = COPY %w0
1973 //   Bcc 11, %bb.2
1974 // %bb.1:
1975 //   Live Ins: %w19
1976 //   BL @fun
1977 //   %w0 = ADDWrr %w0, %w19
1978 //   RET %w0
1979 // %bb.2:
1980 //   %w0 = COPY %wzr
1981 //   RET %w0
1982 // As we sink %w19 (CSR in AArch64) into %bb.1, the shrink-wrapping pass will be
1983 // able to see %bb.0 as a candidate.
1984 //===----------------------------------------------------------------------===//
1985 namespace {
1986 
1987 class PostRAMachineSinking : public MachineFunctionPass {
1988 public:
1989   bool runOnMachineFunction(MachineFunction &MF) override;
1990 
1991   static char ID;
1992   PostRAMachineSinking() : MachineFunctionPass(ID) {}
1993   StringRef getPassName() const override { return "PostRA Machine Sink"; }
1994 
1995   void getAnalysisUsage(AnalysisUsage &AU) const override {
1996     AU.setPreservesCFG();
1997     MachineFunctionPass::getAnalysisUsage(AU);
1998   }
1999 
2000   MachineFunctionProperties getRequiredProperties() const override {
2001     return MachineFunctionProperties().set(
2002         MachineFunctionProperties::Property::NoVRegs);
2003   }
2004 
2005 private:
2006   /// Track which register units have been modified and used.
2007   LiveRegUnits ModifiedRegUnits, UsedRegUnits;
2008 
2009   /// Track DBG_VALUEs of (unmodified) register units. Each DBG_VALUE has an
2010   /// entry in this map for each unit it touches. The DBG_VALUE's entry
2011   /// consists of a pointer to the instruction itself, and a vector of registers
2012   /// referred to by the instruction that overlap the key register unit.
2013   DenseMap<unsigned, SmallVector<MIRegs, 2>> SeenDbgInstrs;
2014 
2015   /// Sink Copy instructions unused in the same block close to their uses in
2016   /// successors.
2017   bool tryToSinkCopy(MachineBasicBlock &BB, MachineFunction &MF,
2018                      const TargetRegisterInfo *TRI, const TargetInstrInfo *TII);
2019 };
2020 } // namespace
2021 
2022 char PostRAMachineSinking::ID = 0;
2023 char &llvm::PostRAMachineSinkingID = PostRAMachineSinking::ID;
2024 
2025 INITIALIZE_PASS(PostRAMachineSinking, "postra-machine-sink",
2026                 "PostRA Machine Sink", false, false)
2027 
2028 static bool aliasWithRegsInLiveIn(MachineBasicBlock &MBB, unsigned Reg,
2029                                   const TargetRegisterInfo *TRI) {
2030   LiveRegUnits LiveInRegUnits(*TRI);
2031   LiveInRegUnits.addLiveIns(MBB);
2032   return !LiveInRegUnits.available(Reg);
2033 }
2034 
2035 static MachineBasicBlock *
2036 getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
2037                       const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
2038                       unsigned Reg, const TargetRegisterInfo *TRI) {
2039   // Try to find a single sinkable successor in which Reg is live-in.
2040   MachineBasicBlock *BB = nullptr;
2041   for (auto *SI : SinkableBBs) {
2042     if (aliasWithRegsInLiveIn(*SI, Reg, TRI)) {
2043       // If BB is set here, Reg is live-in to at least two sinkable successors,
2044       // so quit.
2045       if (BB)
2046         return nullptr;
2047       BB = SI;
2048     }
2049   }
2050   // Reg is not live-in to any sinkable successors.
2051   if (!BB)
2052     return nullptr;
2053 
2054   // Check if any register aliased with Reg is live-in in other successors.
2055   for (auto *SI : CurBB.successors()) {
2056     if (!SinkableBBs.count(SI) && aliasWithRegsInLiveIn(*SI, Reg, TRI))
2057       return nullptr;
2058   }
2059   return BB;
2060 }
2061 
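/// Overload over all registers defined by the copy: return the unique
/// sinkable successor that is the single live-in successor for every register
/// in DefedRegsInCopy, or null if no such common successor exists.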
2062 static MachineBasicBlock *
2063 getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
2064                       const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
2065                       ArrayRef<unsigned> DefedRegsInCopy,
2066                       const TargetRegisterInfo *TRI) {
2067   MachineBasicBlock *SingleBB = nullptr;
2068   for (auto DefReg : DefedRegsInCopy) {
2069     MachineBasicBlock *BB =
2070         getSingleLiveInSuccBB(CurBB, SinkableBBs, DefReg, TRI);
2071     if (!BB || (SingleBB && SingleBB != BB))
2072       return nullptr;
2073     SingleBB = BB;
2074   }
2075   return SingleBB;
2076 }
2077 
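/// The copy is about to be sunk out of CurBB. If one of its source registers
/// is killed by a later instruction in the block, transfer that kill flag
/// onto the copy's use operand, since after sinking the copy will execute
/// after the instruction that currently carries the kill.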
2078 static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB,
2079                            SmallVectorImpl<unsigned> &UsedOpsInCopy,
2080                            LiveRegUnits &UsedRegUnits,
2081                            const TargetRegisterInfo *TRI) {
2082   for (auto U : UsedOpsInCopy) {
2083     MachineOperand &MO = MI->getOperand(U);
2084     Register SrcReg = MO.getReg();
2085     if (!UsedRegUnits.available(SrcReg)) {
2086       MachineBasicBlock::iterator NI = std::next(MI->getIterator());
2087       for (MachineInstr &UI : make_range(NI, CurBB.end())) {
2088         if (UI.killsRegister(SrcReg, TRI)) {
2089           UI.clearRegisterKills(SrcReg, TRI);
2090           MO.setIsKill(true);
2091           break;
2092         }
2093       }
2094     }
2095   }
2096 }
2097 
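/// Update SuccBB's live-in list after sinking the copy MI into it: registers
/// defined by the copy (and their subregisters) stop being live-in, while the
/// copy's source registers become live-in.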
2098 static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB,
2099                          SmallVectorImpl<unsigned> &UsedOpsInCopy,
2100                          SmallVectorImpl<unsigned> &DefedRegsInCopy) {
2101   MachineFunction &MF = *SuccBB->getParent();
2102   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
2103   for (unsigned DefReg : DefedRegsInCopy)
2104     for (MCPhysReg S : TRI->subregs_inclusive(DefReg))
2105       SuccBB->removeLiveIn(S);
2106   for (auto U : UsedOpsInCopy)
2107     SuccBB->addLiveIn(MI->getOperand(U).getReg());
2108   SuccBB->sortUniqueLiveIns();
2109 }
2110 
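/// Return true if MI has a register dependency that prevents it from being
/// sunk past the instructions below it in the block: a def of a register that
/// is modified or used later in the block, or a use of a register that is
/// modified later in the block. The operand indexes of uses are accumulated
/// in UsedOpsInCopy and the defined registers in DefedRegsInCopy as the
/// operands are examined.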
2111 static bool hasRegisterDependency(MachineInstr *MI,
2112                                   SmallVectorImpl<unsigned> &UsedOpsInCopy,
2113                                   SmallVectorImpl<unsigned> &DefedRegsInCopy,
2114                                   LiveRegUnits &ModifiedRegUnits,
2115                                   LiveRegUnits &UsedRegUnits) {
2116   bool HasRegDependency = false;
2117   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2118     MachineOperand &MO = MI->getOperand(i);
2119     if (!MO.isReg())
2120       continue;
2121     Register Reg = MO.getReg();
2122     if (!Reg)
2123       continue;
2124     if (MO.isDef()) {
2125       if (!ModifiedRegUnits.available(Reg) || !UsedRegUnits.available(Reg)) {
2126         HasRegDependency = true;
2127         break;
2128       }
2129       DefedRegsInCopy.push_back(Reg);
2130 
2131       // FIXME: Instead of isUse(), readsReg() would be a better fix here. For
2132       // example, modifications of a register used only as undef could then be
2133       // ignored. However, it's not perfectly clear whether skipping the
2134       // internal read is safe in all other targets.
2135     } else if (MO.isUse()) {
2136       if (!ModifiedRegUnits.available(Reg)) {
2137         HasRegDependency = true;
2138         break;
2139       }
2140       UsedOpsInCopy.push_back(i);
2141     }
2142   }
2143   return HasRegDependency;
2144 }
2145 
2146 bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
2147                                          MachineFunction &MF,
2148                                          const TargetRegisterInfo *TRI,
2149                                          const TargetInstrInfo *TII) {
2150   SmallPtrSet<MachineBasicBlock *, 2> SinkableBBs;
2151   // FIXME: For now, we sink only to a successor which has a single predecessor
2152   // so that we can directly sink COPY instructions to the successor without
2153   // adding any new block or branch instruction.
2154   for (MachineBasicBlock *SI : CurBB.successors())
2155     if (!SI->livein_empty() && SI->pred_size() == 1)
2156       SinkableBBs.insert(SI);
2157 
2158   if (SinkableBBs.empty())
2159     return false;
2160 
2161   bool Changed = false;
2162 
2163   // Track which registers have been modified and used between the end of the
2164   // block and the current instruction.
2165   ModifiedRegUnits.clear();
2166   UsedRegUnits.clear();
2167   SeenDbgInstrs.clear();
2168 
2169   for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(CurBB))) {
2170     // Track the operand indexes of the registers used by the COPY.
2171     SmallVector<unsigned, 2> UsedOpsInCopy;
2172     // Track the register numbers defined by the COPY.
2173     SmallVector<unsigned, 2> DefedRegsInCopy;
2174 
2175     // We must sink this DBG_VALUE if its operand is sunk. To avoid searching
2176     // for DBG_VALUEs later, record them when they're encountered.
2177     if (MI.isDebugValue() && !MI.isDebugRef()) {
2178       SmallDenseMap<MCRegister, SmallVector<unsigned, 2>, 4> MIUnits;
2179       bool IsValid = true;
2180       for (MachineOperand &MO : MI.debug_operands()) {
2181         if (MO.isReg() && MO.getReg().isPhysical()) {
2182           // Bail if we can already tell the sink would be rejected, rather
2183           // than needlessly accumulating lots of DBG_VALUEs.
2184           if (hasRegisterDependency(&MI, UsedOpsInCopy, DefedRegsInCopy,
2185                                     ModifiedRegUnits, UsedRegUnits)) {
2186             IsValid = false;
2187             break;
2188           }
2189 
2190           // Record debug use of each reg unit.
2191           for (MCRegUnit Unit : TRI->regunits(MO.getReg()))
2192             MIUnits[Unit].push_back(MO.getReg());
2193         }
2194       }
2195       if (IsValid) {
2196         for (auto &RegOps : MIUnits)
2197           SeenDbgInstrs[RegOps.first].emplace_back(&MI,
2198                                                    std::move(RegOps.second));
2199       }
2200       continue;
2201     }
2202 
2203     if (MI.isDebugOrPseudoInstr())
2204       continue;
2205 
2206     // Do not move any instruction across a function call.
2207     if (MI.isCall())
2208       return false;
2209 
2210     if (!MI.isCopy() || !MI.getOperand(0).isRenamable()) {
2211       LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
2212                                         TRI);
2213       continue;
2214     }
2215 
2216     // Don't sink the COPY if it would violate a register dependency.
2217     if (hasRegisterDependency(&MI, UsedOpsInCopy, DefedRegsInCopy,
2218                               ModifiedRegUnits, UsedRegUnits)) {
2219       LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
2220                                         TRI);
2221       continue;
2222     }
2223     assert((!UsedOpsInCopy.empty() && !DefedRegsInCopy.empty()) &&
2224            "Unexpected SrcReg or DefReg");
2225     MachineBasicBlock *SuccBB =
2226         getSingleLiveInSuccBB(CurBB, SinkableBBs, DefedRegsInCopy, TRI);
2227     // Don't sink if we cannot find a single sinkable successor in which Reg
2228     // is live-in.
2229     if (!SuccBB) {
2230       LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
2231                                         TRI);
2232       continue;
2233     }
2234     assert((SuccBB->pred_size() == 1 && *SuccBB->pred_begin() == &CurBB) &&
2235            "Unexpected predecessor");
2236 
2237     // Collect DBG_VALUEs that must sink with this copy. We've previously
2238     // recorded which reg units the DBG_VALUEs read; if this instruction
2239     // writes any of those units, then the corresponding DBG_VALUEs must sink.
2240     MapVector<MachineInstr *, MIRegs::second_type> DbgValsToSinkMap;
2241     for (auto &MO : MI.all_defs()) {
2242       for (MCRegUnit Unit : TRI->regunits(MO.getReg())) {
2243         for (const auto &MIRegs : SeenDbgInstrs.lookup(Unit)) {
2244           auto &Regs = DbgValsToSinkMap[MIRegs.first];
2245           for (unsigned Reg : MIRegs.second)
2246             Regs.push_back(Reg);
2247         }
2248       }
2249     }
2250     auto DbgValsToSink = DbgValsToSinkMap.takeVector();
2251 
2252     LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccBB);
2253 
2254     MachineBasicBlock::iterator InsertPos =
2255         SuccBB->SkipPHIsAndLabels(SuccBB->begin());
2256     if (blockPrologueInterferes(SuccBB, InsertPos, MI, TRI, TII, nullptr)) {
2257       LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
2258                                         TRI);
2259       LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n");
2260       continue;
2261     }
2262 
2263     // Clear the kill flag if SrcReg is killed between MI and the end of the
2264     // block.
2265     clearKillFlags(&MI, CurBB, UsedOpsInCopy, UsedRegUnits, TRI);
2266     performSink(MI, *SuccBB, InsertPos, DbgValsToSink);
2267     updateLiveIn(&MI, SuccBB, UsedOpsInCopy, DefedRegsInCopy);
2268 
2269     Changed = true;
2270     ++NumPostRACopySink;
2271   }
2272   return Changed;
2273 }
2274 
2275 bool PostRAMachineSinking::runOnMachineFunction(MachineFunction &MF) {
2276   if (skipFunction(MF.getFunction()))
2277     return false;
2278 
2279   bool Changed = false;
2280   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
2281   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
2282 
2283   ModifiedRegUnits.init(*TRI);
2284   UsedRegUnits.init(*TRI);
2285   for (auto &BB : MF)
2286     Changed |= tryToSinkCopy(BB, MF, TRI, TII);
2287 
2288   return Changed;
2289 }
2290