//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set to the code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETINSTRINFO_H
#define LLVM_CODEGEN_TARGETINSTRINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Uniformity.h"
#include "llvm/CodeGen/MIRFormatter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineCycleAnalysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOutliner.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/ErrorHandling.h"
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

namespace llvm {

class DFAPacketizer;
class InstrItineraryData;
class LiveIntervals;
class LiveVariables;
class MachineLoop;
class MachineMemOperand;
class MachineModuleInfo;
class MachineRegisterInfo;
class MCAsmInfo;
class MCInst;
struct MCSchedModel;
class Module;
class ScheduleDAG;
class ScheduleDAGMI;
class ScheduleHazardRecognizer;
class SDNode;
class SelectionDAG;
class SMSchedule;
class SwingSchedulerDAG;
class RegScavenger;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetSchedModel;
class TargetSubtargetInfo;
enum class MachineTraceStrategy;

template <class T> class SmallVectorImpl;

using ParamLoadedValue = std::pair<MachineOperand, DIExpression*>;

struct DestSourcePair {
  const MachineOperand *Destination;
  const MachineOperand *Source;

  DestSourcePair(const MachineOperand &Dest, const MachineOperand &Src)
      : Destination(&Dest), Source(&Src) {}
};

/// Used to describe a register and immediate addition.
struct RegImmPair {
  Register Reg;
  int64_t Imm;

  RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {}
};

/// Used to describe an addressing mode similar to ExtAddrMode in
/// CodeGenPrepare. It holds the register values, the scale value and the
/// displacement. It also holds a descriptor for the expression used to
/// calculate the address from the operands.
struct ExtAddrMode {
  enum class Formula {
    Basic = 0,         // BaseReg + ScaledReg * Scale + Displacement
    SExtScaledReg = 1, // BaseReg + sext(ScaledReg) * Scale + Displacement
    ZExtScaledReg = 2  // BaseReg + zext(ScaledReg) * Scale + Displacement
  };

  Register BaseReg;
  Register ScaledReg;
  int64_t Scale = 0;
  int64_t Displacement = 0;
  Formula Form = Formula::Basic;
  ExtAddrMode() = default;
};
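
// Illustrative only (not part of the upstream header): how a scaled-index
// load might decompose under ExtAddrMode. The opcode and operand layout are
// hypothetical; a target hook such as getAddrModeFromMemoryOp (not shown in
// this excerpt) would fill the struct roughly like this:
//
//   // For a load like "ldr %dst, [%base, %idx, sxtw #2]":
//   ExtAddrMode AM;
//   AM.BaseReg = BaseReg;                          // %base
//   AM.ScaledReg = IdxReg;                         // %idx
//   AM.Scale = 4;                                  // shift #2 => scale 4
//   AM.Displacement = 0;
//   AM.Form = ExtAddrMode::Formula::SExtScaledReg;
//   // Address = BaseReg + sext(ScaledReg) * 4 + 0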

//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
public:
  TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
                  unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
      : CallFrameSetupOpcode(CFSetupOpcode),
        CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
        ReturnOpcode(ReturnOpcode) {}
  TargetInstrInfo(const TargetInstrInfo &) = delete;
  TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
  virtual ~TargetInstrInfo();

  static bool isGenericOpcode(unsigned Opc) {
    return Opc <= TargetOpcode::GENERIC_OP_END;
  }

  static bool isGenericAtomicRMWOpcode(unsigned Opc) {
    return Opc >= TargetOpcode::GENERIC_ATOMICRMW_OP_START &&
           Opc <= TargetOpcode::GENERIC_ATOMICRMW_OP_END;
  }

  /// Given a machine instruction descriptor, returns the register
  /// class constraint for OpNum, or NULL.
  virtual
  const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                                         const TargetRegisterInfo *TRI,
                                         const MachineFunction &MF) const;

  /// Returns true if MI is an instruction we are unable to reason about
  /// (like a call or something with unmodeled side effects).
  virtual bool isGlobalMemoryObject(const MachineInstr *MI) const;

  /// Return true if the instruction is trivially rematerializable, meaning it
  /// has no side effects and requires no operands that aren't always available.
  /// This means the only allowed uses are constants and unallocatable physical
  /// registers so that the instruction's result is independent of its place
  /// in the function.
  bool isTriviallyReMaterializable(const MachineInstr &MI) const {
    return (MI.getOpcode() == TargetOpcode::IMPLICIT_DEF &&
            MI.getNumOperands() == 1) ||
           (MI.getDesc().isRematerializable() &&
            isReallyTriviallyReMaterializable(MI));
  }

  /// Given that \p MO is a PhysReg use, return true if it can be ignored for
  /// the purpose of instruction rematerialization or sinking.
  virtual bool isIgnorableUse(const MachineOperand &MO) const {
    return false;
  }

  virtual bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo,
                            MachineCycleInfo *CI) const {
    return true;
  }

  /// For a "cheap" instruction which doesn't enable additional sinking,
  /// should MachineSink break a critical edge to sink it anyway?
  virtual bool shouldBreakCriticalEdgeToSink(MachineInstr &MI) const {
    return false;
  }

protected:
  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
  /// set, this hook lets the target specify whether the instruction is actually
  /// trivially rematerializable, taking into consideration its operands. This
  /// predicate must return false if the instruction has any side effects other
  /// than producing a value, or if it requires any address registers that are
  /// not always available.
  virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const;

  /// This method commutes the operands of the given machine instruction MI.
  /// The operands to be commuted are specified by their indices OpIdx1 and
  /// OpIdx2.
  ///
  /// If a target has any instructions that are commutable but require
  /// converting to different instructions or making non-trivial changes
  /// to commute them, this method can be overridden to do that.
  /// The default implementation simply swaps the commutable operands.
  ///
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.
  ///
  /// Do not call this method for a non-commutable instruction.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
  virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                               unsigned OpIdx1,
                                               unsigned OpIdx2) const;

  /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
  /// operand indices to (ResultIdx1, ResultIdx2).
  /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
  /// predefined to some indices or be undefined (designated by the special
  /// value 'CommuteAnyOperandIndex').
  /// The predefined result indices cannot be re-defined.
  /// The function returns true iff after the result pair redefinition
  /// the fixed result pair is equal to or equivalent to the source pair of
  /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
  /// the pairs (x,y) and (y,x) are equivalent.
  static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
                                   unsigned CommutableOpIdx1,
                                   unsigned CommutableOpIdx2);

public:
  /// These methods return the opcode of the frame setup/destroy instructions
  /// if they exist (-1 otherwise).  Some targets use pseudo instructions in
  /// order to abstract away the difference between operating with a frame
  /// pointer and operating without, through the use of these two instructions.
  /// A FrameSetup MI in MF implies MFI::AdjustsStack.
  ///
  unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
  unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }

  /// Returns true if the argument is a frame pseudo instruction.
  bool isFrameInstr(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode() ||
           I.getOpcode() == getCallFrameDestroyOpcode();
  }

  /// Returns true if the argument is a frame setup pseudo instruction.
  bool isFrameSetup(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode();
  }

  /// Returns the size of the frame associated with the given frame
  /// instruction. For a frame setup instruction this is the frame space set up
  /// by the instruction. For a frame destroy instruction this is the frame
  /// freed by the caller.
  /// Note, in some cases a call frame (or a part of it) may be prepared prior
  /// to the frame setup instruction. It occurs in the calls that involve
  /// inalloca arguments. This function reports only the size of the frame part
  /// that is set up between the frame setup and destroy pseudo instructions.
  int64_t getFrameSize(const MachineInstr &I) const {
    assert(isFrameInstr(I) && "Not a frame instruction");
    assert(I.getOperand(0).getImm() >= 0);
    return I.getOperand(0).getImm();
  }

  /// Returns the total frame size, which is made up of the space set up inside
  /// the pair of frame start-stop instructions and the space that is set up
  /// prior to the pair.
  int64_t getFrameTotalSize(const MachineInstr &I) const {
    if (isFrameSetup(I)) {
      assert(I.getOperand(1).getImm() >= 0 &&
             "Frame size must not be negative");
      return getFrameSize(I) + I.getOperand(1).getImm();
    }
    return getFrameSize(I);
  }
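
// Illustrative caller-side sketch (not part of the upstream header): walking
// a block and measuring call-frame sizes with the accessors above; TII is
// assumed to be a const TargetInstrInfo *.
//
//   for (const MachineInstr &MI : MBB)
//     if (TII->isFrameInstr(MI)) {
//       int64_t Inner = TII->getFrameSize(MI);      // between setup/destroy
//       int64_t Total = TII->getFrameTotalSize(MI); // plus any inalloca part
//     }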

  unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
  unsigned getReturnOpcode() const { return ReturnOpcode; }

  /// Returns the actual stack pointer adjustment made by an instruction
  /// as part of a call sequence. By default, only call frame setup/destroy
  /// instructions adjust the stack, but targets may want to override this
  /// to enable more fine-grained adjustment, or adjust by a different value.
  virtual int getSPAdjust(const MachineInstr &MI) const;

  /// Return true if the instruction is a "coalescable" extension instruction.
  /// That is, it's like a copy where it's legal for the source to overlap the
  /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
  /// expected the pre-extension value is available as a subreg of the result
  /// register. This also returns the sub-register index in SubIdx.
  virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                                     Register &DstReg, unsigned &SubIdx) const {
    return false;
  }

  /// If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number of
  /// the destination along with the FrameIndex of the loaded stack slot.  If
  /// not, return 0.  This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  virtual Register isLoadFromStackSlot(const MachineInstr &MI,
                                       int &FrameIndex) const {
    return 0;
  }

  /// Optional extension of isLoadFromStackSlot that returns the number of
  /// bytes loaded from the stack. This must be implemented if a backend
  /// supports partial stack slot spills/loads to further disambiguate
  /// what the load does.
  virtual Register isLoadFromStackSlot(const MachineInstr &MI,
                                       int &FrameIndex,
                                       unsigned &MemBytes) const {
    MemBytes = 0;
    return isLoadFromStackSlot(MI, FrameIndex);
  }
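
// Illustrative only: a typical shape for a target override, assuming a
// hypothetical opcode MyTarget::LDRxFI whose operand 1 is a frame index and
// operand 2 an immediate offset:
//
//   Register MyInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
//                                             int &FrameIndex) const {
//     if (MI.getOpcode() == MyTarget::LDRxFI && MI.getOperand(1).isFI() &&
//         MI.getOperand(2).getImm() == 0) {
//       FrameIndex = MI.getOperand(1).getIndex();
//       return MI.getOperand(0).getReg();
//     }
//     return Register(); // Not a direct stack-slot load.
//   }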

  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic so it isn't reliable for correctness.
  virtual Register isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                             int &FrameIndex) const {
    return 0;
  }

  /// If the specified machine instruction has a load from a stack slot,
  /// return true along with the FrameIndices of the loaded stack slot and the
  /// machine mem operands containing the reference.
  /// If not, return false.  Unlike isLoadFromStackSlot, this returns true for
  /// any instruction that loads from the stack.  This is just a hint, as some
  /// cases may be missed.
  virtual bool hasLoadFromStackSlot(
      const MachineInstr &MI,
      SmallVectorImpl<const MachineMemOperand *> &Accesses) const;

  /// If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the stack slot being stored
  /// to.  If not, return 0.  This predicate must return 0 if the instruction
  /// has any side effects other than storing to the stack slot.
  virtual Register isStoreToStackSlot(const MachineInstr &MI,
                                      int &FrameIndex) const {
    return 0;
  }

  /// Optional extension of isStoreToStackSlot that returns the number of
  /// bytes stored to the stack. This must be implemented if a backend
  /// supports partial stack slot spills/loads to further disambiguate
  /// what the store does.
  virtual Register isStoreToStackSlot(const MachineInstr &MI,
                                      int &FrameIndex,
                                      unsigned &MemBytes) const {
    MemBytes = 0;
    return isStoreToStackSlot(MI, FrameIndex);
  }

  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic, so it isn't reliable for correctness.
  virtual Register isStoreToStackSlotPostFE(const MachineInstr &MI,
                                            int &FrameIndex) const {
    return 0;
  }

  /// If the specified machine instruction has a store to a stack slot,
  /// return true along with the FrameIndices of the accessed stack slot and
  /// the machine mem operands containing the reference.
  /// If not, return false.  Unlike isStoreToStackSlot,
  /// this returns true for any instruction that stores to the
  /// stack.  This is just a hint, as some cases may be missed.
  virtual bool hasStoreToStackSlot(
      const MachineInstr &MI,
      SmallVectorImpl<const MachineMemOperand *> &Accesses) const;

  /// Return true if the specified machine instruction
  /// is a copy of one stack slot to another and has no other effect.
  /// Provide the identity of the two frame indices.
  virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
                               int &SrcFrameIndex) const {
    return false;
  }

  /// Compute the size in bytes and offset within a stack slot of a spilled
  /// register or subregister.
  ///
  /// \param [out] Size in bytes of the spilled value.
  /// \param [out] Offset in bytes within the stack slot.
  /// \returns true if both Size and Offset are successfully computed.
  ///
  /// Not all subregisters have computable spill slots. For example,
  /// subregisters may not be byte-sized, and a pair of discontiguous
  /// subregisters has no single offset.
  ///
  /// Targets with nontrivial big-endian implementations may need to override
  /// this, particularly to support spilled vector registers.
  virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
                                 unsigned &Size, unsigned &Offset,
                                 const MachineFunction &MF) const;

  /// Return true if the given instruction is a terminator that is unspillable,
  /// according to isUnspillableTerminatorImpl.
  bool isUnspillableTerminator(const MachineInstr *MI) const {
    return MI->isTerminator() && isUnspillableTerminatorImpl(MI);
  }

  /// Returns the size in bytes of the specified MachineInstr, or ~0U
  /// when this function is not implemented by a target.
  virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
    return ~0U;
  }

  /// Return true if the instruction is as cheap as a move instruction.
  ///
  /// Targets for different architectures need to override this, and the
  /// answer can be tuned further for individual micro-architectures.
  virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
    return MI.isAsCheapAsAMove();
  }

  /// Return true if the instruction should be sunk by MachineSink.
  ///
  /// MachineSink determines on its own whether the instruction is safe to
  /// sink; this gives the target a hook to override the default behavior with
  /// regard to which instructions should be sunk.
  virtual bool shouldSink(const MachineInstr &MI) const { return true; }

  /// Return false if the instruction should not be hoisted by MachineLICM.
  ///
  /// MachineLICM determines on its own whether the instruction is safe to
  /// hoist; this gives the target a hook to extend this assessment and prevent
  /// an instruction from being hoisted out of a given loop for target-specific
  /// reasons.
  virtual bool shouldHoist(const MachineInstr &MI,
                           const MachineLoop *FromLoop) const {
    return true;
  }

  /// Re-issue the specified 'original' instruction at the
  /// specified location targeting a new destination register.
  /// The register in Orig->getOperand(0).getReg() will be substituted by
  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
  /// SubIdx.
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI, Register DestReg,
                             unsigned SubIdx, const MachineInstr &Orig,
                             const TargetRegisterInfo &TRI) const;

  /// Clones the instruction or the whole instruction bundle \p Orig and
  /// inserts it into \p MBB before \p InsertBefore. The target may update
  /// operands that are required to be unique.
  ///
  /// \p Orig must not return true for MachineInstr::isNotDuplicable().
  virtual MachineInstr &duplicate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator InsertBefore,
                                  const MachineInstr &Orig) const;

  /// This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
  /// may be able to convert a two-address instruction into one or more true
  /// three-address instructions on demand.  This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the last new instruction.
  ///
  /// If \p LIS is not nullptr, the LiveIntervals info should be updated for
  /// replacing \p MI with new instructions, even though this function does not
  /// remove MI.
  virtual MachineInstr *convertToThreeAddress(MachineInstr &MI,
                                              LiveVariables *LV,
                                              LiveIntervals *LIS) const {
    return nullptr;
  }

  // This constant can be used as an input value of operand index passed to
  // the method findCommutedOpIndices() to tell the method that the
  // corresponding operand index is not pre-defined and that the method
  // can pick any commutable operand.
  static const unsigned CommuteAnyOperandIndex = ~0U;

  /// This method commutes the operands of the given machine instruction MI.
  ///
  /// The operands to be commuted are specified by their indices OpIdx1 and
  /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to the special value
  /// 'CommuteAnyOperandIndex', which means that the method is free to choose
  /// any commutable operand. If both arguments are set to
  /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
  /// operands and commutes them if such operands can be found.
  ///
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.
  ///
  /// Do not call this method for a non-commutable instruction or
  /// for non-commutable operands.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
  MachineInstr *
  commuteInstruction(MachineInstr &MI, bool NewMI = false,
                     unsigned OpIdx1 = CommuteAnyOperandIndex,
                     unsigned OpIdx2 = CommuteAnyOperandIndex) const;
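
// Illustrative caller-side sketch: ask the target to swap operands 1 and 2 in
// place (both indices here are arbitrary examples):
//
//   if (MachineInstr *CommutedMI =
//           TII->commuteInstruction(MI, /*NewMI=*/false, 1, 2)) {
//     // Success: CommutedMI == &MI, with operands 1 and 2 exchanged.
//   } else {
//     // The target declined to commute these operands.
//   }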

  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
  /// If any of the INPUT values is set to the special value
  /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
  /// operand, then returns its index in the corresponding argument.
  /// If both INPUT values are set to 'CommuteAnyOperandIndex' then the method
  /// looks for 2 commutable operands.
  /// If the INPUT values refer to some operands of MI, then the method simply
  /// returns true if the corresponding operands are commutable and returns
  /// false otherwise.
  ///
  /// For example, calling this method this way:
  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///     findCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with operand #1.
  virtual bool findCommutedOpIndices(const MachineInstr &MI,
                                     unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const;

  /// Returns true if the target has a preference on the operand order of the
  /// given machine instruction, and sets \p Commute to indicate whether
  /// commuting is required to get the desired operand order.
  virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const {
    return false;
  }

  /// A pair composed of a register and a sub-register index.
  /// Used to give some type checking when modeling Reg:SubReg.
  struct RegSubRegPair {
    Register Reg;
    unsigned SubReg;

    RegSubRegPair(Register Reg = Register(), unsigned SubReg = 0)
        : Reg(Reg), SubReg(SubReg) {}

    bool operator==(const RegSubRegPair& P) const {
      return Reg == P.Reg && SubReg == P.SubReg;
    }
    bool operator!=(const RegSubRegPair& P) const {
      return !(*this == P);
    }
  };

  /// A pair composed of a pair of a register and a sub-register index,
  /// and another sub-register index.
  /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
  struct RegSubRegPairAndIdx : RegSubRegPair {
    unsigned SubIdx;

    RegSubRegPairAndIdx(Register Reg = Register(), unsigned SubReg = 0,
                        unsigned SubIdx = 0)
        : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
  };

  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
  /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
  /// flag are not added to this list.
  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
  /// two elements:
  /// - %1:sub1, sub0
  /// - %2<:0>, sub1
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isRegSequenceLike(). In other words, one has to override
  /// getRegSequenceLikeInputs for target specific instructions.
  bool
  getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
                       SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;

  /// Build the equivalent inputs of an EXTRACT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
  /// - %1:sub1, sub0
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
  /// False otherwise.
  ///
  /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isExtractSubregLike(). In other words, one has to override
  /// getExtractSubregLikeInputs for target specific instructions.
  bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                              RegSubRegPairAndIdx &InputReg) const;

  /// Build the equivalent inputs of an INSERT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] BaseReg and \p [out] InsertedReg contain
  /// the equivalent inputs of INSERT_SUBREG.
  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
  /// - BaseReg: %0:sub0
  /// - InsertedReg: %1:sub1, sub3
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
  /// False otherwise.
  ///
  /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isInsertSubregLike(). In other words, one has to override
  /// getInsertSubregLikeInputs for target specific instructions.
  bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                             RegSubRegPair &BaseReg,
                             RegSubRegPairAndIdx &InsertedReg) const;

  /// Return true if two machine instructions would produce identical values.
  /// By default, this is only true when the two instructions
  /// are deemed identical except for defs. If this function is called when the
  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
  /// aggressive checks.
  virtual bool produceSameValue(const MachineInstr &MI0,
                                const MachineInstr &MI1,
                                const MachineRegisterInfo *MRI = nullptr) const;

  /// \returns true if a branch from an instruction with opcode \p BranchOpc
  ///  is capable of jumping to a position \p BrOffset bytes away.
  virtual bool isBranchOffsetInRange(unsigned BranchOpc,
                                     int64_t BrOffset) const {
    llvm_unreachable("target did not implement");
  }

  /// \returns The block that branch instruction \p MI jumps to.
  virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const {
    llvm_unreachable("target did not implement");
  }

  /// Insert an unconditional indirect branch at the end of \p MBB to \p
  /// NewDestBB. Optionally, insert code to restore clobbered registers in \p
  /// RestoreBB. \p BrOffset indicates the offset of \p NewDestBB relative to
  /// the offset of the position to insert the new branch.
  virtual void insertIndirectBranch(MachineBasicBlock &MBB,
                                    MachineBasicBlock &NewDestBB,
                                    MachineBasicBlock &RestoreBB,
                                    const DebugLoc &DL, int64_t BrOffset = 0,
                                    RegScavenger *RS = nullptr) const {
    llvm_unreachable("target did not implement");
  }

  /// Analyze the branching code at the end of MBB, returning
  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
  /// implemented for a target).  Upon success, this returns false and returns
  /// with the following information in various cases:
  ///
  /// 1. If this block ends with no branches (it just falls through to its
  ///    successor), return false, leaving TBB/FBB null.
  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
  ///    the destination block.
  /// 3. If this block ends with a conditional branch and it falls through to a
  ///    successor block, it sets TBB to be the branch destination block and
  ///    Cond to a list of operands that evaluate the condition. These operands
  ///    can be passed to other TargetInstrInfo methods to create new branches.
  /// 4. If this block ends with a conditional branch followed by an
  ///    unconditional branch, it returns the 'true' destination in TBB, the
  ///    'false' destination in FBB, and a list of operands that evaluate the
  ///    condition.  These operands can be passed to other TargetInstrInfo
  ///    methods to create new branches.
  ///
  /// Note that removeBranch and insertBranch must be implemented to support
  /// cases where this method returns success.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
  /// before calling this function.
  virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify = false) const {
    return true;
  }
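
// Illustrative caller-side sketch of the analyzeBranch contract (this is the
// pattern used by passes such as BranchFolding):
//
//   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
//   SmallVector<MachineOperand, 4> Cond;
//   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond)) {
//     if (!TBB) {
//       // Case 1: MBB falls through; nothing to rewrite.
//     } else if (Cond.empty()) {
//       // Case 2: unconditional branch to TBB.
//     } else {
//       // Case 3 or 4: conditional branch to TBB; FBB is null when the
//       // false path falls through.
//     }
//   }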

  /// Represents a predicate at the MachineFunction level.  The control flow a
  /// MachineBranchPredicate represents is:
  ///
  ///  Reg = LHS `Predicate` RHS         == ConditionDef
  ///  if Reg then goto TrueDest else goto FalseDest
  ///
  struct MachineBranchPredicate {
    enum ComparePredicate {
      PRED_EQ,     // True if two values are equal
      PRED_NE,     // True if two values are not equal
      PRED_INVALID // Sentinel value
    };

    ComparePredicate Predicate = PRED_INVALID;
    MachineOperand LHS = MachineOperand::CreateImm(0);
    MachineOperand RHS = MachineOperand::CreateImm(0);
    MachineBasicBlock *TrueDest = nullptr;
    MachineBasicBlock *FalseDest = nullptr;
    MachineInstr *ConditionDef = nullptr;

    /// SingleUseCondition is true if ConditionDef is dead except for the
    /// branch(es) at the end of the basic block.
    ///
    bool SingleUseCondition = false;

    explicit MachineBranchPredicate() = default;
  };

  /// Analyze the branching code at the end of MBB and parse it into the
  /// MachineBranchPredicate structure if possible.  Returns false on success
  /// and true on failure.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                                      MachineBranchPredicate &MBP,
                                      bool AllowModify = false) const {
    return true;
  }

  /// Remove the branching code at the end of the specified MBB.
  /// This is only invoked in cases where analyzeBranch returns success. It
  /// returns the number of instructions that were removed.
  /// If \p BytesRemoved is non-null, report the change in code size from the
  /// removed instructions.
  virtual unsigned removeBranch(MachineBasicBlock &MBB,
                                int *BytesRemoved = nullptr) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
  }

  /// Insert branch code into the end of the specified MachineBasicBlock. The
  /// operands to this method are the same as those returned by analyzeBranch.
  /// This is only invoked in cases where analyzeBranch returns success. It
  /// returns the number of instructions inserted. If \p BytesAdded is non-null,
  /// report the change in code size from the added instructions.
  ///
  /// It is also invoked by tail merging to add unconditional branches in
  /// cases where analyzeBranch doesn't apply because there was no original
  /// branch to analyze.  At least this much must be implemented; otherwise
  /// tail merging needs to be disabled.
  ///
  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
  /// before calling this function.
  virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                ArrayRef<MachineOperand> Cond,
                                const DebugLoc &DL,
                                int *BytesAdded = nullptr) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
  }

  unsigned insertUnconditionalBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *DestBB,
                                     const DebugLoc &DL,
                                     int *BytesAdded = nullptr) const {
    return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
                        BytesAdded);
  }
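
// Illustrative only: the canonical remove-then-insert pattern for retargeting
// a block's terminators once analyzeBranch has succeeded (NewTBB, Cond, and
// DL come from the surrounding pass):
//
//   TII->removeBranch(MBB);
//   TII->insertBranch(MBB, NewTBB, /*FBB=*/nullptr, Cond, DL);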

  /// Object returned by analyzeLoopForPipelining. Allows software pipelining
  /// implementations to query attributes of the loop being pipelined and to
  /// apply target-specific updates to the loop once pipelining is complete.
  class PipelinerLoopInfo {
  public:
    virtual ~PipelinerLoopInfo();
    /// Return true if the given instruction should not be pipelined and should
    /// be ignored. An example could be a loop comparison, or induction variable
    /// update with no users being pipelined.
    virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;

    /// Return true if the proposed schedule should be used.  Otherwise return
    /// false to not pipeline the loop. This function should be used to ensure
    /// that pipelined loops meet target-specific quality heuristics.
    virtual bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS) {
      return true;
    }

    /// Create a condition to determine if the trip count of the loop is greater
    /// than TC, where TC is always one more than for the previous prologue or
    /// 0 if this is being called for the outermost prologue.
    ///
    /// If the trip count is statically known to be greater than TC, return
    /// true. If the trip count is statically known to be not greater than TC,
    /// return false. Otherwise return nullopt and fill out Cond with the test
    /// condition.
    ///
    /// Note: This hook is guaranteed to be called from the innermost to the
    /// outermost prologue of the loop being software pipelined.
    virtual std::optional<bool>
    createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
                                    SmallVectorImpl<MachineOperand> &Cond) = 0;

    /// Create a condition to determine if the remaining trip count for a phase
    /// is greater than TC. Some instructions such as comparisons may be
    /// inserted at the bottom of MBB. All instructions expanded for the
    /// phase must be inserted in MBB before calling this function.
    /// LastStage0Insts is the map from the original instructions scheduled at
    /// stage#0 to the expanded instructions for the last iteration of the
    /// kernel. LastStage0Insts is intended to obtain the instruction that
    /// refers to the latest loop counter value.
    ///
    /// MBB can also be a predecessor of the prologue block. Then
    /// LastStage0Insts must be empty and the compared value is the initial
    /// value of the trip count.
    virtual void createRemainingIterationsGreaterCondition(
        int TC, MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &Cond,
        DenseMap<MachineInstr *, MachineInstr *> &LastStage0Insts) {
      llvm_unreachable(
          "Target didn't implement "
          "PipelinerLoopInfo::createRemainingIterationsGreaterCondition!");
    }

    /// Modify the loop such that the trip count is
    /// OriginalTC + TripCountAdjust.
    virtual void adjustTripCount(int TripCountAdjust) = 0;

    /// Called when the loop's preheader has been modified to NewPreheader.
    virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;

    /// Called when the loop is being removed. Any instructions in the preheader
    /// should be removed.
    ///
    /// Once this function is called, no other functions on this object are
    /// valid; the loop has been removed.
    virtual void disposed(LiveIntervals *LIS = nullptr) {}

    /// Return true if the target can expand a pipelined schedule with modulo
    /// variable expansion.
    virtual bool isMVEExpanderSupported() { return false; }
  };

  /// Analyze loop L, which must be a single-basic-block loop, and if the
  /// conditions can be understood well enough, produce a PipelinerLoopInfo
  /// object.
  virtual std::unique_ptr<PipelinerLoopInfo>
  analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
    return nullptr;
  }

  /// Analyze the loop code, return true if it cannot be understood. Upon
  /// success, this function returns false and returns information about the
  /// induction variable and compare instruction used at the end.
  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
                           MachineInstr *&CmpInst) const {
    return true;
  }

  /// Generate code to reduce the loop iteration count by one and check if the
  /// loop is finished.  Return the value/register of the new loop count.  We
  /// need this function when peeling off one or more iterations of a loop.
  /// This function assumes the nth iteration is peeled first.
  virtual unsigned reduceLoopCount(MachineBasicBlock &MBB,
                                   MachineBasicBlock &PreHeader,
                                   MachineInstr *IndVar, MachineInstr &Cmp,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   SmallVectorImpl<MachineInstr *> &PrevInsts,
                                   unsigned Iter, unsigned MaxIter) const {
    llvm_unreachable("Target didn't implement ReduceLoopCount");
  }

  /// Delete the instruction OldInst and everything after it, replacing it with
  /// an unconditional branch to NewDest. This is used by the tail merging pass.
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                       MachineBasicBlock *NewDest) const;

  /// Return true if it's legal to split the given basic
  /// block at the specified instruction (i.e. instruction would be the start
  /// of a new basic block).
  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI) const {
    return true;
  }

  /// Return true if it's profitable to predicate
  /// instructions with accumulated instruction latency of "NumCycles"
  /// of the specified basic block, where the probability of the instructions
  /// being executed is given by Probability, and Confidence is a measure
  /// of our confidence that it will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
    return false;
  }

  /// Second variant of isProfitableToIfCvt. This one
  /// checks for the case where two basic blocks from true and false path
  /// of an if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB, unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
    return false;
  }

  /// Return true if it's profitable for the if-converter to duplicate
  /// instructions of specified accumulated instruction latencies in the
  /// specified MBB to enable if-conversion.
  /// The probability of the instructions being executed is given by
  /// Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         BranchProbability Probability) const {
    return false;
  }

  /// Return the increase in code size needed to predicate a contiguous run of
  /// NumInsts instructions.
  virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF,
                                                    unsigned NumInsts) const {
    return 0;
  }

  /// Return an estimate for the code size reduction (in bytes) which will be
  /// caused by removing the given branch instruction during if-conversion.
  virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const {
    return getInstSizeInBytes(MI);
  }

  /// Return true if it's profitable to unpredicate
  /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
  /// exclusive predicates.
  /// e.g.
  ///   subeq  r0, r1, #1
  ///   addne  r0, r1, #1
  /// =>
  ///   sub    r0, r1, #1
  ///   addne  r0, r1, #1
  ///
  /// This may be profitable if conditional instructions are always executed.
  virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
    return false;
  }

  /// Return true if it is possible to insert a select
  /// instruction that chooses between TrueReg and FalseReg based on the
  /// condition code in Cond.
  ///
  /// When successful, also return the latency in cycles from TrueReg,
  /// FalseReg, and Cond to the destination register. In most cases, a select
  /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
  ///
  /// Some x86 implementations have 2-cycle cmov instructions.
  ///
  /// @param MBB         Block where select instruction would be inserted.
  /// @param Cond        Condition returned by analyzeBranch.
  /// @param DstReg      Virtual dest register that the result should write to.
  /// @param TrueReg     Virtual register to select when Cond is true.
  /// @param FalseReg    Virtual register to select when Cond is false.
  /// @param CondCycles  Latency from Cond+Branch to select output.
  /// @param TrueCycles  Latency from TrueReg to select output.
  /// @param FalseCycles Latency from FalseReg to select output.
  virtual bool canInsertSelect(const MachineBasicBlock &MBB,
                               ArrayRef<MachineOperand> Cond, Register DstReg,
                               Register TrueReg, Register FalseReg,
                               int &CondCycles, int &TrueCycles,
                               int &FalseCycles) const {
    return false;
  }

  /// Insert a select instruction into MBB before I that will copy TrueReg to
  /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
  ///
  /// This function can only be called after canInsertSelect() returned true.
  /// The condition in Cond comes from analyzeBranch, and it can be assumed
  /// that the same flags or registers required by Cond are available at the
  /// insertion point.
  ///
  /// @param MBB      Block where select instruction should be inserted.
  /// @param I        Insertion point.
  /// @param DL       Source location for debugging.
  /// @param DstReg   Virtual register to be defined by select instruction.
  /// @param Cond     Condition as computed by analyzeBranch.
  /// @param TrueReg  Virtual register to copy when Cond is true.
  /// @param FalseReg Virtual register to copy when Cond is false.
  virtual void insertSelect(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            Register DstReg, ArrayRef<MachineOperand> Cond,
                            Register TrueReg, Register FalseReg) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
  }

  /// Analyze the given select instruction, returning true if
  /// it cannot be understood. It is assumed that MI->isSelect() is true.
  ///
  /// When successful, return the controlling condition and the operands that
  /// determine the true and false result values.
  ///
  ///   Result = SELECT Cond, TrueOp, FalseOp
  ///
  /// Some targets can optimize select instructions, for example by predicating
  /// the instruction defining one of the operands. Such targets should set
  /// Optimizable.
  ///
  /// @param         MI Select instruction to analyze.
  /// @param Cond    Condition controlling the select.
  /// @param TrueOp  Operand number of the value selected when Cond is true.
  /// @param FalseOp Operand number of the value selected when Cond is false.
  /// @param Optimizable Returned as true if MI is optimizable.
  /// @returns False on success.
  virtual bool analyzeSelect(const MachineInstr &MI,
                             SmallVectorImpl<MachineOperand> &Cond,
                             unsigned &TrueOp, unsigned &FalseOp,
                             bool &Optimizable) const {
    assert(MI.getDesc().isSelect() && "MI must be a select instruction");
    return true;
  }

  /// Given a select instruction that was understood by
  /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
  /// merging it with one of its operands. Returns NULL on failure.
  ///
  /// When successful, returns the new select instruction. The client is
  /// responsible for deleting MI.
  ///
  /// If both sides of the select can be optimized, PreferFalse is used to pick
  /// a side.
  ///
  /// @param MI          Optimizable select instruction.
  /// @param NewMIs      Set that records all MIs in the basic block up to \p
  /// MI. Has to be updated with any newly created MI or deleted ones.
  /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
  /// @returns Optimized instruction or NULL.
  virtual MachineInstr *optimizeSelect(MachineInstr &MI,
                                       SmallPtrSetImpl<MachineInstr *> &NewMIs,
                                       bool PreferFalse = false) const {
    // This function must be implemented if Optimizable is ever set.
    llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
  }
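
// Illustrative caller-side sketch of the select hooks working together
// (NewMIs is assumed to be a SmallPtrSet<MachineInstr *, 8> maintained by the
// caller):
//
//   SmallVector<MachineOperand, 4> Cond;
//   unsigned TrueOp = 0, FalseOp = 0;
//   bool Optimizable = false;
//   if (!TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable) &&
//       Optimizable)
//     if (TII->optimizeSelect(MI, NewMIs))
//       MI.eraseFromParent(); // Client deletes the original select.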

  /// Emit instructions to copy a pair of physical registers.
  ///
  /// This function should support copies within any legal register class as
  /// well as any cross-class copies created during instruction selection.
  ///
  /// The source and destination registers may overlap, which may require a
  /// careful implementation when multiple copy instructions are required for
  /// large registers. See for example the ARM target.
  ///
  /// If RenamableDest is true, the copy instruction's destination operand is
  /// marked renamable.
  /// If RenamableSrc is true, the copy instruction's source operand is
  /// marked renamable.
  virtual void copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, const DebugLoc &DL,
                           MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                           bool RenamableDest = false,
                           bool RenamableSrc = false) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
  }
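
// Illustrative only: a minimal override for a hypothetical target that has a
// single register-to-register move opcode MyTarget::MOVrr:
//
//   void MyInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
//                                 MachineBasicBlock::iterator MI,
//                                 const DebugLoc &DL, MCRegister DestReg,
//                                 MCRegister SrcReg, bool KillSrc,
//                                 bool RenamableDest, bool RenamableSrc) const {
//     BuildMI(MBB, MI, DL, get(MyTarget::MOVrr), DestReg)
//         .addReg(SrcReg, getKillRegState(KillSrc));
//   }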

  /// Allow targets to tell MachineVerifier whether a specific register
  /// MachineOperand can be used as part of PC-relative addressing.
  /// PC-relative addressing modes in many CISC architectures contain
  /// (non-PC) registers as offsets or scaling values, which inherently
  /// tags the corresponding MachineOperand with OPERAND_PCREL.
  ///
  /// @param MO The MachineOperand in question. MO.isReg() should always
  /// be true.
  /// @return Whether this operand is allowed to be used PC-relatively.
  virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const {
    return false;
  }

  /// Return an index for MachineJumpTableInfo if \p MI is an indirect jump
  /// using a jump table, otherwise -1.
  virtual int getJumpTableIndex(const MachineInstr &MI) const { return -1; }

protected:
  /// Target-dependent implementation for isCopyInstr.
  /// If the specified machine instruction is an instruction that moves/copies
  /// a value from one register to another, return the destination and source
  /// registers as machine operands.
  virtual std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const {
    return std::nullopt;
  }

  virtual std::optional<DestSourcePair>
  isCopyLikeInstrImpl(const MachineInstr &MI) const {
    return std::nullopt;
  }

  /// Return true if the given terminator MI is not expected to spill. This
  /// sets the live interval as not spillable and adjusts phi node lowering to
  /// not introduce copies after the terminator. Use with care, these are
  /// currently used for hardware loop intrinsics in very controlled situations,
  /// created prior to register allocation in loops that only have single phi
  /// users for the terminator's value. They may run out of registers if not
  /// used carefully.
  virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const {
    return false;
  }

public:
  /// If the specified machine instruction is an instruction that moves/copies
  /// a value from one register to another, return the destination and source
  /// registers as machine operands.
  /// For a COPY instruction the method naturally returns the destination and
  /// source registers as machine operands; for all other instructions the
  /// method calls the target-dependent implementation.
  std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
    if (MI.isCopy()) {
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    }
    return isCopyInstrImpl(MI);
  }

  // Similar to `isCopyInstr`, but also matches instructions that have
  // non-copy semantics on MIR but ultimately generate a copy instruction.
  std::optional<DestSourcePair> isCopyLikeInstr(const MachineInstr &MI) const {
    if (auto IsCopyInstr = isCopyInstr(MI))
      return IsCopyInstr;
    return isCopyLikeInstrImpl(MI);
  }

  bool isFullCopyInstr(const MachineInstr &MI) const {
    auto DestSrc = isCopyInstr(MI);
    if (!DestSrc)
      return false;

    const MachineOperand *DestRegOp = DestSrc->Destination;
    const MachineOperand *SrcRegOp = DestSrc->Source;
    return !DestRegOp->getSubReg() && !SrcRegOp->getSubReg();
  }
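
// Illustrative caller-side sketch: treat target-specific moves the same way
// as generic COPYs.
//
//   if (auto DestSrc = TII->isCopyInstr(MI)) {
//     Register Dst = DestSrc->Destination->getReg();
//     Register Src = DestSrc->Source->getReg();
//     // ... forward Src to users of Dst ...
//   }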
1118 
1119   /// If the specific machine instruction is an instruction that adds an
1120   /// immediate value and a register, and stores the result in the given
1121   /// register \c Reg, return a pair of the source register and the offset
1122   /// which has been added.
1123   virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
1124                                                    Register Reg) const {
1125     return std::nullopt;
1126   }
1127 
1128   /// Returns true if MI is an instruction that defines Reg to have a constant
1129   /// value and the value is recorded in ImmVal. The ImmVal is a result that
1130   /// should be interpreted as modulo size of Reg.
1131   virtual bool getConstValDefinedInReg(const MachineInstr &MI,
1132                                        const Register Reg,
1133                                        int64_t &ImmVal) const {
1134     return false;
1135   }
1136 
1137   /// Store the specified register of the given register class to the specified
1138   /// stack frame index. The store instruction is to be added to the given
1139   /// machine basic block before the specified machine instruction. If isKill
1140   /// is true, the register operand is the last use and must be marked kill. If
1141   /// \p SrcReg is being directly spilled as part of assigning a virtual
1142   /// register, \p VReg is the register being assigned. This additional register
1143   /// argument is needed for certain targets when invoked from RegAllocFast to
1144   /// map the spilled physical register to its virtual register. A null register
1145   /// can be passed elsewhere. \p Flags is used to set appropriate machine
1146   /// flags on the spill instruction, e.g. the FrameSetup flag on a callee-saved
1147   /// register spill instruction, part of the prologue, during frame lowering.
1148   virtual void storeRegToStackSlot(
1149       MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
1150       bool isKill, int FrameIndex, const TargetRegisterClass *RC,
1151       const TargetRegisterInfo *TRI, Register VReg,
1152       MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const {
1153     llvm_unreachable("Target didn't implement "
1154                      "TargetInstrInfo::storeRegToStackSlot!");
1155   }
1156 
1157   /// Load the specified register of the given register class from the specified
1158   /// stack frame index. The load instruction is to be added to the given
1159   /// machine basic block before the specified machine instruction. If \p
1160   /// DestReg is being directly reloaded as part of assigning a virtual
1161   /// register, \p VReg is the register being assigned. This additional register
1162   /// argument is needed for certain targets when invoked from RegAllocFast to
1163   /// map the loaded physical register to its virtual register. A null register
1164   /// can be passed elsewhere. \p Flags is used to set appropriate machine
1165   /// flags on the reload instruction, e.g. the FrameDestroy flag on a
1166   /// callee-saved register reload instruction, part of the epilogue, during
1167   /// frame lowering.
1167   virtual void loadRegFromStackSlot(
1168       MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
1169       int FrameIndex, const TargetRegisterClass *RC,
1170       const TargetRegisterInfo *TRI, Register VReg,
1171       MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const {
1172     llvm_unreachable("Target didn't implement "
1173                      "TargetInstrInfo::loadRegFromStackSlot!");
1174   }
1175 
1176   /// This function is called for all pseudo instructions
1177   /// that remain after register allocation. Many pseudo instructions are
1178   /// created to help register allocation. This is the place to convert them
1179   /// into real instructions. The target can edit MI in place, or it can insert
1180   /// new instructions and erase MI. The function should return true if
1181   /// anything was changed.
1182   virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
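  // Illustrative sketch only: a target could expand a hypothetical
  // XYZ::RET_PSEUDO into its real return instruction by editing MI in place:
  //
  //   bool XYZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  //     if (MI.getOpcode() != XYZ::RET_PSEUDO)
  //       return false;
  //     MI.setDesc(get(XYZ::RET)); // rewrite the opcode in place
  //     return true;
  //   }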
1183 
1184   /// Check whether the target can fold a load that feeds a subreg operand
1185   /// (or a subreg operand that feeds a store).
1186   /// For example, X86 may want to return true if it can fold
1187   /// movl (%esp), %eax
1188   /// subb %al, ...
1189   /// Into:
1190   /// subb (%esp), ...
1191   ///
1192   /// Ideally, we'd like the target implementation of foldMemoryOperand() to
1193   /// reject subregs - but since this behavior used to be enforced in the
1194   /// target-independent code, moving this responsibility to the targets
1195   /// has the potential of causing nasty silent breakage in out-of-tree targets.
1196   virtual bool isSubregFoldable() const { return false; }
1197 
1198   /// For a patchpoint, stackmap, or statepoint intrinsic, return the range of
1199   /// operands which can't be folded into stack references. Operands outside
1200   /// of the range are most likely foldable but it is not guaranteed.
1201   /// These instructions are unique in that stack references for some operands
1202   /// have the same execution cost (e.g. none) as the unfolded register forms.
1203   /// The returned range is guaranteed to include all operands which can't be
1204   /// folded at zero cost.
1205   virtual std::pair<unsigned, unsigned>
1206   getPatchpointUnfoldableRange(const MachineInstr &MI) const;
1207 
1208   /// Attempt to fold a load or store of the specified stack
1209   /// slot into the specified machine instruction for the specified operand(s).
1210   /// If this is possible, a new instruction is returned with the specified
1211   /// operand folded, otherwise NULL is returned.
1212   /// The new instruction is inserted before MI, and the client is responsible
1213   /// for removing the old instruction.
1214   /// If VRM is passed, the assigned physregs can be inspected by target to
1215   /// decide on using an opcode (note that those assignments can still change).
1216   MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
1217                                   int FI,
1218                                   LiveIntervals *LIS = nullptr,
1219                                   VirtRegMap *VRM = nullptr) const;
1220 
1221   /// Same as the previous version except it allows folding of any load and
1222   /// store from / to any address, not just from a specific stack slot.
1223   MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
1224                                   MachineInstr &LoadMI,
1225                                   LiveIntervals *LIS = nullptr) const;
1226 
1227   /// This function defines the logic to lower COPY instruction to
1228   /// target specific instruction(s).
1229   void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const;
1230 
1231   /// Return true when there is potentially a faster code sequence
1232   /// for an instruction chain ending in \p Root. All potential patterns are
1233   /// returned in the \p Patterns vector. Patterns should be sorted in priority
1234   /// order since the pattern evaluator stops checking as soon as it finds a
1235   /// faster sequence.
1236   /// \param Root - Instruction that could be combined with one of its operands
1237   /// \param Patterns - Vector of possible combination patterns
1238   virtual bool getMachineCombinerPatterns(MachineInstr &Root,
1239                                           SmallVectorImpl<unsigned> &Patterns,
1240                                           bool DoRegPressureReduce) const;
1241 
1242   /// Return true if target supports reassociation of instructions in machine
1243   /// combiner pass to reduce register pressure for a given BB.
1244   virtual bool
1245   shouldReduceRegisterPressure(const MachineBasicBlock *MBB,
1246                                const RegisterClassInfo *RegClassInfo) const {
1247     return false;
1248   }
1249 
1250   /// Fix up the placeholder we may add in genAlternativeCodeSequence().
1251   virtual void
1252   finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern,
1253                     SmallVectorImpl<MachineInstr *> &InsInstrs) const {}
1254 
1255   /// Return true when a code sequence can improve throughput. It
1256   /// should be called only for instructions in loops.
1257   /// \param Pattern - combiner pattern
1258   virtual bool isThroughputPattern(unsigned Pattern) const;
1259 
1260   /// Return the objective of a combiner pattern.
1261   /// \param Pattern - combiner pattern
1262   virtual CombinerObjective getCombinerObjective(unsigned Pattern) const;
1263 
1264   /// Return true if the input \P Inst is part of a chain of dependent ops
1265   /// that are suitable for reassociation, otherwise return false.
1266   /// If the instruction's operands must be commuted to have a previous
1267   /// instruction of the same type define the first source operand, \P Commuted
1268   /// will be set to true.
1269   bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
1270 
1271   /// Return true when \P Inst is both associative and commutative. If \P Invert
1272   /// is true, then the inverse of \P Inst operation must be tested.
1273   virtual bool isAssociativeAndCommutative(const MachineInstr &Inst,
1274                                            bool Invert = false) const {
1275     return false;
1276   }
1277 
1278   /// Return the inverse operation opcode if it exists for \P Opcode (e.g. add
1279   /// for sub and vice versa).
1280   virtual std::optional<unsigned> getInverseOpcode(unsigned Opcode) const {
1281     return std::nullopt;
1282   }
1283 
1284   /// Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
1285   bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const;
1286 
1287   /// Return true when \P Inst has reassociable operands in the same \P MBB.
1288   virtual bool hasReassociableOperands(const MachineInstr &Inst,
1289                                        const MachineBasicBlock *MBB) const;
1290 
1291   /// Return true when \P Inst has reassociable sibling.
1292   virtual bool hasReassociableSibling(const MachineInstr &Inst,
1293                                       bool &Commuted) const;
1294 
1295   /// When getMachineCombinerPatterns() finds patterns, this function generates
1296   /// the instructions that could replace the original code sequence. The client
1297   /// has to decide whether the actual replacement is beneficial or not.
1298   /// \param Root - Instruction that could be combined with one of its operands
1299   /// \param Pattern - Combination pattern for Root
1300   /// \param InsInstrs - Vector of new instructions that implement the pattern
1301   /// \param DelInstrs - Old instructions, including Root, that could be
1302   /// replaced by InsInstrs
1303   /// \param InstIdxForVirtReg - map of virtual register to the instruction in
1304   /// InsInstrs that defines it
1305   virtual void genAlternativeCodeSequence(
1306       MachineInstr &Root, unsigned Pattern,
1307       SmallVectorImpl<MachineInstr *> &InsInstrs,
1308       SmallVectorImpl<MachineInstr *> &DelInstrs,
1309       DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const;
1310 
1311   /// When calculating the latency of the root instruction, accumulate the
1312   /// latency of the sequence into the root latency.
1313   /// \param Root - Instruction that could be combined with one of its operands
1314   virtual bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const {
1315     return true;
1316   }
1317 
1318   /// The returned array encodes the operand index for each parameter because
1319   /// the operands may be commuted; the operand indices for associative
1320   /// operations might also be target-specific. Each element specifies the index
1321   /// of {Prev, A, B, X, Y}.
1322   virtual void
1323   getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern,
1324                                std::array<unsigned, 5> &OperandIndices) const;
1325 
1326   /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
1327   /// reduce critical path length.
1328   void reassociateOps(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1329                       SmallVectorImpl<MachineInstr *> &InsInstrs,
1330                       SmallVectorImpl<MachineInstr *> &DelInstrs,
1331                       ArrayRef<unsigned> OperandIndices,
1332                       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
1333 
1334   /// Reassociation of some instructions requires inverse operations (e.g.
1335   /// (X + A) - Y => (X - Y) + A). This method returns a pair of new opcodes
1336   /// (new root opcode, new prev opcode) that must be used to reassociate \P
1337   /// Root and \P Prev accoring to \P Pattern.
1338   std::pair<unsigned, unsigned>
1339   getReassociationOpcodes(unsigned Pattern, const MachineInstr &Root,
1340                           const MachineInstr &Prev) const;
1341 
1342   /// The limit on resource length extension we accept in MachineCombiner Pass.
1343   virtual int getExtendResourceLenLimit() const { return 0; }
1344 
1345   /// This is an architecture-specific helper function of reassociateOps.
1346   /// Set special operand attributes for new instructions after reassociation.
1347   virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
1348                                      MachineInstr &NewMI1,
1349                                      MachineInstr &NewMI2) const {}
1350 
1351   /// Return true when a target supports MachineCombiner.
1352   virtual bool useMachineCombiner() const { return false; }
1353 
1354   /// Return a strategy that MachineCombiner must use when creating traces.
1355   virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const;
1356 
1357   /// Return true if the given SDNode can be copied during scheduling
1358   /// even if it has glue.
1359   virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
1360 
1361 protected:
1362   /// Target-dependent implementation for foldMemoryOperand.
1363   /// Target-independent code in foldMemoryOperand will
1364   /// take care of adding a MachineMemOperand to the newly created instruction.
1365   /// The instruction and any auxiliary instructions necessary will be inserted
1366   /// at InsertPt.
1367   virtual MachineInstr *
1368   foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
1369                         ArrayRef<unsigned> Ops,
1370                         MachineBasicBlock::iterator InsertPt, int FrameIndex,
1371                         LiveIntervals *LIS = nullptr,
1372                         VirtRegMap *VRM = nullptr) const {
1373     return nullptr;
1374   }
1375 
1376   /// Target-dependent implementation for foldMemoryOperand.
1377   /// Target-independent code in foldMemoryOperand will
1378   /// take care of adding a MachineMemOperand to the newly created instruction.
1379   /// The instruction and any auxiliary instructions necessary will be inserted
1380   /// at InsertPt.
1381   virtual MachineInstr *foldMemoryOperandImpl(
1382       MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1383       MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1384       LiveIntervals *LIS = nullptr) const {
1385     return nullptr;
1386   }
1387 
1388   /// Target-dependent implementation of getRegSequenceInputs.
1389   ///
1390   /// \returns true if it is possible to build the equivalent
1391   /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
1392   ///
1393   /// \pre MI.isRegSequenceLike().
1394   ///
1395   /// \see TargetInstrInfo::getRegSequenceInputs.
1396   virtual bool getRegSequenceLikeInputs(
1397       const MachineInstr &MI, unsigned DefIdx,
1398       SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1399     return false;
1400   }
1401 
1402   /// Target-dependent implementation of getExtractSubregInputs.
1403   ///
1404   /// \returns true if it is possible to build the equivalent
1405   /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1406   ///
1407   /// \pre MI.isExtractSubregLike().
1408   ///
1409   /// \see TargetInstrInfo::getExtractSubregInputs.
1410   virtual bool getExtractSubregLikeInputs(const MachineInstr &MI,
1411                                           unsigned DefIdx,
1412                                           RegSubRegPairAndIdx &InputReg) const {
1413     return false;
1414   }
1415 
1416   /// Target-dependent implementation of getInsertSubregInputs.
1417   ///
1418   /// \returns true if it is possible to build the equivalent
1419   /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1420   ///
1421   /// \pre MI.isInsertSubregLike().
1422   ///
1423   /// \see TargetInstrInfo::getInsertSubregInputs.
1424   virtual bool
1425   getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
1426                             RegSubRegPair &BaseReg,
1427                             RegSubRegPairAndIdx &InsertedReg) const {
1428     return false;
1429   }
1430 
1431 public:
1432   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
1433   /// a store or a load and a store into two or more instructions. If this is
1434   /// possible, returns true as well as the new instructions by reference.
1435   virtual bool
1436   unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
1437                       bool UnfoldLoad, bool UnfoldStore,
1438                       SmallVectorImpl<MachineInstr *> &NewMIs) const {
1439     return false;
1440   }
1441 
1442   virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
1443                                    SmallVectorImpl<SDNode *> &NewNodes) const {
1444     return false;
1445   }
1446 
1447   /// Returns the opcode of the would-be new
1448   /// instruction after load / store are unfolded from an instruction of the
1449   /// specified opcode. It returns zero if the specified unfolding is not
1450   /// possible. If LoadRegIndex is non-null, it is filled in with the operand
1451   /// index of the operand which will hold the register holding the loaded
1452   /// value.
1453   virtual unsigned
1454   getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
1455                              unsigned *LoadRegIndex = nullptr) const {
1456     return 0;
1457   }
1458 
1459   /// This is used by the pre-regalloc scheduler to determine if two loads are
1460   /// loading from the same base address. It should only return true if the base
1461   /// pointers are the same and the only difference between the two addresses
1462   /// is the offset. It also returns the offsets by reference.
1463   virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1464                                        int64_t &Offset1,
1465                                        int64_t &Offset2) const {
1466     return false;
1467   }
1468 
1469   /// This is used by the pre-regalloc scheduler to determine (in conjunction
1470   /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
1471   /// On some targets if two loads are loading from
1472   /// addresses in the same cache line, it's better if they are scheduled
1473   /// together. This function takes two integers that represent the load offsets
1474   /// from the common base address. It returns true if it decides it's desirable
1475   /// to schedule the two loads together. "NumLoads" is the number of loads that
1476   /// have already been scheduled after Load1.
1477   virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
1478                                        int64_t Offset1, int64_t Offset2,
1479                                        unsigned NumLoads) const {
1480     return false;
1481   }
1482 
1483   /// Get the base operand and byte offset of an instruction that reads/writes
1484   /// memory. This is a convenience function for callers that are only prepared
1485   /// to handle a single base operand.
1486   /// FIXME: Move Offset and OffsetIsScalable to some ElementCount-style
1487   /// abstraction that supports negative offsets.
1488   bool getMemOperandWithOffset(const MachineInstr &MI,
1489                                const MachineOperand *&BaseOp, int64_t &Offset,
1490                                bool &OffsetIsScalable,
1491                                const TargetRegisterInfo *TRI) const;
1492 
1493   /// Get zero or more base operands and the byte offset of an instruction that
1494   /// reads/writes memory. Note that there may be zero base operands if the
1495   /// instruction accesses a constant address.
1496   /// It returns false if MI does not read/write memory, or if the base
1497   /// operands and offset could not be determined.
1498   /// It is not guaranteed to always recognize base operands and offsets in all
1499   /// cases.
1500   /// FIXME: Move Offset and OffsetIsScalable to some ElementCount-style
1501   /// abstraction that supports negative offsets.
1502   virtual bool getMemOperandsWithOffsetWidth(
1503       const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
1504       int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
1505       const TargetRegisterInfo *TRI) const {
1506     return false;
1507   }
1508 
1509   /// Return true if the instruction contains a base register and offset. If
1510   /// true, the function also sets the operand position in the instruction
1511   /// for the base register and offset.
1512   virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
1513                                         unsigned &BasePos,
1514                                         unsigned &OffsetPos) const {
1515     return false;
1516   }
1517 
1518   /// Target-dependent implementation to get the values constituting the address
1519   /// of a MachineInstr that is accessing memory. These values are returned as a
1520   /// struct ExtAddrMode which contains all relevant information to make up the
1521   /// address.
1522   virtual std::optional<ExtAddrMode>
1523   getAddrModeFromMemoryOp(const MachineInstr &MemI,
1524                           const TargetRegisterInfo *TRI) const {
1525     return std::nullopt;
1526   }
1527 
1528   /// Check if it's possible and beneficial to fold the addressing computation
1529   /// `AddrI` into the addressing mode of the load/store instruction `MemI`. The
1530   /// memory instruction is a user of the virtual register `Reg`, which in turn
1531   /// is the ultimate destination of zero or more COPY instructions from the
1532   /// output register of `AddrI`.
1533   /// Return the addressing mode after folding in `AM`.
1534   virtual bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
1535                                    const MachineInstr &AddrI,
1536                                    ExtAddrMode &AM) const {
1537     return false;
1538   }
1539 
1540   /// Emit a load/store instruction with the same value register as `MemI`, but
1541   /// using the address from `AM`. The addressing mode must have been obtained
1542   /// from `canFoldIntoAddrMode` for the same memory instruction.
1543   virtual MachineInstr *emitLdStWithAddr(MachineInstr &MemI,
1544                                          const ExtAddrMode &AM) const {
1545     llvm_unreachable("target did not implement emitLdStWithAddr()");
1546   }
1547 
1548   /// Returns true if MI's Def is NullValueReg, and the MI does not change the
1549   /// zero value, i.e. cases such as rax = shr rax, X where NullValueReg = rax.
1550   /// Note that if the NullValueReg is non-zero, this function can return true
1551   /// even if it becomes zero, specifically in cases such as
1552   /// NullValueReg = shl NullValueReg, 63.
1553   virtual bool preservesZeroValueInReg(const MachineInstr *MI,
1554                                        const Register NullValueReg,
1555                                        const TargetRegisterInfo *TRI) const {
1556     return false;
1557   }
1558 
1559   /// If the instruction is an increment of a constant value, return the amount in \p Value.
1560   virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
1561     return false;
1562   }
1563 
1564   /// Returns true if the two given memory operations should be scheduled
1565   /// adjacent. Note that you have to add:
1566   ///   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1567   /// or
1568   ///   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1569   /// to TargetPassConfig::createMachineScheduler() to have an effect.
1570   ///
1571   /// \p BaseOps1 and \p BaseOps2 are memory operands of two memory operations.
1572   /// \p Offset1 and \p Offset2 are the byte offsets for the memory
1573   /// operations.
1574   /// \p OffsetIsScalable1 and \p OffsetIsScalable2 indicate if the offset is
1575   /// scaled by a runtime quantity.
1576   /// \p ClusterSize is the number of operations in the resulting load/store
1577   /// cluster if this hook returns true.
1578   /// \p NumBytes is the number of bytes that will be loaded from all the
1579   /// clustered loads if this hook returns true.
1580   virtual bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
1581                                    int64_t Offset1, bool OffsetIsScalable1,
1582                                    ArrayRef<const MachineOperand *> BaseOps2,
1583                                    int64_t Offset2, bool OffsetIsScalable2,
1584                                    unsigned ClusterSize,
1585                                    unsigned NumBytes) const {
1586     llvm_unreachable("target did not implement shouldClusterMemOps()");
1587   }
1588 
1589   /// Reverses the branch condition of the specified condition list,
1590   /// returning false on success and true if it cannot be reversed.
1591   virtual bool
1592   reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
1593     return true;
1594   }
1595 
1596   /// Insert a noop into the instruction stream at the specified point.
1597   virtual void insertNoop(MachineBasicBlock &MBB,
1598                           MachineBasicBlock::iterator MI) const;
1599 
1600   /// Insert noops into the instruction stream at the specified point.
1601   virtual void insertNoops(MachineBasicBlock &MBB,
1602                            MachineBasicBlock::iterator MI,
1603                            unsigned Quantity) const;
1604 
1605   /// Return the noop instruction to use for a noop.
1606   virtual MCInst getNop() const;
1607 
1608   /// Return true for post-incremented instructions.
1609   virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
1610 
1611   /// Returns true if the instruction is already predicated.
1612   virtual bool isPredicated(const MachineInstr &MI) const { return false; }
1613 
1614   /// Assumes the instruction is already predicated and returns true if the
1615   /// instruction can be predicated again.
1616   virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const {
1617     assert(isPredicated(MI) && "Instruction is not predicated");
1618     return false;
1619   }
1620 
1621   // Returns a MIRPrinter comment for this machine operand.
1622   virtual std::string
1623   createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op,
1624                           unsigned OpIdx, const TargetRegisterInfo *TRI) const;
1625 
1626   /// Returns true if the instruction is a
1627   /// terminator instruction that has not been predicated.
1628   bool isUnpredicatedTerminator(const MachineInstr &MI) const;
1629 
1630   /// Returns true if MI is an unconditional tail call.
1631   virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
1632     return false;
1633   }
1634 
1635   /// Returns true if the tail call can be made conditional on BranchCond.
1636   virtual bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
1637                                           const MachineInstr &TailCall) const {
1638     return false;
1639   }
1640 
1641   /// Replace the conditional branch in MBB with a conditional tail call.
1642   virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB,
1643                                          SmallVectorImpl<MachineOperand> &Cond,
1644                                          const MachineInstr &TailCall) const {
1645     llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
1646   }
1647 
1648   /// Convert the instruction into a predicated instruction.
1649   /// It returns true if the operation was successful.
1650   virtual bool PredicateInstruction(MachineInstr &MI,
1651                                     ArrayRef<MachineOperand> Pred) const;
1652 
1653   /// Returns true if the first specified predicate
1654   /// subsumes the second, e.g. GE subsumes GT.
1655   virtual bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
1656                                  ArrayRef<MachineOperand> Pred2) const {
1657     return false;
1658   }
1659 
1660   /// If the specified instruction defines any predicate
1661   /// or condition code register(s) used for predication, returns true as well
1662   /// as the definition predicate(s) by reference.
1663   /// SkipDead should be set to false at any point that dead
1664   /// predicate instructions should be considered as being defined.
1665   /// A dead predicate instruction is one that is guaranteed to be removed
1666   /// after a call to PredicateInstruction.
1667   virtual bool ClobbersPredicate(MachineInstr &MI,
1668                                  std::vector<MachineOperand> &Pred,
1669                                  bool SkipDead) const {
1670     return false;
1671   }
1672 
1673   /// Return true if the specified instruction can be predicated.
1674   /// By default, this returns true for every instruction with a
1675   /// PredicateOperand.
1676   virtual bool isPredicable(const MachineInstr &MI) const {
1677     return MI.getDesc().isPredicable();
1678   }
1679 
1680   /// Return true if it's safe to move a machine
1681   /// instruction that defines the specified register class.
1682   virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
1683     return true;
1684   }
1685 
1686   /// Test if the given instruction should be considered a scheduling boundary.
1687   /// This primarily includes labels and terminators.
1688   virtual bool isSchedulingBoundary(const MachineInstr &MI,
1689                                     const MachineBasicBlock *MBB,
1690                                     const MachineFunction &MF) const;
1691 
1692   /// Measure the specified inline asm to determine an approximation of its
1693   /// length.
1694   virtual unsigned getInlineAsmLength(
1695     const char *Str, const MCAsmInfo &MAI,
1696     const TargetSubtargetInfo *STI = nullptr) const;
1697 
1698   /// Allocate and return a hazard recognizer to use for this target when
1699   /// scheduling the machine instructions before register allocation.
1700   virtual ScheduleHazardRecognizer *
1701   CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1702                                const ScheduleDAG *DAG) const;
1703 
1704   /// Allocate and return a hazard recognizer to use for this target when
1705   /// scheduling the machine instructions before register allocation.
1706   virtual ScheduleHazardRecognizer *
1707   CreateTargetMIHazardRecognizer(const InstrItineraryData *,
1708                                  const ScheduleDAGMI *DAG) const;
1709 
1710   /// Allocate and return a hazard recognizer to use for this target when
1711   /// scheduling the machine instructions after register allocation.
1712   virtual ScheduleHazardRecognizer *
1713   CreateTargetPostRAHazardRecognizer(const InstrItineraryData *,
1714                                      const ScheduleDAG *DAG) const;
1715 
1716   /// Allocate and return a hazard recognizer to use by non-scheduling
1717   /// passes.
1718   virtual ScheduleHazardRecognizer *
1719   CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
1720     return nullptr;
1721   }
1722 
1723   /// Provide a global flag for disabling the PreRA hazard recognizer that
1724   /// targets may choose to honor.
1725   bool usePreRAHazardRecognizer() const;
1726 
1727   /// For a comparison instruction, return the source registers
1728   /// in SrcReg and SrcReg2 if it has two register operands, and the value it
1729   /// compares against in CmpValue. Return true if the comparison instruction
1730   /// can be analyzed.
1731   virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
1732                               Register &SrcReg2, int64_t &Mask,
1733                               int64_t &Value) const {
1734     return false;
1735   }
1736 
1737   /// See if the comparison instruction can be converted
1738   /// into something more efficient. E.g., on ARM most instructions can set the
1739   /// flags register, obviating the need for a separate CMP.
1740   virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
1741                                     Register SrcReg2, int64_t Mask,
1742                                     int64_t Value,
1743                                     const MachineRegisterInfo *MRI) const {
1744     return false;
1745   }
1746   virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
1747 
1748   /// Try to remove the load by folding it to a register operand at the use.
1749   /// We fold the load instructions if and only if the
1750   /// def and use are in the same BB. We only look at one load and see
1751   /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
1752   /// defined by the load we are trying to fold. DefMI returns the machine
1753   /// instruction that defines FoldAsLoadDefReg, and the function returns
1754   /// the machine instruction generated due to folding.
1755   virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
1756                                           const MachineRegisterInfo *MRI,
1757                                           Register &FoldAsLoadDefReg,
1758                                           MachineInstr *&DefMI) const {
1759     return nullptr;
1760   }
1761 
1762   /// 'Reg' is known to be defined by a move immediate instruction,
1763   /// try to fold the immediate into the use instruction.
1764   /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
1765   /// then the caller may assume that DefMI has been erased from its parent
1766   /// block. The caller may assume that it will not be erased by this
1767   /// function otherwise.
1768   virtual bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
1769                              Register Reg, MachineRegisterInfo *MRI) const {
1770     return false;
1771   }
1772 
1773   /// Return the number of micro-operations (u-ops) the given machine
1774   /// instruction will be decoded to on the target CPU. The itinerary's
1775   /// IssueWidth is the number of microops that can be dispatched each
1776   /// cycle. An instruction with zero microops takes no dispatch resources.
1777   virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
1778                                   const MachineInstr &MI) const;
1779 
1780   /// Return true for pseudo instructions that don't consume any
1781   /// machine resources in their current form. These are common cases that the
1782   /// scheduler should consider free, rather than conservatively handling them
1783   /// as instructions with no itinerary.
1784   bool isZeroCost(unsigned Opcode) const {
1785     return Opcode <= TargetOpcode::COPY;
1786   }
1787 
1788   virtual std::optional<unsigned>
1789   getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode,
1790                     unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const;
1791 
1792   /// Compute and return the use operand latency of a given pair of def and use.
1793   /// In most cases, the static scheduling itinerary is enough to determine the
1794   /// operand latency. But it may not be possible for instructions with a
1795   /// variable number of defs / uses.
1796   ///
1797   /// This is a raw interface to the itinerary that may be directly overridden
1798   /// by a target. Use computeOperandLatency to get the best estimate of
1799   /// latency.
1800   virtual std::optional<unsigned>
1801   getOperandLatency(const InstrItineraryData *ItinData,
1802                     const MachineInstr &DefMI, unsigned DefIdx,
1803                     const MachineInstr &UseMI, unsigned UseIdx) const;
1804 
1805   /// Compute the instruction latency of a given instruction.
1806   /// If the instruction has higher cost when predicated, it's returned via
1807   /// PredCost.
1808   virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1809                                    const MachineInstr &MI,
1810                                    unsigned *PredCost = nullptr) const;
1811 
1812   virtual unsigned getPredicationCost(const MachineInstr &MI) const;
1813 
1814   virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1815                                    SDNode *Node) const;
1816 
1817   /// Return the default expected latency for a def based on its opcode.
1818   unsigned defaultDefLatency(const MCSchedModel &SchedModel,
1819                              const MachineInstr &DefMI) const;
1820 
1821   /// Return true if this opcode has high latency to its result.
1822   virtual bool isHighLatencyDef(int opc) const { return false; }
1823 
1824   /// Compute operand latency between a def of 'Reg'
1825   /// and a use in the current loop. Return true if the target considered
1826   /// it 'high'. This is used by optimization passes such as machine LICM to
1827   /// determine whether it makes sense to hoist an instruction out even in a
1828   /// high register pressure situation.
1829   virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
1830                                      const MachineRegisterInfo *MRI,
1831                                      const MachineInstr &DefMI, unsigned DefIdx,
1832                                      const MachineInstr &UseMI,
1833                                      unsigned UseIdx) const {
1834     return false;
1835   }
1836 
1837   /// Compute operand latency of a def of 'Reg'. Return true
1838   /// if the target considered it 'low'.
1839   virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
1840                                 const MachineInstr &DefMI,
1841                                 unsigned DefIdx) const;
1842 
1843   /// Perform target-specific instruction verification.
1844   virtual bool verifyInstruction(const MachineInstr &MI,
1845                                  StringRef &ErrInfo) const {
1846     return true;
1847   }
1848 
1849   /// Return the current execution domain and bit mask of
1850   /// possible domains for instruction.
1851   ///
1852   /// Some micro-architectures have multiple execution domains, and multiple
1853   /// opcodes that perform the same operation in different domains.  For
1854   /// example, the x86 architecture provides the por, orps, and orpd
1855   /// instructions that all do the same thing.  There is a latency penalty if a
1856   /// register is written in one domain and read in another.
1857   ///
1858   /// This function returns a pair (domain, mask) containing the execution
1859   /// domain of MI, and a bit mask of possible domains.  The setExecutionDomain
1860   /// function can be used to change the opcode to one of the domains in the
1861   /// bit mask.  Instructions whose execution domain can't be changed should
1862   /// return a 0 mask.
1863   ///
1864   /// The execution domain numbers don't have any special meaning except domain
1865   /// 0 is used for instructions that are not associated with any interesting
1866   /// execution domain.
1867   ///
1868   virtual std::pair<uint16_t, uint16_t>
1869   getExecutionDomain(const MachineInstr &MI) const {
1870     return std::make_pair(0, 0);
1871   }
1872 
1873   /// Change the opcode of MI to execute in Domain.
1874   ///
1875   /// The bit (1 << Domain) must be set in the mask returned from
1876   /// getExecutionDomain(MI).
1877   virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
1878 
1879   /// Returns the preferred minimum clearance
1880   /// before an instruction with an unwanted partial register update.
1881   ///
1882   /// Some instructions only write part of a register, and implicitly need to
1883   /// read the other parts of the register.  This may cause unwanted stalls
1884   /// preventing otherwise unrelated instructions from executing in parallel in
1885   /// an out-of-order CPU.
1886   ///
1887   /// For example, the x86 instruction cvtsi2ss writes its result to bits
1888   /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
1889   /// the instruction needs to wait for the old value of the register to become
1890   /// available:
1891   ///
1892   ///   addps %xmm1, %xmm0
1893   ///   movaps %xmm0, (%rax)
1894   ///   cvtsi2ss %rbx, %xmm0
1895   ///
1896   /// In the code above, the cvtsi2ss instruction needs to wait for the addps
1897   /// instruction before it can issue, even though the high bits of %xmm0
1898   /// probably aren't needed.
1899   ///
1900   /// This hook returns the preferred clearance before MI, measured in
1901   /// instructions.  Other defs of MI's operand OpNum are avoided in the last N
1902   /// instructions before MI.  It should only return a positive value for
1903   /// unwanted dependencies.  If the old bits of the defined register have
1904   /// useful values, or if MI is determined to otherwise read the dependency,
1905   /// the hook should return 0.
1906   ///
1907   /// The unwanted dependency may be handled by:
1908   ///
1909   /// 1. Allocating the same register for an MI def and use.  That makes the
1910   ///    unwanted dependency identical to a required dependency.
1911   ///
1912   /// 2. Allocating a register for the def that has no defs in the previous N
1913   ///    instructions.
1914   ///
1915   /// 3. Calling breakPartialRegDependency() with the same arguments.  This
1916   ///    allows the target to insert a dependency breaking instruction.
1917   ///
1918   virtual unsigned
1919   getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
1920                                const TargetRegisterInfo *TRI) const {
1921     // The default implementation returns 0 for no partial register dependency.
1922     return 0;
1923   }
1924 
1925   /// Return the minimum clearance before an instruction that reads an
1926   /// unused register.
1927   ///
1928   /// For example, AVX instructions may copy part of a register operand into
1929   /// the unused high bits of the destination register.
1930   ///
1931   /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
1932   ///
1933   /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
1934   /// false dependence on any previous write to %xmm0.
1935   ///
1936   /// This hook works similarly to getPartialRegUpdateClearance, except that it
1937   /// does not take an operand index. Instead, it sets \p OpNum to the index
1938   /// of the unused register.
1939   virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
1940                                         const TargetRegisterInfo *TRI) const {
1941     // The default implementation returns 0 for no undef register dependency.
1942     return 0;
1943   }
1944 
1945   /// Insert a dependency-breaking instruction
1946   /// before MI to eliminate an unwanted dependency on OpNum.
1947   ///
1948   /// If it wasn't possible to avoid a def in the last N instructions before MI
1949   /// (see getPartialRegUpdateClearance), this hook will be called to break the
1950   /// unwanted dependency.
1951   ///
1952   /// On x86, an xorps instruction can be used as a dependency breaker:
1953   ///
1954   ///   addps %xmm1, %xmm0
1955   ///   movaps %xmm0, (%rax)
1956   ///   xorps %xmm0, %xmm0
1957   ///   cvtsi2ss %rbx, %xmm0
1958   ///
1959   /// An <imp-kill> operand should be added to MI if an instruction was
1960   /// inserted.  This ties the instructions together in the post-ra scheduler.
1961   ///
1962   virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
1963                                          const TargetRegisterInfo *TRI) const {}
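  // Illustrative sketch only: mirroring the xorps idiom above, a target could
  // zero the register with a self-XOR (XYZ::XORrr is hypothetical):
  //
  //   Register Reg = MI.getOperand(OpNum).getReg();
  //   BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(XYZ::XORrr), Reg)
  //       .addReg(Reg, RegState::Undef)
  //       .addReg(Reg, RegState::Undef);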
1964 
1965   /// Create machine specific model for scheduling.
1966   virtual DFAPacketizer *
1967   CreateTargetScheduleState(const TargetSubtargetInfo &) const {
1968     return nullptr;
1969   }
1970 
1971   /// Sometimes, it is possible for the target
1972   /// to tell, even without aliasing information, that two MIs access different
1973   /// memory addresses. This function returns true if two MIs access different
1974   /// memory addresses and false otherwise.
1975   ///
1976   /// Assumes any physical registers used to compute addresses have the same
1977   /// value for both instructions. (This is the most useful assumption for
1978   /// post-RA scheduling.)
1979   ///
1980   /// See also MachineInstr::mayAlias, which is implemented on top of this
1981   /// function.
1982   virtual bool
1983   areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
1984                                   const MachineInstr &MIb) const {
1985     assert(MIa.mayLoadOrStore() &&
1986            "MIa must load from or modify a memory location");
1987     assert(MIb.mayLoadOrStore() &&
1988            "MIb must load from or modify a memory location");
1989     return false;
1990   }
1991 
1992   /// Return the value to use for the MachineCSE's LookAheadLimit,
1993   /// which is a heuristic used for CSE'ing phys reg defs.
1994   virtual unsigned getMachineCSELookAheadLimit() const {
1995     // The default lookahead is small to prevent unprofitable quadratic
1996     // behavior.
1997     return 5;
1998   }
1999 
2000   /// Return the maximal number of alias checks on memory operands. For
2001   /// instructions with more than one memory operands, the alias check on a
2002   /// single MachineInstr pair has quadratic overhead and results in
2003   /// unacceptable performance in the worst case. The limit here clamps the
2004   /// maximal number of checks performed, which is usually the product of the
2005   /// memory operand counts of the pair of MachineInstrs to be checked. For
2006   /// instance, with two MachineInstrs with 4 and 5 memory operands
2007   /// respectively, a total of 20 checks are required. With this limit set to
2008   /// 16, their alias check is skipped. We choose to limit the product instead
2009   /// of the individual instruction, as targets may have special MachineInstrs
2010   /// with a considerably high number of memory operands, such as `ldm` on ARM.
2011   /// Setting this limit per MachineInstr would result in either too high
2012   /// overhead or too rigid a restriction.
2013   virtual unsigned getMemOperandAACheckLimit() const { return 16; }
2014 
2015   /// Return an array that contains the ids of the target indices (used for the
2016   /// TargetIndex machine operand) and their names.
2017   ///
2018   /// MIR Serialization is able to serialize only the target indices that are
2019   /// defined by this method.
2020   virtual ArrayRef<std::pair<int, const char *>>
2021   getSerializableTargetIndices() const {
2022     return {};
2023   }
2024 
2025   /// Decompose the machine operand's target flags into two values - the direct
2026   /// target flag value and any of the bit flags that are applied.
2027   virtual std::pair<unsigned, unsigned>
2028   decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
2029     return std::make_pair(0u, 0u);
2030   }
2031 
2032   /// Return an array that contains the direct target flag values and their
2033   /// names.
2034   ///
2035   /// MIR Serialization is able to serialize only the target flags that are
2036   /// defined by this method.
2037   virtual ArrayRef<std::pair<unsigned, const char *>>
2038   getSerializableDirectMachineOperandTargetFlags() const {
2039     return {};
2040   }
2041 
2042   /// Return an array that contains the bitmask target flag values and their
2043   /// names.
2044   ///
2045   /// MIR Serialization is able to serialize only the target flags that are
2046   /// defined by this method.
2047   virtual ArrayRef<std::pair<unsigned, const char *>>
2048   getSerializableBitmaskMachineOperandTargetFlags() const {
2049     return {};
2050   }
2051 
2052   /// Return an array that contains the MMO target flag values and their
2053   /// names.
2054   ///
2055   /// MIR Serialization is able to serialize only the MMO target flags that are
2056   /// defined by this method.
2057   virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
2058   getSerializableMachineMemOperandTargetFlags() const {
2059     return {};
2060   }
2061 
2062   /// Determines whether \p Inst is a tail call instruction. Override this
2063   /// method on targets that do not properly set MCID::Return and MCID::Call on
2064   /// tail call instructions.
2065   virtual bool isTailCall(const MachineInstr &Inst) const {
2066     return Inst.isReturn() && Inst.isCall();
2067   }
2068 
2069   /// True if the instruction is bound to the top of its basic block and no
2070   /// other instructions shall be inserted before it. This can be implemented
2071   /// to prevent the register allocator from inserting spills for \p Reg before
2072   /// such instructions.
2073   virtual bool isBasicBlockPrologue(const MachineInstr &MI,
2074                                     Register Reg = Register()) const {
2075     return false;
2076   }
2077 
2078   /// Allows targets to use an appropriate copy instruction while splitting the
2079   /// live range of a register during register allocation.
2080   virtual unsigned getLiveRangeSplitOpcode(Register Reg,
2081                                            const MachineFunction &MF) const {
2082     return TargetOpcode::COPY;
2083   }
2084 
2085   /// During PHI elimination, lets the target make necessary checks and
2086   /// insert the copy to the PHI destination register in a target-specific
2087   /// manner.
2088   virtual MachineInstr *createPHIDestinationCopy(
2089       MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
2090       const DebugLoc &DL, Register Src, Register Dst) const {
2091     return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
2092         .addReg(Src);
2093   }
2094 
2095   /// During PHI elimination, lets the target make necessary checks and
2096   /// insert the copy for the PHI source register in a target-specific
2097   /// manner.
2098   virtual MachineInstr *createPHISourceCopy(MachineBasicBlock &MBB,
2099                                             MachineBasicBlock::iterator InsPt,
2100                                             const DebugLoc &DL, Register Src,
2101                                             unsigned SrcSubReg,
2102                                             Register Dst) const {
2103     return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
2104         .addReg(Src, 0, SrcSubReg);
2105   }
2106 
2107   /// Returns a \p outliner::OutlinedFunction struct containing target-specific
2108   /// information for a set of outlining candidates. Returns std::nullopt if the
2109   /// candidates are not suitable for outlining. \p MinRepeats is the minimum
2110   /// number of times the instruction sequence must be repeated.
2111   virtual std::optional<std::unique_ptr<outliner::OutlinedFunction>>
2112   getOutliningCandidateInfo(
2113       const MachineModuleInfo &MMI,
2114       std::vector<outliner::Candidate> &RepeatedSequenceLocs,
2115       unsigned MinRepeats) const {
2116     llvm_unreachable(
2117         "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
2118   }
2119 
2120   /// Optional target hook to create the LLVM IR attributes for the outlined
2121   /// function. If overridden, the overriding function must call the default
2122   /// implementation.
2123   virtual void mergeOutliningCandidateAttributes(
2124       Function &F, std::vector<outliner::Candidate> &Candidates) const;
2125 
2126 protected:
2127   /// Target-dependent implementation for getOutliningTypeImpl.
2128   virtual outliner::InstrType
2129   getOutliningTypeImpl(const MachineModuleInfo &MMI,
2130                        MachineBasicBlock::iterator &MIT, unsigned Flags) const {
2131     llvm_unreachable(
2132         "Target didn't implement TargetInstrInfo::getOutliningTypeImpl!");
2133   }
2134 
2135 public:
2136   /// Returns how or if \p MIT should be outlined. \p Flags is the
2137   /// target-specific information returned by isMBBSafeToOutlineFrom.
2138   outliner::InstrType getOutliningType(const MachineModuleInfo &MMI,
2139                                        MachineBasicBlock::iterator &MIT,
2140                                        unsigned Flags) const;
2141 
2142   /// Optional target hook that returns true if \p MBB is safe to outline from,
2143   /// and returns any target-specific information in \p Flags.
2144   virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2145                                       unsigned &Flags) const;
2146 
2147   /// Optional target hook which partitions \p MBB into outlinable ranges for
2148   /// instruction mapping purposes. Each range is defined by two iterators:
2149   /// [start, end).
2150   ///
2151   /// Ranges are expected to be ordered top-down. That is, ranges closer to the
2152   /// top of the block should come before ranges closer to the end of the block.
2153   ///
2154   /// Ranges cannot overlap.
2155   ///
2156   /// If an entire block is mappable, then its range is [MBB.begin(), MBB.end())
2157   ///
2158   /// All instructions not present in an outlinable range are considered
2159   /// illegal.
2160   virtual SmallVector<
2161       std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
2162   getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const {
2163     return {std::make_pair(MBB.begin(), MBB.end())};
2164   }
2165 
2166   /// Insert a custom frame for outlined functions.
2167   virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
2168                                   const outliner::OutlinedFunction &OF) const {
2169     llvm_unreachable(
2170         "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
2171   }
2172 
2173   /// Insert a call to an outlined function into the program.
2174   /// Returns an iterator to the spot where we inserted the call. This must be
2175   /// implemented by the target.
2176   virtual MachineBasicBlock::iterator
2177   insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
2178                      MachineBasicBlock::iterator &It, MachineFunction &MF,
2179                      outliner::Candidate &C) const {
2180     llvm_unreachable(
2181         "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
2182   }
2183 
2184   /// Insert an architecture-specific instruction to clear a register. If you
2185   /// need to avoid side effects (e.g. avoid XOR on x86, which sets EFLAGS), set
2186   /// \p AllowSideEffects to \p false.
2187   virtual void buildClearRegister(Register Reg, MachineBasicBlock &MBB,
2188                                   MachineBasicBlock::iterator Iter,
2189                                   DebugLoc &DL,
2190                                   bool AllowSideEffects = true) const {
2191 #if 0
2192     // FIXME: This should exist once all platforms that use stack protectors
    // implement it.
2194     llvm_unreachable(
2195         "Target didn't implement TargetInstrInfo::buildClearRegister!");
2196 #endif
2197   }
2198 
2199   /// Return true if the function can safely be outlined from.
2200   /// A function \p MF is considered safe for outlining if an outlined function
2201   /// produced from instructions in \p MF will produce a program which produces
2202   /// the same output for any set of given inputs.
2203   virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
2204                                            bool OutlineFromLinkOnceODRs) const {
2205     llvm_unreachable("Target didn't implement "
2206                      "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
2207   }
2208 
2209   /// Return true if the function should be outlined from by default.
2210   virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const {
2211     return false;
2212   }
2213 
2214   /// Return true if the function is a viable candidate for machine function
2215   /// splitting. The criteria for if a function can be split may vary by target.
2216   virtual bool isFunctionSafeToSplit(const MachineFunction &MF) const;
2217 
2218   /// Return true if the MachineBasicBlock can safely be split to the cold
2219   /// section. On AArch64, certain instructions may cause a block to be unsafe
2220   /// to split to the cold section.
2221   virtual bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const {
2222     return true;
2223   }
2224 
2225   /// Produce the expression describing how \p MI loads a value into the
2226   /// physical register \p Reg. This hook should only be used with
2227   /// \p MIs belonging to VReg-less functions.
2228   virtual std::optional<ParamLoadedValue>
2229   describeLoadedValue(const MachineInstr &MI, Register Reg) const;
2230 
2231   /// Given the generic extension instruction \p ExtMI, returns true if this
2232   /// extension is a likely candidate for being folded into another
2233   /// instruction.
2234   virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
2235                                         MachineRegisterInfo &MRI) const {
2236     return false;
2237   }
2238 
2239   /// Return the MIR formatter used to format/parse MIR operands. Targets can
2240   /// override this virtual function to return a target-specific MIR formatter.
2241   virtual const MIRFormatter *getMIRFormatter() const {
2242     if (!Formatter)
2243       Formatter = std::make_unique<MIRFormatter>();
2244     return Formatter.get();
2245   }
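  // A target override usually hands back a long-lived target-specific
  // formatter instead of the lazily created default; a sketch, assuming a
  // hypothetical `MyTargetMIRFormatter`:
  //
  //   const MIRFormatter *MyTargetInstrInfo::getMIRFormatter() const {
  //     static const MyTargetMIRFormatter F;
  //     return &F;
  //   }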
2246 
2247   /// Returns the target-specific default value for tail duplication.
2248   /// This value will be used if the tail-dup-placement-threshold argument is
2249   /// not provided.
2250   virtual unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
2251     return OptLevel >= CodeGenOptLevel::Aggressive ? 4 : 2;
2252   }
2253 
2254   /// Returns the target-specific default value for tail merging.
2255   /// This value will be used if the tail-merge-size argument is not provided.
2256   virtual unsigned getTailMergeSize(const MachineFunction &MF) const {
2257     return 3;
2258   }
2259 
2260   /// Returns the callee operand from the given \p MI.
2261   virtual const MachineOperand &getCalleeOperand(const MachineInstr &MI) const {
2262     return MI.getOperand(0);
2263   }
2264 
2265   /// Return the uniformity behavior of the given instruction.
2266   virtual InstructionUniformity
2267   getInstructionUniformity(const MachineInstr &MI) const {
2268     return InstructionUniformity::Default;
2269   }
2270 
2271   /// Returns true if the given \p MI defines a TargetIndex operand that can be
2272   /// tracked by its offset, can have values, and can have debug info
2273   /// associated with it. If so, sets \p Index and \p Offset of the target index
2274   /// operand.
2275   virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index,
2276                                         int64_t &Offset) const {
2277     return false;
2278   }
2279 
2280   /// Get the call frame size just before \p MI.
2281   unsigned getCallFrameSizeAt(MachineInstr &MI) const;
2282 
2283   /// Fills in the necessary MachineOperands to refer to a frame index.
2284   /// The best way to understand this is to print `asm(""::"m"(x));` after
2285   /// finalize-isel. Example:
2286   /// INLINEASM ... 262190 /* mem:m */, %stack.0.x.addr, 1, $noreg, 0, $noreg
2287   /// we would add placeholders for:                     ^  ^       ^  ^
2288   virtual void getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops,
2289                                      int FI) const {
2290     llvm_unreachable("unknown number of operands necessary");
2291   }
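  // A sketch of an override for a target whose memory references take the
  // four trailing operands shown above (the operand shape is illustrative;
  // real targets append whatever their addressing mode needs):
  //
  //   void MyTargetInstrInfo::getFrameIndexOperands(
  //       SmallVectorImpl<MachineOperand> &Ops, int FI) const {
  //     Ops.push_back(MachineOperand::CreateFI(FI));        // %stack.0.x.addr
  //     Ops.push_back(MachineOperand::CreateImm(1));        // scale
  //     Ops.push_back(MachineOperand::CreateReg(0, false)); // $noreg (index)
  //     Ops.push_back(MachineOperand::CreateImm(0));        // displacement
  //   }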
2292 
2293 private:
2294   mutable std::unique_ptr<MIRFormatter> Formatter;
2295   unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
2296   unsigned CatchRetOpcode;
2297   unsigned ReturnOpcode;
2298 };
2299 
2300 /// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
2301 template <> struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
2302   using RegInfo = DenseMapInfo<Register>;
2303   using SubRegInfo = DenseMapInfo<unsigned>;
2304 
2305   static inline TargetInstrInfo::RegSubRegPair getEmptyKey() {
2306     return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
2307                                           SubRegInfo::getEmptyKey());
2308   }
2309 
2310   static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() {
2311     return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
2312                                           SubRegInfo::getTombstoneKey());
2313   }
2314 
2315   /// Reuse getHashValue implementation from
2316   /// std::pair<Register, unsigned>.
2317   static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
2318     return DenseMapInfo<std::pair<Register, unsigned>>::getHashValue(
2319         std::make_pair(Val.Reg, Val.SubReg));
2320   }
2321 
2322   static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
2323                       const TargetInstrInfo::RegSubRegPair &RHS) {
2324     return LHS == RHS;
2325   }
2326 };
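// With this specialization, RegSubRegPair can be used as a DenseMap key
// directly. A minimal sketch (variable names are illustrative):
//
//   DenseMap<TargetInstrInfo::RegSubRegPair, unsigned> UseCount;
//   ++UseCount[TargetInstrInfo::RegSubRegPair(Reg, SubReg)];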
2327 
2328 } // end namespace llvm
2329 
2330 #endif // LLVM_CODEGEN_TARGETINSTRINFO_H
2331