//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() = default;

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.operands()[OpNum].RegClass;
  if (MCID.operands()[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run until the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden by the target to handle them.
/// We implement a special case of the .space directive that takes only a
/// single integer argument in base 10 giving the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrarily
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
  const char *Str,
  const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

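// For illustration only (not part of the implementation): assuming a target
// whose MaxInstLength is 4 and whose comment string is "#", the string below
// would be measured as 4 + 256 + 4 = 264 bytes -- one instruction, a literal
// .space payload, and one more instruction; the comment line counts for
// nothing:
//
//   const char *Asm = "add x0, x0, x1\n"
//                     "# scratch area follows\n"
//                     ".space 256\n"
//                     "ret\n";
//   unsigned Len = TII->getInlineAsmLength(Asm, MAI, STI); // 264
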
/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateAdditionalCallInfo())
      MBB->getParent()->eraseAdditionalCallInfo(&*MI);
    MBB->erase(MI);
  }

  // If NewDest isn't immediately after MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable =
      Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
  bool Reg2IsRenamable =
      Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Reg1.isPhysical())
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Reg2.isPhysical())
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices() call
  // below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

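// A minimal usage sketch (illustrative; assumes MI is a commutable
// instruction with two register sources): passing CommuteAnyOperandIndex for
// both indices lets findCommutedOpIndices pick the operand pair, and
// NewMI=false commutes in place:
//
//   if (MachineInstr *Commuted = TII->commuteInstruction(
//           MI, /*NewMI=*/false, TargetInstrInfo::CommuteAnyOperandIndex,
//           TargetInstrInfo::CommuteAnyOperandIndex))
//     ; // Commuted == &MI; a commutable operand pair was swapped in place.
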
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

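// Worked example (illustrative): suppose an instruction's commutable operand
// pair is (1, 2). A caller that fixed operand 2 and left the other free,
//
//   unsigned I1 = TargetInstrInfo::CommuteAnyOperandIndex, I2 = 2;
//   bool OK = fixCommutedOpIndices(I1, I2, 1, 2); // OK == true, I1 == 1
//
// gets the free index resolved to 1, whereas requesting operand 3 against
// the same pair returns false because 3 is not one of the commutable operands.
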
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.operands()[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

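// Usage sketch (illustrative, ARM-style conditional execution; assumes the
// ARM backend's ARMCC and ARM register enums): predicating an instruction so
// it executes only when EQ holds would pass one MachineOperand per
// MCOI::Predicate operand of the instruction, in order:
//
//   SmallVector<MachineOperand, 2> Pred;
//   Pred.push_back(MachineOperand::CreateImm(ARMCC::EQ));
//   Pred.push_back(MachineOperand::CreateReg(ARM::CPSR, /*isDef=*/false));
//   bool Changed = TII->PredicateInstruction(MI, Pred);
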
bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

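// Worked example (illustrative): spilling a 32-bit subregister (e.g. an
// AArch64-style sub_32 index with getSubRegIdxSize == 32 and
// getSubRegIdxOffset == 0) of a class whose spill size is 8 bytes yields
// Size == 4 and Offset == 0 on a little-endian target; on a big-endian
// target the offset is mirrored to 8 - (0 + 4) == 4:
//
//   unsigned Size, Offset;
//   if (TII->getStackSlotRange(RC, SubIdx, Size, Offset, MF))
//     ; // Address the spill slot at FrameIndex plus Offset, Size bytes wide.
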
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &
TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator InsertBefore,
                           const MachineInstr &Orig) const {
  MachineFunction &MF = *MBB.getParent();
  // CFI instructions are marked as non-duplicable, because Darwin compact
  // unwind info emission can't handle multiple prologue setups.
  assert((!Orig.isNotDuplicable() ||
          (!MF.getTarget().getTargetTriple().isOSDarwin() &&
           Orig.isCFIInstruction())) &&
         "Instruction cannot be duplicated");

  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              const TargetInstrInfo &TII,
                                              unsigned FoldIdx) {
  assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(FoldReg.isVirtual() && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (LiveOp.getReg().isPhysical())
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange hits llvm_unreachable if MI is not a
  // stackmap, patchpoint, or statepoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
                                    const TargetInstrInfo &TII) {
  // If the machine operand is tied, untie it first.
  if (MI->getOperand(OpNo).isTied()) {
    unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
    MI->untieRegOperand(OpNo);
    // Intentional recursion!
    foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
  }

  SmallVector<MachineOperand, 5> NewOps;
  TII.getFrameIndexOperands(NewOps, FI);
  assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
  MI->removeOperand(OpNo);
  MI->insert(MI->operands_begin() + OpNo, NewOps);

  // Change the previous operand to a MemKind InlineAsm::Flag. The flag's
  // operand count is the per-target number of operands that represent the
  // memory reference (i.e. the NewOps just inserted); the flag operand MD
  // itself is not counted.
  InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size());
  F.setMemConstraint(InlineAsm::ConstraintCode::m);
  MachineOperand &MD = MI->getOperand(OpNo - 1);
  MD.setImm(F);
}

// Returns nullptr if not possible to fold.
static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
                                             ArrayRef<unsigned> Ops, int FI,
                                             const TargetInstrInfo &TII) {
  assert(MI.isInlineAsm() && "wrong opcode");
  if (Ops.size() > 1)
    return nullptr;
  unsigned Op = Ops[0];
  assert(Op && "should never be first operand");
  assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");

  if (!MI.mayFoldInlineAsmRegOp(Op))
    return nullptr;

  MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);

  foldInlineAsmMemOperand(&NewMI, Op, FI, TII);

  // Update the mayLoad/mayStore extra info and the memoperands.
  const VirtRegInfo &RI =
      AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
  MachineOperand &ExtraMO = NewMI.getOperand(InlineAsm::MIOp_ExtraInfo);
  MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
  if (RI.Reads) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
    Flags |= MachineMemOperand::MOLoad;
  }
  if (RI.Writes) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
    Flags |= MachineMemOperand::MOStore;
  }
  MachineFunction *MF = NewMI.getMF();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
      MFI.getObjectAlign(FI));
  NewMI.addMemOperand(*MF, MMO);

  return &NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else if (MI.isInlineAsm()) {
    return foldInlineAsmMemOperand(MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions, so copy them over from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!isCopyInstr(MI) || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
                        Register());
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());
  return &*--Pos;
}

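// Typical use (illustrative sketch, in the spirit of spill-folding during
// register allocation; SpillSlotFI and OpIdx are hypothetical names): try to
// fold the stack slot directly into MI, and fall back to an explicit reload
// if the target cannot do it:
//
//   if (MachineInstr *Folded =
//           TII->foldMemoryOperand(MI, {OpIdx}, SpillSlotFI, LIS, VRM))
//     MI.eraseFromParent(); // Folded has replaced MI.
//   else
//     TII->loadRegFromStackSlot(MBB, MI.getIterator(), Reg, SpillSlotFI, RC,
//                               TRI, Register());
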
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
    return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
/// replacement instructions immediately precede it.  Copy any implicit
/// operands from MI to the replacement instruction.
static void transferImplicitOperands(MachineInstr *MI,
                                     const TargetRegisterInfo *TRI) {
  MachineBasicBlock::iterator CopyMI = MI;
  --CopyMI;

  Register DstReg = MI->getOperand(0).getReg();
  for (const MachineOperand &MO : MI->implicit_operands()) {
    CopyMI->addOperand(MO);

    // Be conservative about preserving kills when subregister defs are
    // involved. If there was an implicit kill of a super-register overlapping
    // the copy result, we would kill the subregisters that previous copies
    // defined.

    if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
      CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
  }
}

void TargetInstrInfo::lowerCopy(MachineInstr *MI,
                                const TargetRegisterInfo *TRI) const {
  if (MI->allDefsAreDead()) {
    MI->setDesc(get(TargetOpcode::KILL));
    return;
  }

  MachineOperand &DstMO = MI->getOperand(0);
  MachineOperand &SrcMO = MI->getOperand(1);

  bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
  if (IdentityCopy || SrcMO.isUndef()) {
    // No need to insert an identity copy instruction, but replace with a KILL
    // if liveness is changed.
    if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
      // We must make sure the super-register gets killed. Replace the
      // instruction with KILL.
      MI->setDesc(get(TargetOpcode::KILL));
      return;
    }
    // Vanilla identity copy.
    MI->eraseFromParent();
    return;
  }

  copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
              SrcMO.getReg(), SrcMO.isKill(),
              DstMO.getReg().isPhysical() ? DstMO.isRenamable() : false,
              SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);

  if (MI->getNumOperands() > 2)
    transferImplicitOperands(MI, TRI);
  MI->eraseFromParent();
}

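// For example (illustrative, AArch64-style): lowering
//   $x0 = COPY $x1
// dispatches to copyPhysReg, which emits the target's real move (for AArch64
// an ORR with XZR), whereas the identity copy
//   $x0 = COPY $x0
// with no implicit operands is simply erased.
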
bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And at least one operand must be defined in MBB.
  return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
}

bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
                                               unsigned Opcode2) const {
  return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned Opcode = Inst.getOpcode();

  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
             areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative or be the
  //    inverse of such an operation (this can be different even for
  //    instructions with the same opcode if traits like fast-math-flags are
  //    included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
         (isAssociativeAndCommutative(*MI1) ||
          isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative or be the inverse of
//    such an operation.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return (isAssociativeAndCommutative(Inst) ||
          isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool TargetInstrInfo::isThroughputPattern(unsigned Pattern) const {
  return false;
}

CombinerObjective
TargetInstrInfo::getCombinerObjective(unsigned Pattern) const {
  return CombinerObjective::Default;
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getReassociationOpcodes(unsigned Pattern,
                                         const MachineInstr &Root,
                                         const MachineInstr &Prev) const {
  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);

  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change the operand order. In this case the
  // opcodes are not required to have inverse versions.
  if (AssocCommutRoot && AssocCommutPrev) {
    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
    return std::make_pair(Root.getOpcode(), Root.getOpcode());
  }

  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that
  // the instructions' opcodes are equal or one of them is the inverse of the
  // other.
  assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
         "Incorrectly matched pattern");
  unsigned AssocCommutOpcode = Root.getOpcode();
  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
  if (!AssocCommutRoot)
    std::swap(AssocCommutOpcode, InverseOpcode);

  // The transformation rule (`+` is any associative and commutative binary
  // operation, `-` is the inverse):
  // REASSOC_AX_BY:
  //   (A + X) + Y => A + (X + Y)
  //   (A + X) - Y => A + (X - Y)
  //   (A - X) + Y => A - (X - Y)
  //   (A - X) - Y => A - (X + Y)
  // REASSOC_XA_BY:
  //   (X + A) + Y => (X + Y) + A
  //   (X + A) - Y => (X - Y) + A
  //   (X - A) + Y => (X + Y) - A
  //   (X - A) - Y => (X - Y) - A
  // REASSOC_AX_YB:
  //   Y + (A + X) => (Y + X) + A
  //   Y - (A + X) => (Y - X) - A
  //   Y + (A - X) => (Y - X) + A
  //   Y - (A - X) => (Y + X) - A
  // REASSOC_XA_YB:
  //   Y + (X + A) => (Y + X) + A
  //   Y - (X + A) => (Y - X) - A
  //   Y + (X - A) => (Y + X) - A
  //   Y - (X - A) => (Y - X) + A
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    break;
  }
  llvm_unreachable("Unhandled combination");
}

// Return a pair of boolean flags showing if the new root and new prev operands
// must be swapped. See visual example of the rule in
// TargetInstrInfo::getReassociationOpcodes.
static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    return {false, false};
  case MachineCombinerPattern::REASSOC_XA_BY:
    return {true, false};
  case MachineCombinerPattern::REASSOC_AX_YB:
    return {true, true};
  case MachineCombinerPattern::REASSOC_XA_YB:
    return {true, true};
  }
}

void TargetInstrInfo::getReassociateOperandIndices(
    const MachineInstr &Root, unsigned Pattern,
    std::array<unsigned, 5> &OperandIndices) const {
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
    OperandIndices = {1, 1, 1, 2, 2};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    OperandIndices = {2, 1, 2, 2, 1};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    OperandIndices = {1, 2, 1, 1, 2};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    OperandIndices = {2, 2, 2, 1, 1};
    break;
  default:
    llvm_unreachable("unexpected MachineCombinerPattern");
  }
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    ArrayRef<unsigned> OperandIndices,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  MachineOperand &OpA = Prev.getOperand(OperandIndices[1]);
  MachineOperand &OpB = Root.getOperand(OperandIndices[2]);
  MachineOperand &OpX = Prev.getOperand(OperandIndices[3]);
  MachineOperand &OpY = Root.getOperand(OperandIndices[4]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (RegA.isVirtual())
    MRI.constrainRegClass(RegA, RC);
  if (RegB.isVirtual())
    MRI.constrainRegClass(RegB, RC);
  if (RegX.isVirtual())
    MRI.constrainRegClass(RegX, RC);
  if (RegY.isVirtual())
    MRI.constrainRegClass(RegY, RC);
  if (RegC.isVirtual())
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();
  bool KillNewVR = true;

  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);

  if (SwapPrevOperands) {
    std::swap(RegX, RegY);
    std::swap(KillX, KillY);
  }

  unsigned PrevFirstOpIdx, PrevSecondOpIdx;
  unsigned RootFirstOpIdx, RootSecondOpIdx;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
    PrevFirstOpIdx = OperandIndices[1];
    PrevSecondOpIdx = OperandIndices[3];
    RootFirstOpIdx = OperandIndices[2];
    RootSecondOpIdx = OperandIndices[4];
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    PrevFirstOpIdx = OperandIndices[1];
    PrevSecondOpIdx = OperandIndices[3];
    RootFirstOpIdx = OperandIndices[4];
    RootSecondOpIdx = OperandIndices[2];
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    PrevFirstOpIdx = OperandIndices[3];
    PrevSecondOpIdx = OperandIndices[1];
    RootFirstOpIdx = OperandIndices[2];
    RootSecondOpIdx = OperandIndices[4];
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    PrevFirstOpIdx = OperandIndices[3];
    PrevSecondOpIdx = OperandIndices[1];
    RootFirstOpIdx = OperandIndices[4];
    RootSecondOpIdx = OperandIndices[2];
    break;
  default:
    llvm_unreachable("unexpected MachineCombinerPattern");
  }

  // Basically BuildMI but doesn't add implicit operands by default.
  auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
                              const MCInstrDesc &MCID, Register DestReg) {
    return MachineInstrBuilder(
               MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
        .setPCSections(MIMD.getPCSections())
        .addReg(DestReg, RegState::Define);
  };

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
  for (const auto &MO : Prev.explicit_operands()) {
    unsigned Idx = MO.getOperandNo();
    // Skip the result operand we'd already added.
    if (Idx == 0)
      continue;
    if (Idx == PrevFirstOpIdx)
      MIB1.addReg(RegX, getKillRegState(KillX));
    else if (Idx == PrevSecondOpIdx)
      MIB1.addReg(RegY, getKillRegState(KillY));
    else
      MIB1.add(MO);
  }
  MIB1.copyImplicitOps(Prev);

  if (SwapRootOperands) {
    std::swap(RegA, NewVR);
    std::swap(KillA, KillNewVR);
  }

  MachineInstrBuilder MIB2 =
      buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
  for (const auto &MO : Root.explicit_operands()) {
    unsigned Idx = MO.getOperandNo();
    // Skip the result operand.
    if (Idx == 0)
      continue;
    if (Idx == RootFirstOpIdx)
      MIB2 = MIB2.addReg(RegA, getKillRegState(KillA));
    else if (Idx == RootSecondOpIdx)
      MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR));
    else
      MIB2 = MIB2.add(MO);
  }
  MIB2.copyImplicitOps(Root);

  // Propagate FP flags from the original instructions.
  // But clear poison-generating flags because those may not be valid now.
  // TODO: There should be a helper function for copying only fast-math-flags.
  uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
  MIB1->setFlags(IntersectedFlags);
  MIB1->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::IsExact);

  MIB2->setFlags(IntersectedFlags);
  MIB2->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::IsExact);

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);

  // We transformed:
  // B = A op X (Prev)
  // C = B op Y (Root)
  // Into:
  // B = X op Y (MIB1)
  // C = A op B (MIB2)
  // C has the same value as before, B doesn't; as such, keep the debug number
  // of C but not of B.
  if (unsigned OldRootNum = Root.peekDebugInstrNum())
    MIB2.getInstr()->setDebugInstrNum(OldRootNum);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  std::array<unsigned, 5> OperandIndices;
  getReassociateOperandIndices(Root, Pattern, OperandIndices);
  MachineInstr *Prev =
      MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());

  // Don't reassociate if Prev and Root are in different blocks.
  if (Prev->getParent() != Root.getParent())
    return;

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
                 InstIdxForVirtReg);
}

MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
  return MachineTraceStrategy::TS_MinInstrCount;
}

bool TargetInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register.  Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def.  There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

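// Examples (illustrative, AArch64-style opcodes): a constant materialization
// such as
//   %0:gpr64 = MOVi64imm 42
// passes every check above (single virtual def, no register uses, no side
// effects) and is trivially rematerializable, while
//   %2:gpr64 = ADDXrr %0, %1
// is rejected because it uses virtual registers, and anything defining a
// physical register is rejected outright.
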
1353 int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
1354   const MachineFunction *MF = MI.getMF();
1355   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
1356   bool StackGrowsDown =
1357     TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
1358 
1359   unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
1360   unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
1361 
1362   if (!isFrameInstr(MI))
1363     return 0;
1364 
1365   int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
1366 
1367   if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
1368       (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
1369     SPAdj = -SPAdj;
1370 
1371   return SPAdj;
1372 }
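
// For getSPAdjust on a stack-grows-down target whose call-frame pseudos are
// ADJCALLSTACKDOWN/ADJCALLSTACKUP (placeholder names; they vary per target):
//
//   ADJCALLSTACKDOWN 16, ...   ; frame setup   -> getSPAdjust() ==  16
//   ADJCALLSTACKUP   16, ...   ; frame destroy -> getSPAdjust() == -16
//
// On a stack-grows-up target the signs flip, per the condition above.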
1373 
1374 /// isSchedulingBoundary - Test if the given instruction should be
1375 /// considered a scheduling boundary. This primarily includes labels
1376 /// and terminators.
1377 bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1378                                            const MachineBasicBlock *MBB,
1379                                            const MachineFunction &MF) const {
1380   // Terminators and labels can't be scheduled around.
1381   if (MI.isTerminator() || MI.isPosition())
1382     return true;
1383 
1384   // INLINEASM_BR can jump to another block
1385   if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1386     return true;
1387 
1388   // Don't attempt to schedule around any instruction that defines
1389   // a stack-oriented pointer, as it's unlikely to be profitable. This
1390   // saves compile time, because it doesn't require every single
1391   // stack slot reference to depend on the instruction that does the
1392   // modification.
1393   const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
1394   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1395   return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
1396 }
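
// For isSchedulingBoundary, e.g. a return, an EH label, or an INLINEASM_BR
// reports true, as does any instruction that writes the stack pointer (such
// as a dynamic stack adjustment), while an ordinary ALU op reports false.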
1397 
1398 // Provide a global flag for disabling the PreRA hazard recognizer that targets
1399 // may choose to honor.
1400 bool TargetInstrInfo::usePreRAHazardRecognizer() const {
1401   return !DisableHazardRecognizer;
1402 }
1403 
1404 // Default implementation of CreateTargetHazardRecognizer.
1405 ScheduleHazardRecognizer *TargetInstrInfo::
1406 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1407                              const ScheduleDAG *DAG) const {
1408   // Dummy hazard recognizer allows all instructions to issue.
1409   return new ScheduleHazardRecognizer();
1410 }
1411 
1412 // Default implementation of CreateTargetMIHazardRecognizer.
1413 ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
1414     const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
1415   return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1416 }
1417 
1418 // Default implementation of CreateTargetPostRAHazardRecognizer.
1419 ScheduleHazardRecognizer *TargetInstrInfo::
1420 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
1421                                    const ScheduleDAG *DAG) const {
1422   return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1423 }
1424 
1425 // Default implementation of getMemOperandWithOffset.
1426 bool TargetInstrInfo::getMemOperandWithOffset(
1427     const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
1428     bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
1429   SmallVector<const MachineOperand *, 4> BaseOps;
1430   LocationSize Width = 0;
1431   if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
1432                                      Width, TRI) ||
1433       BaseOps.size() != 1)
1434     return false;
1435   BaseOp = BaseOps.front();
1436   return true;
1437 }
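
// A hypothetical caller sketch for getMemOperandWithOffset (local variable
// names are illustrative):
//
//   const MachineOperand *Base;
//   int64_t Offset;
//   bool Scalable;
//   if (TII->getMemOperandWithOffset(MI, Base, Offset, Scalable, TRI))
//     ; // MI accesses Base->getReg() + Offset (scalable if Scalable is set).
//
// Accesses with multiple base operands are reported only through
// getMemOperandsWithOffsetWidth; this wrapper rejects them.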
1438 
1439 //===----------------------------------------------------------------------===//
1440 //  SelectionDAG latency interface.
1441 //===----------------------------------------------------------------------===//
1442 
1443 std::optional<unsigned>
1444 TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1445                                    SDNode *DefNode, unsigned DefIdx,
1446                                    SDNode *UseNode, unsigned UseIdx) const {
1447   if (!ItinData || ItinData->isEmpty())
1448     return std::nullopt;
1449 
1450   if (!DefNode->isMachineOpcode())
1451     return std::nullopt;
1452 
1453   unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1454   if (!UseNode->isMachineOpcode())
1455     return ItinData->getOperandCycle(DefClass, DefIdx);
1456   unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1457   return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1458 }
1459 
1460 unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1461                                           SDNode *N) const {
1462   if (!ItinData || ItinData->isEmpty())
1463     return 1;
1464 
1465   if (!N->isMachineOpcode())
1466     return 1;
1467 
1468   return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1469 }
1470 
1471 //===----------------------------------------------------------------------===//
1472 //  MachineInstr latency interface.
1473 //===----------------------------------------------------------------------===//
1474 
1475 unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1476                                          const MachineInstr &MI) const {
1477   if (!ItinData || ItinData->isEmpty())
1478     return 1;
1479 
1480   unsigned Class = MI.getDesc().getSchedClass();
1481   int UOps = ItinData->Itineraries[Class].NumMicroOps;
1482   if (UOps >= 0)
1483     return UOps;
1484 
1485   // The # of u-ops is dynamically determined. The specific target should
1486   // override this function to return the right number.
1487   return 1;
1488 }
1489 
1490 /// Return the default expected latency for a def based on its opcode.
1491 unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1492                                             const MachineInstr &DefMI) const {
1493   if (DefMI.isTransient())
1494     return 0;
1495   if (DefMI.mayLoad())
1496     return SchedModel.LoadLatency;
1497   if (isHighLatencyDef(DefMI.getOpcode()))
1498     return SchedModel.HighLatency;
1499   return 1;
1500 }
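
// Illustrating defaultDefLatency's fallback ladder with made-up model
// numbers: a transient COPY -> 0 cycles; a load on a model with
// LoadLatency == 4 -> 4; an opcode the target's isHighLatencyDef() flags
// (say, a divide) on a model with HighLatency == 10 -> 10; anything else -> 1.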
1501 
1502 unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
1503   return 0;
1504 }
1505 
1506 unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1507                                           const MachineInstr &MI,
1508                                           unsigned *PredCost) const {
1509   // Default to one cycle for no itinerary. However, an "empty" itinerary may
1510   // still have a MinLatency property, which getStageLatency checks.
1511   if (!ItinData)
1512     return MI.mayLoad() ? 2 : 1;
1513 
1514   return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1515 }
1516 
1517 bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1518                                        const MachineInstr &DefMI,
1519                                        unsigned DefIdx) const {
1520   const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1521   if (!ItinData || ItinData->isEmpty())
1522     return false;
1523 
1524   unsigned DefClass = DefMI.getDesc().getSchedClass();
1525   std::optional<unsigned> DefCycle =
1526       ItinData->getOperandCycle(DefClass, DefIdx);
1527   return DefCycle && DefCycle <= 1U;
1528 }
1529 
1530 bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {
1531   // TODO: We don't split functions where a section attribute has been set
1532   // since the split part may not be placed in a contiguous region. It may also
1533   // be more beneficial to augment the linker to ensure contiguous layout of
1534   // split functions within the same section as specified by the attribute.
1535   if (MF.getFunction().hasSection())
1536     return false;
1537 
1538   // We don't want to proceed further for cold functions
1539   // or functions of unknown hotness. Lukewarm functions have no prefix.
1540   std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1541   if (SectionPrefix &&
1542       (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1543     return false;
1544   }
1545 
1546   return true;
1547 }
1548 
1549 std::optional<ParamLoadedValue>
1550 TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
1551                                      Register Reg) const {
1552   const MachineFunction *MF = MI.getMF();
1553   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1554   DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
1555   int64_t Offset;
1556   bool OffsetIsScalable;
1557 
1558   // To simplify the sub-register handling, verify that we only need to
1559   // consider physical registers.
1560   assert(MF->getProperties().hasProperty(
1561       MachineFunctionProperties::Property::NoVRegs));
1562 
1563   if (auto DestSrc = isCopyInstr(MI)) {
1564     Register DestReg = DestSrc->Destination->getReg();
1565 
1566     // If the copy destination is the forwarding reg, describe the forwarding
1567     // reg using the copy source as the backup location. Example:
1568     //
1569     //   x0 = MOV x7
1570     //   call callee(x0)      ; x0 described as x7
1571     if (Reg == DestReg)
1572       return ParamLoadedValue(*DestSrc->Source, Expr);
1573 
1574     // If the target's hook couldn't describe this copy, give up.
1575     return std::nullopt;
1576   } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1577     Register SrcReg = RegImm->Reg;
1578     Offset = RegImm->Imm;
1579     Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
1580     return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1581   } else if (MI.hasOneMemOperand()) {
1582     // Only describe memory which provably does not escape the function. As
1583     // described in llvm.org/PR43343, escaped memory may be clobbered by the
1584     // callee (or by another thread).
1585     const auto &TII = MF->getSubtarget().getInstrInfo();
1586     const MachineFrameInfo &MFI = MF->getFrameInfo();
1587     const MachineMemOperand *MMO = MI.memoperands()[0];
1588     const PseudoSourceValue *PSV = MMO->getPseudoValue();
1589 
1590     // If the address points to "special" memory (e.g. a spill slot), it's
1591     // sufficient to check that it isn't aliased by any high-level IR value.
1592     if (!PSV || PSV->mayAlias(&MFI))
1593       return std::nullopt;
1594 
1595     const MachineOperand *BaseOp;
1596     if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
1597                                       TRI))
1598       return std::nullopt;
1599 
1600     // FIXME: Scalable offsets are not yet handled in the offset code below.
1601     if (OffsetIsScalable)
1602       return std::nullopt;
1603 
1604     // TODO: Can currently only handle mem instructions with a single def.
1605     // An example from the x86 target:
1606     //    ...
1607     //    DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1608     //    ...
1609     //
1610     if (MI.getNumExplicitDefs() != 1)
1611       return std::nullopt;
1612 
1613     // TODO: In what way do we need to take Reg into consideration here?
1614 
1615     SmallVector<uint64_t, 8> Ops;
1616     DIExpression::appendOffset(Ops, Offset);
1617     Ops.push_back(dwarf::DW_OP_deref_size);
1618     Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
1619                                             : ~UINT64_C(0));
1620     Expr = DIExpression::prependOpcodes(Expr, Ops);
1621     return ParamLoadedValue(*BaseOp, Expr);
1622   }
1623 
1624   return std::nullopt;
1625 }
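
// Two illustrative describeLoadedValue cases (AArch64-flavored placeholder
// instructions):
//
//   $x0 = ORRXrs $xzr, $x7, 0   ; copy    -> $x0 described as $x7
//   $x0 = ADDXri $x5, 16, 0     ; add-imm -> $x5 + DW_OP_plus_uconst 16
//
// A reload with a single memoperand on a provably non-escaping slot is
// described as a sized dereference (DW_OP_deref_size) of its base register
// plus offset.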
1626 
1627 // Get the call frame size just before MI.
1628 unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI) const {
1629   // Search backwards from MI for the most recent call frame instruction.
1630   MachineBasicBlock *MBB = MI.getParent();
1631   for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
1632     if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1633       return getFrameTotalSize(AdjI);
1634     if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1635       return 0;
1636   }
1637 
1638   // If none was found, use the call frame size from the start of the basic
1639   // block.
1640   return MBB->getCallFrameSize();
1641 }
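
// For getCallFrameSizeAt (placeholder pseudo names again):
//
//   ADJCALLSTACKDOWN 32, ...
//   <MI>                        ; getCallFrameSizeAt(MI) == 32
//   ADJCALLSTACKUP 32, ...
//   <MI>                        ; getCallFrameSizeAt(MI) == 0
//
// With no frame pseudo above MI in the block, the block's incoming call
// frame size is returned.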
1642 
1643 /// Both DefMI and UseMI must be valid.  By default, call directly to the
1644 /// itinerary. This may be overridden by the target.
1645 std::optional<unsigned> TargetInstrInfo::getOperandLatency(
1646     const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1647     unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1648   unsigned DefClass = DefMI.getDesc().getSchedClass();
1649   unsigned UseClass = UseMI.getDesc().getSchedClass();
1650   return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1651 }
1652 
1653 bool TargetInstrInfo::getRegSequenceInputs(
1654     const MachineInstr &MI, unsigned DefIdx,
1655     SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1656   assert((MI.isRegSequence() ||
1657           MI.isRegSequenceLike()) && "Instruction does not have the proper type");
1658 
1659   if (!MI.isRegSequence())
1660     return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1661 
1662   // We are looking at:
1663   // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1664   assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1665   for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1666        OpIdx += 2) {
1667     const MachineOperand &MOReg = MI.getOperand(OpIdx);
1668     if (MOReg.isUndef())
1669       continue;
1670     const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1671     assert(MOSubIdx.isImm() &&
1672            "One of the subindex of the reg_sequence is not an immediate");
1673     // Record Reg:SubReg, SubIdx.
1674     InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1675                                             (unsigned)MOSubIdx.getImm()));
1676   }
1677   return true;
1678 }
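
// Illustrative MIR for getRegSequenceInputs (registers and subindices are
// placeholders):
//
//   %dst = REG_SEQUENCE %v0, %subreg.sub0, %v1, %subreg.sub1
//
// fills InputRegs with { (%v0, no subreg, sub0), (%v1, no subreg, sub1) },
// skipping any undef inputs.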
1679 
1680 bool TargetInstrInfo::getExtractSubregInputs(
1681     const MachineInstr &MI, unsigned DefIdx,
1682     RegSubRegPairAndIdx &InputReg) const {
1683   assert((MI.isExtractSubreg() ||
1684       MI.isExtractSubregLike()) && "Instruction does not have the proper type");
1685 
1686   if (!MI.isExtractSubreg())
1687     return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1688 
1689   // We are looking at:
1690   // Def = EXTRACT_SUBREG v0.sub1, sub0.
1691   assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1692   const MachineOperand &MOReg = MI.getOperand(1);
1693   if (MOReg.isUndef())
1694     return false;
1695   const MachineOperand &MOSubIdx = MI.getOperand(2);
1696   assert(MOSubIdx.isImm() &&
1697          "The subindex of the extract_subreg is not an immediate");
1698 
1699   InputReg.Reg = MOReg.getReg();
1700   InputReg.SubReg = MOReg.getSubReg();
1701   InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
1702   return true;
1703 }
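
// Illustrative MIR for getExtractSubregInputs:
//
//   %dst = EXTRACT_SUBREG %src, %subreg.sub_32
//
// yields InputReg = { %src, no subreg, sub_32 }; an undef source makes the
// function report failure instead.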
1704 
1705 bool TargetInstrInfo::getInsertSubregInputs(
1706     const MachineInstr &MI, unsigned DefIdx,
1707     RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
1708   assert((MI.isInsertSubreg() ||
1709       MI.isInsertSubregLike()) && "Instruction does not have the proper type");
1710 
1711   if (!MI.isInsertSubreg())
1712     return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
1713 
1714   // We are looking at:
1715   // Def = INSERT_SUBREG v0, v1, sub0.
1716   assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
1717   const MachineOperand &MOBaseReg = MI.getOperand(1);
1718   const MachineOperand &MOInsertedReg = MI.getOperand(2);
1719   if (MOInsertedReg.isUndef())
1720     return false;
1721   const MachineOperand &MOSubIdx = MI.getOperand(3);
1722   assert(MOSubIdx.isImm() &&
1723          "One of the subindex of the reg_sequence is not an immediate");
1724   BaseReg.Reg = MOBaseReg.getReg();
1725   BaseReg.SubReg = MOBaseReg.getSubReg();
1726 
1727   InsertedReg.Reg = MOInsertedReg.getReg();
1728   InsertedReg.SubReg = MOInsertedReg.getSubReg();
1729   InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
1730   return true;
1731 }
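
// Illustrative MIR for getInsertSubregInputs:
//
//   %dst = INSERT_SUBREG %base, %ins, %subreg.sub0
//
// yields BaseReg = { %base, no subreg } and InsertedReg = { %ins, no subreg,
// sub0 }; an undef inserted value makes the function report failure.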
1732 
1733 // Returns a MIRPrinter comment for this machine operand.
1734 std::string TargetInstrInfo::createMIROperandComment(
1735     const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
1736     const TargetRegisterInfo *TRI) const {
1737 
1738   if (!MI.isInlineAsm())
1739     return "";
1740 
1741   std::string Flags;
1742   raw_string_ostream OS(Flags);
1743 
1744   if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
1745     // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1746     unsigned ExtraInfo = Op.getImm();
1747     bool First = true;
1748     for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
1749       if (!First)
1750         OS << " ";
1751       First = false;
1752       OS << Info;
1753     }
1754 
1755     return Flags;
1756   }
1757 
1758   int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
1759   if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
1760     return "";
1761 
1762   assert(Op.isImm() && "Expected flag operand to be an immediate");
1763   // Pretty print the inline asm operand descriptor.
1764   unsigned Flag = Op.getImm();
1765   const InlineAsm::Flag F(Flag);
1766   OS << F.getKindName();
1767 
1768   unsigned RCID;
1769   if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
1770     if (TRI)
1771       OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1772     else
1773       OS << ":RC" << RCID;
1774   }
1775 
1776   if (F.isMemKind()) {
1777     InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1778     OS << ":" << InlineAsm::getMemConstraintName(MCID);
1779   }
1780 
1781   unsigned TiedTo;
1782   if (F.isUseOperandTiedToDef(TiedTo))
1783     OS << " tiedto:$" << TiedTo;
1784 
1785   if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
1786       F.getRegMayBeFolded())
1787     OS << " foldable";
1788 
1789   return Flags;
1790 }
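
// The comments produced above show up in MIR output shaped roughly like the
// following (immediates elided; illustrative, not verbatim output):
//
//   INLINEASM &"..." <imm> /* sideeffect mayload attdialect */,
//     <imm> /* regdef:GR32 */, def $eax,
//     <imm> /* mem:m */, ...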
1791 
1792 TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;
1793 
1794 void TargetInstrInfo::mergeOutliningCandidateAttributes(
1795     Function &F, std::vector<outliner::Candidate> &Candidates) const {
1796   // Include target features from an arbitrary candidate for the outlined
1797   // function. This makes sure the outlined function knows what kinds of
1798   // instructions are going into it. This is fine, since all parent functions
1799   // must necessarily support the instructions that are in the outlined region.
1800   outliner::Candidate &FirstCand = Candidates.front();
1801   const Function &ParentFn = FirstCand.getMF()->getFunction();
1802   if (ParentFn.hasFnAttribute("target-features"))
1803     F.addFnAttr(ParentFn.getFnAttribute("target-features"));
1804   if (ParentFn.hasFnAttribute("target-cpu"))
1805     F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
1806 
1807   // Set nounwind, so we don't generate eh_frame.
1808   if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
1809         return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
1810       }))
1811     F.addFnAttr(Attribute::NoUnwind);
1812 }
1813 
1814 outliner::InstrType
1815 TargetInstrInfo::getOutliningType(const MachineModuleInfo &MMI,
1816                                   MachineBasicBlock::iterator &MIT,
1817                                   unsigned Flags) const {
1818   MachineInstr &MI = *MIT;
1819 
1820   // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
1821   // have support for outlining those. Special-case that here.
1822   if (MI.isCFIInstruction())
1823     // Just go right to the target implementation.
1824     return getOutliningTypeImpl(MMI, MIT, Flags);
1825 
1826   // Be conservative about inline assembly.
1827   if (MI.isInlineAsm())
1828     return outliner::InstrType::Illegal;
1829 
1830   // Labels generally can't safely be outlined.
1831   if (MI.isLabel())
1832     return outliner::InstrType::Illegal;
1833 
1834   // Don't let debug instructions impact analysis.
1835   if (MI.isDebugInstr())
1836     return outliner::InstrType::Invisible;
1837 
1838   // Some other special cases.
1839   switch (MI.getOpcode()) {
1840     case TargetOpcode::IMPLICIT_DEF:
1841     case TargetOpcode::KILL:
1842     case TargetOpcode::LIFETIME_START:
1843     case TargetOpcode::LIFETIME_END:
1844       return outliner::InstrType::Invisible;
1845     default:
1846       break;
1847   }
1848 
1849   // Is this a terminator for a basic block?
1850   if (MI.isTerminator()) {
1851     // If this is a branch to another block, we can't outline it.
1852     if (!MI.getParent()->succ_empty())
1853       return outliner::InstrType::Illegal;
1854 
1855     // Don't outline if the branch is not unconditional.
1856     if (isPredicated(MI))
1857       return outliner::InstrType::Illegal;
1858   }
1859 
1860   // Make sure none of the operands of this instruction do anything that
1861   // might break if they're moved outside their current function.
1862   // This includes MachineBasicBlock references, BlockAddresses,
1863   // constant pool indices, and jump table indices.
1864   //
1865   // A quick note on MO_TargetIndex:
1866   // This doesn't seem to be used in any of the architectures that the
1867   // MachineOutliner supports, but it was still filtered out in all of them.
1868   // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
1869   // As such, this check is removed both here and in the target-specific
1870   // implementations. Instead, we assert to make sure this doesn't
1871   // catch anyone off-guard somewhere down the line.
1872   for (const MachineOperand &MOP : MI.operands()) {
1873     // If you hit this assertion, please remove it and adjust
1874     // `getOutliningTypeImpl` for your target appropriately if necessary.
1875     // Adding the assertion back to other supported architectures
1876     // would be nice too :)
1877     assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
1878 
1879     // CFI instructions should already have been filtered out at this point.
1880     assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
1881 
1882     // PrologEpilogInserter should've already run at this point.
1883     assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
1884 
1885     if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
1886       return outliner::InstrType::Illegal;
1887   }
1888 
1889   // If we don't know, delegate to the target-specific hook.
1890   return getOutliningTypeImpl(MMI, MIT, Flags);
1891 }
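
// Summarizing the ladder above (the target hook still gets the final say):
// CFI directives go straight to getOutliningTypeImpl; inline asm, labels,
// predicated or block-internal terminators, and operands referencing MBBs,
// block addresses, constant pools, or jump tables are Illegal; debug
// instructions, IMPLICIT_DEF, KILL, and LIFETIME markers are Invisible.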
1892 
1893 bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
1894                                              unsigned &Flags) const {
1895   // Some instrumentations create special TargetOpcodes at the start which
1896   // expand to special code sequences that must be present.
1897   auto First = MBB.getFirstNonDebugInstr();
1898   if (First == MBB.end())
1899     return true;
1900 
1901   if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
1902       First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
1903     return false;
1904 
1905   // Some instrumentations create special pseudo-instructions at or just before
1906   // the end that must be present.
1907   auto Last = MBB.getLastNonDebugInstr();
1908   if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
1909       Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
1910     return false;
1911 
1912   if (Last != First && Last->isReturn()) {
1913     --Last;
1914     if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
1915         Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
1916       return false;
1917   }
1918   return true;
1919 }
1920 
1921 bool TargetInstrInfo::isGlobalMemoryObject(const MachineInstr *MI) const {
1922   return MI->isCall() || MI->hasUnmodeledSideEffects() ||
1923          (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
1924 }
1925