xref: /llvm-project/llvm/lib/CodeGen/MachineInstr.cpp (revision 74ca5275e9509e5ec4e9f1d9123bc10674734b20)
1 //===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Methods common to all machine instructions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/MachineInstr.h"
14 #include "llvm/ADT/APFloat.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/FoldingSet.h"
17 #include "llvm/ADT/Hashing.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallBitVector.h"
21 #include "llvm/ADT/SmallString.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/Loads.h"
25 #include "llvm/Analysis/MemoryLocation.h"
26 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
27 #include "llvm/CodeGen/MachineBasicBlock.h"
28 #include "llvm/CodeGen/MachineFrameInfo.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineInstrBundle.h"
32 #include "llvm/CodeGen/MachineMemOperand.h"
33 #include "llvm/CodeGen/MachineModuleInfo.h"
34 #include "llvm/CodeGen/MachineOperand.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/PseudoSourceValue.h"
37 #include "llvm/CodeGen/TargetInstrInfo.h"
38 #include "llvm/CodeGen/TargetRegisterInfo.h"
39 #include "llvm/CodeGen/TargetSubtargetInfo.h"
40 #include "llvm/Config/llvm-config.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DebugInfoMetadata.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/InlineAsm.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Intrinsics.h"
49 #include "llvm/IR/LLVMContext.h"
50 #include "llvm/IR/Metadata.h"
51 #include "llvm/IR/Module.h"
52 #include "llvm/IR/ModuleSlotTracker.h"
53 #include "llvm/IR/Operator.h"
54 #include "llvm/IR/Type.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/MC/MCInstrDesc.h"
57 #include "llvm/MC/MCRegisterInfo.h"
58 #include "llvm/MC/MCSymbol.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/Compiler.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Support/FormattedStream.h"
65 #include "llvm/Support/LowLevelTypeImpl.h"
66 #include "llvm/Support/MathExtras.h"
67 #include "llvm/Support/raw_ostream.h"
68 #include "llvm/Target/TargetIntrinsicInfo.h"
69 #include "llvm/Target/TargetMachine.h"
70 #include <algorithm>
71 #include <cassert>
72 #include <cstddef>
73 #include <cstdint>
74 #include <cstring>
75 #include <iterator>
76 #include <utility>
77 
78 using namespace llvm;
79 
80 static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
81   if (const MachineBasicBlock *MBB = MI.getParent())
82     if (const MachineFunction *MF = MBB->getParent())
83       return MF;
84   return nullptr;
85 }
86 
87 // Try to crawl up to the machine function and get TRI and IntrinsicInfo from
88 // it.
89 static void tryToGetTargetInfo(const MachineInstr &MI,
90                                const TargetRegisterInfo *&TRI,
91                                const MachineRegisterInfo *&MRI,
92                                const TargetIntrinsicInfo *&IntrinsicInfo,
93                                const TargetInstrInfo *&TII) {
94 
95   if (const MachineFunction *MF = getMFIfAvailable(MI)) {
96     TRI = MF->getSubtarget().getRegisterInfo();
97     MRI = &MF->getRegInfo();
98     IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
99     TII = MF->getSubtarget().getInstrInfo();
100   }
101 }
102 
103 void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
104   if (MCID->ImplicitDefs)
105     for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
106            ++ImpDefs)
107       addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
108   if (MCID->ImplicitUses)
109     for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
110            ++ImpUses)
111       addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
112 }
113 
/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
                           DebugLoc dl, bool NoImp)
    : MCID(&tid), debugLoc(std::move(dl)) {
  // The DebugLoc member must not need destruction (MachineInstrs are
  // presumably pool-allocated by MF and never individually destroyed —
  // see MF.allocateOperandArray below).
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands: the declared explicit
  // operands plus all implicit defs and uses from the descriptor.
  if (unsigned NumOps = MCID->getNumOperands() +
    MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  // Add the implicit def/use operands unless the caller opts out.
  if (!NoImp)
    addImplicitDefUseOperands(MF);
}
132 
/// MachineInstr ctor - Copies MachineInstr arg exactly
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) {
  // Note: Info (memoperands / symbols) is shared with MI by copying the
  // pointer-sum value, not deep-cloned.
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Allocate exactly as many operand slots as MI currently has.
  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}
149 
150 void MachineInstr::moveBefore(MachineInstr *MovePos) {
151   MovePos->getParent()->splice(MovePos, getParent(), getIterator());
152 }
153 
154 /// getRegInfo - If this instruction is embedded into a MachineFunction,
155 /// return the MachineRegisterInfo object for the current function, otherwise
156 /// return null.
157 MachineRegisterInfo *MachineInstr::getRegInfo() {
158   if (MachineBasicBlock *MBB = getParent())
159     return &MBB->getParent()->getRegInfo();
160   return nullptr;
161 }
162 
163 /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
164 /// this instruction from their respective use lists.  This requires that the
165 /// operands already be on their use lists.
166 void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
167   for (MachineOperand &MO : operands())
168     if (MO.isReg())
169       MRI.removeRegOperandFromUseList(&MO);
170 }
171 
172 /// AddRegOperandsToUseLists - Add all of the register operands in
173 /// this instruction from their respective use lists.  This requires that the
174 /// operands not be on their use lists yet.
175 void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
176   for (MachineOperand &MO : operands())
177     if (MO.isReg())
178       MRI.addRegOperandToUseList(&MO);
179 }
180 
181 void MachineInstr::addOperand(const MachineOperand &Op) {
182   MachineBasicBlock *MBB = getParent();
183   assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
184   MachineFunction *MF = MBB->getParent();
185   assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
186   addOperand(*MF, Op);
187 }
188 
189 /// Move NumOps MachineOperands from Src to Dst, with support for overlapping
190 /// ranges. If MRI is non-null also update use-def chains.
191 static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
192                          unsigned NumOps, MachineRegisterInfo *MRI) {
193   if (MRI)
194     return MRI->moveOperands(Dst, Src, NumOps);
195   // MachineOperand is a trivially copyable type so we can just use memmove.
196   assert(Dst && Src && "Unknown operands");
197   std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
198 }
199 
/// addOperand - Add the specified operand to the instruction.  If it is an
/// implicit operand, it is added to the end of the operand list.  If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand.  Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around.  See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    // Walk back over any trailing implicit register operands; the new
    // explicit operand is inserted before them.
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

#ifndef NDEBUG
  bool isDebugOp = Op.getType() == MachineOperand::MO_Metadata ||
                   Op.getType() == MachineOperand::MO_MCSymbol;
  // OpNo now points to the desired insertion point.  Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
          OpNo < MCID->getNumOperands() || isDebugOp) &&
         "Trying to add an operand to a machine instr that is already done!");
#endif

  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point.
  // (When no reallocation happened, OldOperands == Operands and this shifts
  // the tail one slot to the right within the same array.)
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
  }
}
296 
/// RemoveOperand - Erase an operand  from an instruction, leaving it with one
/// fewer operand than it started with.
///
void MachineInstr::RemoveOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  // Break any tie this operand participates in before it disappears.
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  // Detach a register operand from its use list before overwriting it.
  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  // Shift the tail left by one slot to close the gap.
  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}
323 
/// Rebuild the out-of-band "extra info" (memoperands, pre/post symbols,
/// heap-alloc marker) for this instruction. A single pointer is stored
/// inline in the PointerSumType; anything more is allocated out of line
/// in the MachineFunction.
void MachineInstr::setExtraInfo(MachineFunction &MF,
                                ArrayRef<MachineMemOperand *> MMOs,
                                MCSymbol *PreInstrSymbol,
                                MCSymbol *PostInstrSymbol,
                                MDNode *HeapAllocMarker) {
  bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
  bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
  bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
  int NumPointers =
      MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol + HasHeapAllocMarker;

  // Drop all extra info if there is none.
  if (NumPointers <= 0) {
    Info.clear();
    return;
  }

  // If more than one pointer, then store out of line. Store heap alloc markers
  // out of line because PointerSumType cannot hold more than 4 tag types with
  // 32-bit pointers.
  // FIXME: Maybe we should make the symbols in the extra info mutable?
  else if (NumPointers > 1 || HasHeapAllocMarker) {
    Info.set<EIIK_OutOfLine>(MF.createMIExtraInfo(
        MMOs, PreInstrSymbol, PostInstrSymbol, HeapAllocMarker));
    return;
  }

  // Otherwise store the single pointer inline.
  if (HasPreInstrSymbol)
    Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
  else if (HasPostInstrSymbol)
    Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
  else
    Info.set<EIIK_MMO>(MMOs[0]);
}
359 
/// Clear this instruction's memory operand list, preserving any pre/post
/// instruction symbols and the heap-alloc marker.
void MachineInstr::dropMemRefs(MachineFunction &MF) {
  if (memoperands_empty())
    return;

  // Rebuild the extra info with an empty MMO list.
  setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker());
}
367 
368 void MachineInstr::setMemRefs(MachineFunction &MF,
369                               ArrayRef<MachineMemOperand *> MMOs) {
370   if (MMOs.empty()) {
371     dropMemRefs(MF);
372     return;
373   }
374 
375   setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
376                getHeapAllocMarker());
377 }
378 
379 void MachineInstr::addMemOperand(MachineFunction &MF,
380                                  MachineMemOperand *MO) {
381   SmallVector<MachineMemOperand *, 2> MMOs;
382   MMOs.append(memoperands_begin(), memoperands_end());
383   MMOs.push_back(MO);
384   setMemRefs(MF, MMOs);
385 }
386 
387 void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
388   if (this == &MI)
389     // Nothing to do for a self-clone!
390     return;
391 
392   assert(&MF == MI.getMF() &&
393          "Invalid machine functions when cloning memory refrences!");
394   // See if we can just steal the extra info already allocated for the
395   // instruction. We can do this whenever the pre- and post-instruction symbols
396   // are the same (including null).
397   if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
398       getPostInstrSymbol() == MI.getPostInstrSymbol() &&
399       getHeapAllocMarker() == MI.getHeapAllocMarker()) {
400     Info = MI.Info;
401     return;
402   }
403 
404   // Otherwise, fall back on a copy-based clone.
405   setMemRefs(MF, MI.memoperands());
406 }
407 
408 /// Check to see if the MMOs pointed to by the two MemRefs arrays are
409 /// identical.
410 static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
411                              ArrayRef<MachineMemOperand *> RHS) {
412   if (LHS.size() != RHS.size())
413     return false;
414 
415   auto LHSPointees = make_pointee_range(LHS);
416   auto RHSPointees = make_pointee_range(RHS);
417   return std::equal(LHSPointees.begin(), LHSPointees.end(),
418                     RHSPointees.begin());
419 }
420 
/// Merge the memory operand lists of all instructions in MIs onto this one.
/// An empty memoperand list on any input forces dropping everything, since an
/// empty list means "may access anything" and must be merged conservatively.
void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
                                      ArrayRef<const MachineInstr *> MIs) {
  // Try handling easy numbers of MIs with simpler mechanisms.
  if (MIs.empty()) {
    dropMemRefs(MF);
    return;
  }
  if (MIs.size() == 1) {
    cloneMemRefs(MF, *MIs[0]);
    return;
  }
  // Because an empty memoperands list provides *no* information and must be
  // handled conservatively (assuming the instruction can do anything), the only
  // way to merge with it is to drop all other memoperands.
  if (MIs[0]->memoperands_empty()) {
    dropMemRefs(MF);
    return;
  }

  // Handle the general case.
  SmallVector<MachineMemOperand *, 2> MergedMMOs;
  // Start with the first instruction.
  assert(&MF == MIs[0]->getMF() &&
         "Invalid machine functions when cloning memory references!");
  MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
  // Now walk all the other instructions and accumulate any different MMOs.
  for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
    assert(&MF == MI.getMF() &&
           "Invalid machine functions when cloning memory references!");

    // Skip MIs with identical operands to the first. This is a somewhat
    // arbitrary hack but will catch common cases without being quadratic.
    // TODO: We could fully implement merge semantics here if needed.
    if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
      continue;

    // Because an empty memoperands list provides *no* information and must be
    // handled conservatively (assuming the instruction can do anything), the
    // only way to merge with it is to drop all other memoperands.
    if (MI.memoperands_empty()) {
      dropMemRefs(MF);
      return;
    }

    // Otherwise accumulate these into our temporary buffer of the merged state.
    MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
  }

  setMemRefs(MF, MergedMMOs);
}
471 
472 void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
473   // Do nothing if old and new symbols are the same.
474   if (Symbol == getPreInstrSymbol())
475     return;
476 
477   // If there was only one symbol and we're removing it, just clear info.
478   if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
479     Info.clear();
480     return;
481   }
482 
483   setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
484                getHeapAllocMarker());
485 }
486 
487 void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
488   // Do nothing if old and new symbols are the same.
489   if (Symbol == getPostInstrSymbol())
490     return;
491 
492   // If there was only one symbol and we're removing it, just clear info.
493   if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
494     Info.clear();
495     return;
496   }
497 
498   setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
499                getHeapAllocMarker());
500 }
501 
502 void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) {
503   // Do nothing if old and new symbols are the same.
504   if (Marker == getHeapAllocMarker())
505     return;
506 
507   setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
508                Marker);
509 }
510 
511 void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
512                                      const MachineInstr &MI) {
513   if (this == &MI)
514     // Nothing to do for a self-clone!
515     return;
516 
517   assert(&MF == MI.getMF() &&
518          "Invalid machine functions when cloning instruction symbols!");
519 
520   setPreInstrSymbol(MF, MI.getPreInstrSymbol());
521   setPostInstrSymbol(MF, MI.getPostInstrSymbol());
522   setHeapAllocMarker(MF, MI.getHeapAllocMarker());
523 }
524 
525 uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
526   // For now, the just return the union of the flags. If the flags get more
527   // complicated over time, we might need more logic here.
528   return getFlags() | Other.getFlags();
529 }
530 
/// Translate IR-level instruction flags (nsw/nuw, exact, fast-math) into the
/// corresponding MachineInstr::MIFlag bits and return the accumulated mask.
uint16_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
  uint16_t MIFlags = 0;
  // Copy the wrapping flags.
  if (const OverflowingBinaryOperator *OB =
          dyn_cast<OverflowingBinaryOperator>(&I)) {
    if (OB->hasNoSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoSWrap;
    if (OB->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  }

  // Copy the exact flag.
  if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
    if (PE->isExact())
      MIFlags |= MachineInstr::MIFlag::IsExact;

  // Copy the fast-math flags. Each IR FastMathFlags bit maps 1:1 onto an
  // Fm* MIFlag.
  if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
    const FastMathFlags Flags = FP->getFastMathFlags();
    if (Flags.noNaNs())
      MIFlags |= MachineInstr::MIFlag::FmNoNans;
    if (Flags.noInfs())
      MIFlags |= MachineInstr::MIFlag::FmNoInfs;
    if (Flags.noSignedZeros())
      MIFlags |= MachineInstr::MIFlag::FmNsz;
    if (Flags.allowReciprocal())
      MIFlags |= MachineInstr::MIFlag::FmArcp;
    if (Flags.allowContract())
      MIFlags |= MachineInstr::MIFlag::FmContract;
    if (Flags.approxFunc())
      MIFlags |= MachineInstr::MIFlag::FmAfn;
    if (Flags.allowReassoc())
      MIFlags |= MachineInstr::MIFlag::FmReassoc;
  }

  return MIFlags;
}
568 
569 void MachineInstr::copyIRFlags(const Instruction &I) {
570   Flags = copyFlagsFromInstruction(I);
571 }
572 
/// Walk every instruction in the bundle headed by this instruction, testing
/// each MCID flag word against Mask. AnyInBundle succeeds on the first match;
/// AllInBundle requires every non-BUNDLE instruction to match.
bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
  assert(!isBundledWithPred() && "Must be called on bundle header");
  for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
    if (MII->getDesc().getFlags() & Mask) {
      if (Type == AnyInBundle)
        return true;
    } else {
      // The BUNDLE pseudo itself is exempt from the AllInBundle requirement.
      if (Type == AllInBundle && !MII->isBundle())
        return false;
    }
    // This was the last instruction in the bundle.
    if (!MII->isBundledWithSucc())
      return Type == AllInBundle;
  }
}
588 
/// Compare this instruction to Other for structural identity: same opcode,
/// same operands (modulo the def-handling policy in Check), and — for bundles
/// — pairwise-identical bundled instructions.
bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other.getOpcode() != getOpcode() ||
      Other.getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // We have passed the test above that both instructions have the same
    // opcode, so we know that both instructions are bundles here. Let's compare
    // MIs inside the bundle.
    assert(Other.isBundle() && "Expected that both instructions are bundles.");
    MachineBasicBlock::const_instr_iterator I1 = getIterator();
    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
    // Loop until we analyzed the last instruction inside at least one of the
    // bundles.
    while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
      ++I1;
      ++I2;
      if (!I1->isIdenticalTo(*I2, Check))
        return false;
    }
    // If we've reached the end of just one of the two bundles, but not both,
    // the instructions are not identical.
    if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
      return false;
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other.getOperand(i);
    if (!MO.isReg()) {
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        // Only ignore the def when BOTH sides are virtual registers.
        if (!Register::isVirtualRegister(MO.getReg()) ||
            !Register::isVirtualRegister(OMO.getReg()))
          if (!MO.isIdenticalTo(OMO))
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two debug instructions are not identical.
  // (A missing DebugLoc on either side is treated as a wildcard.)
  if (isDebugInstr())
    if (getDebugLoc() && Other.getDebugLoc() &&
        getDebugLoc() != Other.getDebugLoc())
      return false;
  return true;
}
659 
660 const MachineFunction *MachineInstr::getMF() const {
661   return getParent()->getParent();
662 }
663 
664 MachineInstr *MachineInstr::removeFromParent() {
665   assert(getParent() && "Not embedded in a basic block!");
666   return getParent()->remove(this);
667 }
668 
669 MachineInstr *MachineInstr::removeFromBundle() {
670   assert(getParent() && "Not embedded in a basic block!");
671   return getParent()->remove_instr(this);
672 }
673 
674 void MachineInstr::eraseFromParent() {
675   assert(getParent() && "Not embedded in a basic block!");
676   getParent()->erase(this);
677 }
678 
679 void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
680   assert(getParent() && "Not embedded in a basic block!");
681   MachineBasicBlock *MBB = getParent();
682   MachineFunction *MF = MBB->getParent();
683   assert(MF && "Not embedded in a function!");
684 
685   MachineInstr *MI = (MachineInstr *)this;
686   MachineRegisterInfo &MRI = MF->getRegInfo();
687 
688   for (const MachineOperand &MO : MI->operands()) {
689     if (!MO.isReg() || !MO.isDef())
690       continue;
691     Register Reg = MO.getReg();
692     if (!Reg.isVirtual())
693       continue;
694     MRI.markUsesInDebugValueAsUndef(Reg);
695   }
696   MI->eraseFromParent();
697 }
698 
699 void MachineInstr::eraseFromBundle() {
700   assert(getParent() && "Not embedded in a basic block!");
701   getParent()->erase_instr(this);
702 }
703 
704 bool MachineInstr::isCandidateForCallSiteEntry(QueryType Type) const {
705   if (!isCall(Type))
706     return false;
707   switch (getOpcode()) {
708   case TargetOpcode::PATCHABLE_EVENT_CALL:
709   case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
710   case TargetOpcode::PATCHPOINT:
711   case TargetOpcode::STACKMAP:
712   case TargetOpcode::STATEPOINT:
713     return false;
714   }
715   return true;
716 }
717 
718 bool MachineInstr::shouldUpdateCallSiteInfo() const {
719   if (isBundle())
720     return isCandidateForCallSiteEntry(MachineInstr::AnyInBundle);
721   return isCandidateForCallSiteEntry();
722 }
723 
/// Count the explicit operands: the descriptor-declared count, plus — for
/// variadic instructions — any trailing operands before the first implicit
/// register.
unsigned MachineInstr::getNumExplicitOperands() const {
  unsigned NumOperands = MCID->getNumOperands();
  if (!MCID->isVariadic())
    return NumOperands;

  for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = getOperand(I);
    // The operands must always be in the following order:
    // - explicit reg defs,
    // - other explicit operands (reg uses, immediates, etc.),
    // - implicit reg defs
    // - implicit reg uses
    if (MO.isReg() && MO.isImplicit())
      break;
    ++NumOperands;
  }
  return NumOperands;
}
742 
/// Count the explicit defs: the descriptor-declared count, plus — for
/// variadic instructions — any further explicit register defs that follow
/// (explicit defs lead the operand list; see getNumExplicitOperands).
unsigned MachineInstr::getNumExplicitDefs() const {
  unsigned NumDefs = MCID->getNumDefs();
  if (!MCID->isVariadic())
    return NumDefs;

  for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = getOperand(I);
    // Stop at the first operand that is not an explicit register def.
    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
      break;
    ++NumDefs;
  }
  return NumDefs;
}
756 
757 void MachineInstr::bundleWithPred() {
758   assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
759   setFlag(BundledPred);
760   MachineBasicBlock::instr_iterator Pred = getIterator();
761   --Pred;
762   assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
763   Pred->setFlag(BundledSucc);
764 }
765 
766 void MachineInstr::bundleWithSucc() {
767   assert(!isBundledWithSucc() && "MI is already bundled with its successor");
768   setFlag(BundledSucc);
769   MachineBasicBlock::instr_iterator Succ = getIterator();
770   ++Succ;
771   assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
772   Succ->setFlag(BundledPred);
773 }
774 
775 void MachineInstr::unbundleFromPred() {
776   assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
777   clearFlag(BundledPred);
778   MachineBasicBlock::instr_iterator Pred = getIterator();
779   --Pred;
780   assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
781   Pred->clearFlag(BundledSucc);
782 }
783 
784 void MachineInstr::unbundleFromSucc() {
785   assert(isBundledWithSucc() && "MI isn't bundled with its successor");
786   clearFlag(BundledSucc);
787   MachineBasicBlock::instr_iterator Succ = getIterator();
788   ++Succ;
789   assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
790   Succ->clearFlag(BundledPred);
791 }
792 
793 bool MachineInstr::isStackAligningInlineAsm() const {
794   if (isInlineAsm()) {
795     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
796     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
797       return true;
798   }
799   return false;
800 }
801 
/// Decode the asm dialect (AT&T vs. Intel) from the INLINEASM extra-info
/// immediate operand.
InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
  assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
  unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
  // The dialect is a single bit; normalize it to the 0/1 enum values.
  return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
}
807 
/// For an inline asm operand at index OpIdx, find the index of the flag
/// immediate that starts the operand's group. Returns -1 for the fixed
/// leading operands or for trailing implicit registers. If GroupNo is
/// non-null, it receives the zero-based group number.
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  // Walk the operand groups: each group is a flag immediate followed by its
  // register operands, whose count is encoded in the flag.
  unsigned Group = 0;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}
835 
836 const DILabel *MachineInstr::getDebugLabel() const {
837   assert(isDebugLabel() && "not a DBG_LABEL");
838   return cast<DILabel>(getOperand(0).getMetadata());
839 }
840 
841 const MachineOperand &MachineInstr::getDebugVariableOp() const {
842   assert(isDebugValue() && "not a DBG_VALUE");
843   return getOperand(2);
844 }
845 
846 MachineOperand &MachineInstr::getDebugVariableOp() {
847   assert(isDebugValue() && "not a DBG_VALUE");
848   return getOperand(2);
849 }
850 
851 const DILocalVariable *MachineInstr::getDebugVariable() const {
852   assert(isDebugValue() && "not a DBG_VALUE");
853   return cast<DILocalVariable>(getOperand(2).getMetadata());
854 }
855 
856 MachineOperand &MachineInstr::getDebugExpressionOp() {
857   assert(isDebugValue() && "not a DBG_VALUE");
858   return getOperand(3);
859 }
860 
861 const DIExpression *MachineInstr::getDebugExpression() const {
862   assert(isDebugValue() && "not a DBG_VALUE");
863   return cast<DIExpression>(getOperand(3).getMetadata());
864 }
865 
866 bool MachineInstr::isDebugEntryValue() const {
867   return isDebugValue() && getDebugExpression()->isEntryValue();
868 }
869 
/// Compute the register class constraint placed on operand OpIdx by this
/// instruction.  For ordinary instructions the constraint comes from the
/// MCInstrDesc; for inline asm it is decoded from the operand-group flag
/// word.  Returns null when no constraint applies.
const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getMF();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  // Only register-kind groups (use/def/early-clobber) can carry an explicit
  // register-class id in the flag word.
  unsigned Flag = getOperand(FlagIdx).getImm();
  unsigned RCID;
  if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
    return TRI->getPointerRegClass(MF);

  return nullptr;
}
909 
/// Accumulate into CurRC every register-class constraint that operands of
/// this instruction (or, when ExploreBundle is set, of the whole bundle)
/// place on virtual register Reg.  Stops early (returning null) once the
/// constraints become unsatisfiable.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operands inside the bundle if we have
  // been asked to.
  if (ExploreBundle)
    // Note: each operand is resolved against its own parent instruction,
    // since operands in a bundle belong to different MachineInstrs.
    for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
      CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
  return CurRC;
}
926 
927 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
928     unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
929     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
930   assert(CurRC && "Invalid initial register class");
931   // Check if Reg is constrained by some of its use/def from MI.
932   const MachineOperand &MO = getOperand(OpIdx);
933   if (!MO.isReg() || MO.getReg() != Reg)
934     return CurRC;
935   // If yes, accumulate the constraints through the operand.
936   return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
937 }
938 
/// Intersect CurRC with the constraint that operand OpIdx places on its
/// register, taking any subregister index on the operand into account.
/// Returns the narrowed class, or null if the constraints are incompatible.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
    unsigned OpIdx, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isReg() &&
         "Cannot get register constraints for non-register operand");
  assert(CurRC && "Invalid initial register class");
  if (unsigned SubIdx = MO.getSubReg()) {
    // With a subreg index, the constraint applies to the subregister, so the
    // full register's class must be lifted accordingly.
    if (OpRC)
      CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
    else
      CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
  } else if (OpRC)
    CurRC = TRI->getCommonSubClass(CurRC, OpRC);
  return CurRC;
}
956 
957 /// Return the number of instructions inside the MI bundle, not counting the
958 /// header instruction.
959 unsigned MachineInstr::getBundleSize() const {
960   MachineBasicBlock::const_instr_iterator I = getIterator();
961   unsigned Size = 0;
962   while (I->isBundledWithSucc()) {
963     ++Size;
964     ++I;
965   }
966   return Size;
967 }
968 
969 /// Returns true if the MachineInstr has an implicit-use operand of exactly
970 /// the given register (not considering sub/super-registers).
971 bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
972   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
973     const MachineOperand &MO = getOperand(i);
974     if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
975       return true;
976   }
977   return false;
978 }
979 
980 /// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
981 /// the specific register or -1 if it is not found. It further tightens
982 /// the search criteria to a use that kills the register if isKill is true.
983 int MachineInstr::findRegisterUseOperandIdx(
984     Register Reg, bool isKill, const TargetRegisterInfo *TRI) const {
985   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
986     const MachineOperand &MO = getOperand(i);
987     if (!MO.isReg() || !MO.isUse())
988       continue;
989     Register MOReg = MO.getReg();
990     if (!MOReg)
991       continue;
992     if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
993       if (!isKill || MO.isKill())
994         return i;
995   }
996   return -1;
997 }
998 
/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines.
/// If Ops is non-null, the index of every operand referring to Reg is
/// appended to it.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(Register Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      // An undef use doesn't actually read the register.
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial def undef doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}
1026 
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
/// When Overlap is true, any overlapping physical-register def (or a
/// clobbering regmask) matches, not just exact/sub-register defs.
int
MachineInstr::findRegisterDefOperandIdx(Register Reg, bool isDead, bool Overlap,
                                        const TargetRegisterInfo *TRI) const {
  bool isPhys = Register::isPhysicalRegister(Reg);
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    if (!Found && TRI && isPhys && Register::isPhysicalRegister(MOReg)) {
      // Widen the match to overlapping or super-register defs as requested.
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}
1056 
1057 /// findFirstPredOperandIdx() - Find the index of the first operand in the
1058 /// operand list that is used to represent the predicate. It returns -1 if
1059 /// none is found.
1060 int MachineInstr::findFirstPredOperandIdx() const {
1061   // Don't call MCID.findFirstPredOperandIdx() because this variant
1062   // is sometimes called on an instruction that's not yet complete, and
1063   // so the number of operands is less than the MCID indicates. In
1064   // particular, the PTX target does this.
1065   const MCInstrDesc &MCID = getDesc();
1066   if (MCID.isPredicable()) {
1067     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
1068       if (MCID.OpInfo[i].isPredicate())
1069         return i;
1070   }
1071 
1072   return -1;
1073 }
1074 
// MachineOperand::TiedTo is 4 bits wide.
const unsigned TiedMax = 15;

/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0:              Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax:        Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  // Encode the link as index+1 so that 0 keeps meaning "untied".
  if (DefIdx < TiedMax)
    UseMO.TiedTo = DefIdx + 1;
  else {
    // Inline asm can use the group descriptors to find tied operands, but on
    // normal instruction, the tied def must be within the first TiedMax
    // operands.
    assert(isInlineAsm() && "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
1111 
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm()) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    // Only indices >= TiedMax-1 can have the saturated TiedTo encoding.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}
1169 
1170 /// clearKillInfo - Clears kill flags on all operands.
1171 ///
1172 void MachineInstr::clearKillInfo() {
1173   for (MachineOperand &MO : operands()) {
1174     if (MO.isReg() && MO.isUse())
1175       MO.setIsKill(false);
1176   }
1177 }
1178 
1179 void MachineInstr::substituteRegister(Register FromReg, Register ToReg,
1180                                       unsigned SubIdx,
1181                                       const TargetRegisterInfo &RegInfo) {
1182   if (Register::isPhysicalRegister(ToReg)) {
1183     if (SubIdx)
1184       ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1185     for (MachineOperand &MO : operands()) {
1186       if (!MO.isReg() || MO.getReg() != FromReg)
1187         continue;
1188       MO.substPhysReg(ToReg, RegInfo);
1189     }
1190   } else {
1191     for (MachineOperand &MO : operands()) {
1192       if (!MO.isReg() || MO.getReg() != FromReg)
1193         continue;
1194       MO.substVirtReg(ToReg, SubIdx, RegInfo);
1195     }
1196   }
1197 }
1198 
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
/// SawStore is also updated to true when this instruction itself acts as a
/// store barrier.
bool MachineInstr::isSafeToMove(AAResults *AA, bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    SawStore = true;
    return false;
  }

  // Labels, debug info, terminators, FP-exception raisers and side-effecting
  // instructions are position-sensitive and must stay put.
  if (isPosition() || isDebugInstr() || isTerminator() ||
      mayRaiseFPException() || hasUnmodeledSideEffects())
    return false;

  // See if this instruction does a load.  If so, we have to guarantee that the
  // loaded value doesn't change between the load and the its intended
  // destination. The check for isInvariantLoad gives the target the chance to
  // classify the load as always returning a constant, e.g. a constant pool
  // load.
  if (mayLoad() && !isDereferenceableInvariantLoad(AA))
    // Otherwise, this is a real load.  If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}
1230 
/// Conservatively determine whether this instruction's memory access may
/// alias \p Other's.  Returns false only when the accesses provably cannot
/// interfere; any missing information (no memoperands, unknown widths, no
/// AA) yields true.
bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
                            bool UseTBAA) const {
  const MachineFunction *MF = getMF();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  // If neither instruction stores to memory, they can't alias in any
  // meaningful way, even if they read from the same address.
  if (!mayStore() && !Other.mayStore())
    return false;

  // Both instructions must be memory operations to be able to alias.
  if (!mayLoadOrStore() || !Other.mayLoadOrStore())
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
    return false;

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!hasOneMemOperand() || !Other.hasOneMemOperand())
    return true;

  MachineMemOperand *MMOa = *memoperands_begin();
  MachineMemOperand *MMOb = *Other.memoperands_begin();

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offset with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  int64_t OffsetA = MMOa->getOffset();
  int64_t OffsetB = MMOb->getOffset();
  int64_t MinOffset = std::min(OffsetA, OffsetB);

  uint64_t WidthA = MMOa->getSize();
  uint64_t WidthB = MMOb->getSize();
  bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
  bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;

  const Value *ValA = MMOa->getValue();
  const Value *ValB = MMOb->getValue();
  bool SameVal = (ValA && ValB && (ValA == ValB));
  if (!SameVal) {
    // A PseudoSourceValue (e.g. a stack slot) that cannot alias IR values
    // proves disjointness with any IR-value-based access.
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
      return false;
    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
      return false;
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal) {
    // Same underlying object: this is a pure interval-overlap check.
    if (!KnownWidthA || !KnownWidthB)
      return true;
    int64_t MaxOffset = std::max(OffsetA, OffsetB);
    int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
    return (MinOffset + LowWidth > MaxOffset);
  }

  if (!AA)
    return true;

  if (!ValA || !ValB)
    return true;

  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");

  // Extend each access's size so both locations are measured from MinOffset,
  // matching what the AA query below expects.
  int64_t OverlapA = KnownWidthA ? WidthA + OffsetA - MinOffset
                                 : MemoryLocation::UnknownSize;
  int64_t OverlapB = KnownWidthB ? WidthB + OffsetB - MinOffset
                                 : MemoryLocation::UnknownSize;

  AliasResult AAResult = AA->alias(
      MemoryLocation(ValA, OverlapA,
                     UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      MemoryLocation(ValB, OverlapB,
                     UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));

  return (AAResult != NoAlias);
}
1326 
1327 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1328 /// or volatile memory reference, or if the information describing the memory
1329 /// reference is not available. Return false if it is known to have no ordered
1330 /// memory references.
1331 bool MachineInstr::hasOrderedMemoryRef() const {
1332   // An instruction known never to access memory won't have a volatile access.
1333   if (!mayStore() &&
1334       !mayLoad() &&
1335       !isCall() &&
1336       !hasUnmodeledSideEffects())
1337     return false;
1338 
1339   // Otherwise, if the instruction has no memory reference information,
1340   // conservatively assume it wasn't preserved.
1341   if (memoperands_empty())
1342     return true;
1343 
1344   // Check if any of our memory operands are ordered.
1345   return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1346     return !MMO->isUnordered();
1347   });
1348 }
1349 
/// isDereferenceableInvariantLoad - Return true if this instruction will never
/// trap and is loading from a location whose value is invariant across a run of
/// this function.
/// Every memory operand must independently be shown invariant and
/// dereferenceable; any one failing makes the whole answer false.
bool MachineInstr::isDereferenceableInvariantLoad(AAResults *AA) const {
  // If the instruction doesn't load at all, it isn't an invariant load.
  if (!mayLoad())
    return false;

  // If the instruction has lost its memoperands, conservatively assume that
  // it may not be an invariant load.
  if (memoperands_empty())
    return false;

  const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();

  for (MachineMemOperand *MMO : memoperands()) {
    if (!MMO->isUnordered())
      // If the memory operand has ordering side effects, we can't move the
      // instruction.  Such an instruction is technically an invariant load,
      // but the caller code would need updated to expect that.
      return false;
    if (MMO->isStore()) return false;
    // Flags on the memoperand itself can settle the question directly.
    if (MMO->isInvariant() && MMO->isDereferenceable())
      continue;

    // A load from a constant PseudoSourceValue is invariant.
    if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
      if (PSV->isConstant(&MFI))
        continue;

    if (const Value *V = MMO->getValue()) {
      // If we have an AliasAnalysis, ask it whether the memory is constant.
      if (AA &&
          AA->pointsToConstantMemory(
              MemoryLocation(V, MMO->getSize(), MMO->getAAInfo())))
        continue;
    }

    // Otherwise assume conservatively.
    return false;
  }

  // Everything checks out.
  return true;
}
1395 
1396 /// isConstantValuePHI - If the specified instruction is a PHI that always
1397 /// merges together the same virtual register, return the register, otherwise
1398 /// return 0.
1399 unsigned MachineInstr::isConstantValuePHI() const {
1400   if (!isPHI())
1401     return 0;
1402   assert(getNumOperands() >= 3 &&
1403          "It's illegal to have a PHI without source operands");
1404 
1405   Register Reg = getOperand(1).getReg();
1406   for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1407     if (getOperand(i).getReg() != Reg)
1408       return 0;
1409   return Reg;
1410 }
1411 
1412 bool MachineInstr::hasUnmodeledSideEffects() const {
1413   if (hasProperty(MCID::UnmodeledSideEffects))
1414     return true;
1415   if (isInlineAsm()) {
1416     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1417     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1418       return true;
1419   }
1420 
1421   return false;
1422 }
1423 
1424 bool MachineInstr::isLoadFoldBarrier() const {
1425   return mayStore() || isCall() || hasUnmodeledSideEffects();
1426 }
1427 
1428 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
1429 ///
1430 bool MachineInstr::allDefsAreDead() const {
1431   for (const MachineOperand &MO : operands()) {
1432     if (!MO.isReg() || MO.isUse())
1433       continue;
1434     if (!MO.isDead())
1435       return false;
1436   }
1437   return true;
1438 }
1439 
1440 /// copyImplicitOps - Copy implicit register operands from specified
1441 /// instruction to this instruction.
1442 void MachineInstr::copyImplicitOps(MachineFunction &MF,
1443                                    const MachineInstr &MI) {
1444   for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands();
1445        i != e; ++i) {
1446     const MachineOperand &MO = MI.getOperand(i);
1447     if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1448       addOperand(MF, MO);
1449   }
1450 }
1451 
1452 bool MachineInstr::hasComplexRegisterTies() const {
1453   const MCInstrDesc &MCID = getDesc();
1454   for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1455     const auto &Operand = getOperand(I);
1456     if (!Operand.isReg() || Operand.isDef())
1457       // Ignore the defined registers as MCID marks only the uses as tied.
1458       continue;
1459     int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1460     int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1461     if (ExpectedTiedIdx != TiedIdx)
1462       return true;
1463   }
1464   return false;
1465 }
1466 
/// Decide which LLT (if any) to print for operand OpIdx.  PrintedTypes
/// tracks which generic type indices were already printed so each type
/// appears at most once per instruction; an invalid LLT means "print
/// nothing".
LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
                                 const MachineRegisterInfo &MRI) const {
  const MachineOperand &Op = getOperand(OpIdx);
  if (!Op.isReg())
    return LLT{};

  // Variadic and implicit operands have no OpInfo entry to consult; print
  // their register type unconditionally.
  if (isVariadic() || OpIdx >= getNumExplicitOperands())
    return MRI.getType(Op.getReg());

  auto &OpInfo = getDesc().OpInfo[OpIdx];
  if (!OpInfo.isGenericType())
    return MRI.getType(Op.getReg());

  // Already printed a type for this generic type index; suppress repeats.
  if (PrintedTypes[OpInfo.getGenericTypeIndex()])
    return LLT{};

  LLT TypeToPrint = MRI.getType(Op.getReg());
  // Don't mark the type index printed if it wasn't actually printed: maybe
  // another operand with the same type index has an actual type attached:
  if (TypeToPrint.isValid())
    PrintedTypes.set(OpInfo.getGenericTypeIndex());
  return TypeToPrint;
}
1490 
1491 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print this instruction to the debug stream, indented to line up with
/// basic-block dumps.
LLVM_DUMP_METHOD void MachineInstr::dump() const {
  dbgs() << "  ";
  print(dbgs());
}
1496 
/// Recursive worker for dumpr(): print this instruction, then recurse into
/// the unique defining instructions of its virtual-register uses, indenting
/// by depth.  AlreadySeenInstrs prevents printing any instruction twice;
/// MaxDepth bounds the recursion.
LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
    const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
    SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
  if (Depth >= MaxDepth)
    return;
  // Skip instructions that were already printed on another path.
  if (!AlreadySeenInstrs.insert(this).second)
    return;
  // PadToColumn always inserts at least one space.
  // Don't mess up the alignment if we don't want any space.
  if (Depth)
    fdbgs().PadToColumn(Depth * 2);
  print(fdbgs());
  for (const MachineOperand &MO : operands()) {
    // Only follow virtual-register uses back to their definitions.
    if (!MO.isReg() || MO.isDef())
      continue;
    Register Reg = MO.getReg();
    if (Reg.isPhysical())
      continue;
    // Requires a single unique def; multiply-defined vregs are not followed.
    const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
    if (NewMI == nullptr)
      continue;
    NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
  }
}
1521 
/// Print this instruction and, recursively, the defining instructions of the
/// virtual registers it uses, up to MaxDepth levels.
LLVM_DUMP_METHOD void MachineInstr::dumpr(const MachineRegisterInfo &MRI,
                                          unsigned MaxDepth) const {
  // Fresh visited-set per top-level call; dumprImpl does the real work.
  SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
  dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
}
1527 #endif
1528 
/// Convenience print overload: recovers the Module/Function context (when
/// this instruction is parented to a MachineFunction) to build a
/// ModuleSlotTracker, then delegates to the full print overload.
void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
                         bool SkipDebugLoc, bool AddNewLine,
                         const TargetInstrInfo *TII) const {
  const Module *M = nullptr;
  const Function *F = nullptr;
  if (const MachineFunction *MF = getMFIfAvailable(*this)) {
    F = &MF->getFunction();
    M = F->getParent();
    // Prefer the caller-supplied TII; otherwise take it from the subtarget.
    if (!TII)
      TII = MF->getSubtarget().getInstrInfo();
  }

  ModuleSlotTracker MST(M);
  if (F)
    MST.incorporateFunction(*F);
  print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
}
1546 
1547 void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1548                          bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1549                          bool AddNewLine, const TargetInstrInfo *TII) const {
1550   // We can be a bit tidier if we know the MachineFunction.
1551   const TargetRegisterInfo *TRI = nullptr;
1552   const MachineRegisterInfo *MRI = nullptr;
1553   const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1554   tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1555 
1556   if (isCFIInstruction())
1557     assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1558 
1559   SmallBitVector PrintedTypes(8);
1560   bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1561   auto getTiedOperandIdx = [&](unsigned OpIdx) {
1562     if (!ShouldPrintRegisterTies)
1563       return 0U;
1564     const MachineOperand &MO = getOperand(OpIdx);
1565     if (MO.isReg() && MO.isTied() && !MO.isDef())
1566       return findTiedOperandIdx(OpIdx);
1567     return 0U;
1568   };
1569   unsigned StartOp = 0;
1570   unsigned e = getNumOperands();
1571 
1572   // Print explicitly defined operands on the left of an assignment syntax.
1573   while (StartOp < e) {
1574     const MachineOperand &MO = getOperand(StartOp);
1575     if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1576       break;
1577 
1578     if (StartOp != 0)
1579       OS << ", ";
1580 
1581     LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1582     unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1583     MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
1584              ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1585     ++StartOp;
1586   }
1587 
1588   if (StartOp != 0)
1589     OS << " = ";
1590 
1591   if (getFlag(MachineInstr::FrameSetup))
1592     OS << "frame-setup ";
1593   if (getFlag(MachineInstr::FrameDestroy))
1594     OS << "frame-destroy ";
1595   if (getFlag(MachineInstr::FmNoNans))
1596     OS << "nnan ";
1597   if (getFlag(MachineInstr::FmNoInfs))
1598     OS << "ninf ";
1599   if (getFlag(MachineInstr::FmNsz))
1600     OS << "nsz ";
1601   if (getFlag(MachineInstr::FmArcp))
1602     OS << "arcp ";
1603   if (getFlag(MachineInstr::FmContract))
1604     OS << "contract ";
1605   if (getFlag(MachineInstr::FmAfn))
1606     OS << "afn ";
1607   if (getFlag(MachineInstr::FmReassoc))
1608     OS << "reassoc ";
1609   if (getFlag(MachineInstr::NoUWrap))
1610     OS << "nuw ";
1611   if (getFlag(MachineInstr::NoSWrap))
1612     OS << "nsw ";
1613   if (getFlag(MachineInstr::IsExact))
1614     OS << "exact ";
1615   if (getFlag(MachineInstr::NoFPExcept))
1616     OS << "nofpexcept ";
1617   if (getFlag(MachineInstr::NoMerge))
1618     OS << "nomerge ";
1619 
1620   // Print the opcode name.
1621   if (TII)
1622     OS << TII->getName(getOpcode());
1623   else
1624     OS << "UNKNOWN";
1625 
1626   if (SkipOpers)
1627     return;
1628 
1629   // Print the rest of the operands.
1630   bool FirstOp = true;
1631   unsigned AsmDescOp = ~0u;
1632   unsigned AsmOpCount = 0;
1633 
1634   if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1635     // Print asm string.
1636     OS << " ";
1637     const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1638     LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1639     unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1640     getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true, IsStandalone,
1641                             ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1642                             IntrinsicInfo);
1643 
1644     // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1645     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1646     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1647       OS << " [sideeffect]";
1648     if (ExtraInfo & InlineAsm::Extra_MayLoad)
1649       OS << " [mayload]";
1650     if (ExtraInfo & InlineAsm::Extra_MayStore)
1651       OS << " [maystore]";
1652     if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1653       OS << " [isconvergent]";
1654     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1655       OS << " [alignstack]";
1656     if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1657       OS << " [attdialect]";
1658     if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1659       OS << " [inteldialect]";
1660 
1661     StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1662     FirstOp = false;
1663   }
1664 
1665   for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1666     const MachineOperand &MO = getOperand(i);
1667 
1668     if (FirstOp) FirstOp = false; else OS << ",";
1669     OS << " ";
1670 
1671     if (isDebugValue() && MO.isMetadata()) {
1672       // Pretty print DBG_VALUE instructions.
1673       auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1674       if (DIV && !DIV->getName().empty())
1675         OS << "!\"" << DIV->getName() << '\"';
1676       else {
1677         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1678         unsigned TiedOperandIdx = getTiedOperandIdx(i);
1679         MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1680                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1681       }
1682     } else if (isDebugLabel() && MO.isMetadata()) {
1683       // Pretty print DBG_LABEL instructions.
1684       auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1685       if (DIL && !DIL->getName().empty())
1686         OS << "\"" << DIL->getName() << '\"';
1687       else {
1688         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1689         unsigned TiedOperandIdx = getTiedOperandIdx(i);
1690         MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1691                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1692       }
1693     } else if (i == AsmDescOp && MO.isImm()) {
1694       // Pretty print the inline asm operand descriptor.
1695       OS << '$' << AsmOpCount++;
1696       unsigned Flag = MO.getImm();
1697       OS << ":[";
1698       OS << InlineAsm::getKindName(InlineAsm::getKind(Flag));
1699 
1700       unsigned RCID = 0;
1701       if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
1702           InlineAsm::hasRegClassConstraint(Flag, RCID)) {
1703         if (TRI) {
1704           OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1705         } else
1706           OS << ":RC" << RCID;
1707       }
1708 
1709       if (InlineAsm::isMemKind(Flag)) {
1710         unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
1711         OS << ":" << InlineAsm::getMemConstraintName(MCID);
1712       }
1713 
1714       unsigned TiedTo = 0;
1715       if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
1716         OS << " tiedto:$" << TiedTo;
1717 
1718       OS << ']';
1719 
1720       // Compute the index of the next operand descriptor.
1721       AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
1722     } else {
1723       LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1724       unsigned TiedOperandIdx = getTiedOperandIdx(i);
1725       if (MO.isImm() && isOperandSubregIdx(i))
1726         MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1727       else
1728         MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1729                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1730     }
1731   }
1732 
1733   // Print any optional symbols attached to this instruction as-if they were
1734   // operands.
1735   if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1736     if (!FirstOp) {
1737       FirstOp = false;
1738       OS << ',';
1739     }
1740     OS << " pre-instr-symbol ";
1741     MachineOperand::printSymbol(OS, *PreInstrSymbol);
1742   }
1743   if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
1744     if (!FirstOp) {
1745       FirstOp = false;
1746       OS << ',';
1747     }
1748     OS << " post-instr-symbol ";
1749     MachineOperand::printSymbol(OS, *PostInstrSymbol);
1750   }
1751   if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
1752     if (!FirstOp) {
1753       FirstOp = false;
1754       OS << ',';
1755     }
1756     OS << " heap-alloc-marker ";
1757     HeapAllocMarker->printAsOperand(OS, MST);
1758   }
1759 
1760   if (!SkipDebugLoc) {
1761     if (const DebugLoc &DL = getDebugLoc()) {
1762       if (!FirstOp)
1763         OS << ',';
1764       OS << " debug-location ";
1765       DL->printAsOperand(OS, MST);
1766     }
1767   }
1768 
1769   if (!memoperands_empty()) {
1770     SmallVector<StringRef, 0> SSNs;
1771     const LLVMContext *Context = nullptr;
1772     std::unique_ptr<LLVMContext> CtxPtr;
1773     const MachineFrameInfo *MFI = nullptr;
1774     if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1775       MFI = &MF->getFrameInfo();
1776       Context = &MF->getFunction().getContext();
1777     } else {
1778       CtxPtr = std::make_unique<LLVMContext>();
1779       Context = CtxPtr.get();
1780     }
1781 
1782     OS << " :: ";
1783     bool NeedComma = false;
1784     for (const MachineMemOperand *Op : memoperands()) {
1785       if (NeedComma)
1786         OS << ", ";
1787       Op->print(OS, MST, SSNs, *Context, MFI, TII);
1788       NeedComma = true;
1789     }
1790   }
1791 
1792   if (SkipDebugLoc)
1793     return;
1794 
1795   bool HaveSemi = false;
1796 
1797   // Print debug location information.
1798   if (const DebugLoc &DL = getDebugLoc()) {
1799     if (!HaveSemi) {
1800       OS << ';';
1801       HaveSemi = true;
1802     }
1803     OS << ' ';
1804     DL.print(OS);
1805   }
1806 
1807   // Print extra comments for DEBUG_VALUE.
1808   if (isDebugValue() && getDebugVariableOp().isMetadata()) {
1809     if (!HaveSemi) {
1810       OS << ";";
1811       HaveSemi = true;
1812     }
1813     auto *DV = getDebugVariable();
1814     OS << " line no:" <<  DV->getLine();
1815     if (isIndirectDebugValue())
1816       OS << " indirect";
1817   }
1818   // TODO: DBG_LABEL
1819 
1820   if (AddNewLine)
1821     OS << '\n';
1822 }
1823 
bool MachineInstr::addRegisterKilled(Register IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  // Try to mark the use of IncomingReg on this instruction as a kill.  Returns
  // true if a kill flag is in place (or already was); if no use is found and
  // AddIfNotFound is set, an implicit use with a kill flag is appended.
  //
  // Only physical registers can alias other registers, so alias handling is
  // skipped entirely for virtual registers.
  bool isPhysReg = Register::isPhysicalRegister(IncomingReg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of operands whose kill flags become redundant (sub-register kills
  // subsumed by the new kill); cleaned up after the scan.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;

    // DEBUG_VALUE nodes do not contribute to code generation and should
    // always be ignored. Failure to do so may result in trying to modify
    // KILL flags on DEBUG_VALUE nodes.
    if (MO.isDebug())
      continue;

    Register Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() && Register::isPhysicalRegister(Reg)) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      // A sub-register kill becomes redundant once IncomingReg is killed;
      // remember the operand so its flag can be cleared below.
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands.  Indices are processed back-to-front so that
  // RemoveOperand does not invalidate the indices still queued in DeadOps.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit() &&
        (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}
1889 
1890 void MachineInstr::clearRegisterKills(Register Reg,
1891                                       const TargetRegisterInfo *RegInfo) {
1892   if (!Register::isPhysicalRegister(Reg))
1893     RegInfo = nullptr;
1894   for (MachineOperand &MO : operands()) {
1895     if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1896       continue;
1897     Register OpReg = MO.getReg();
1898     if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
1899       MO.setIsKill(false);
1900   }
1901 }
1902 
bool MachineInstr::addRegisterDead(Register Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  // Mark every def of Reg on this instruction as dead.  Returns true if a
  // dead flag was placed (or a covering super-register dead flag already
  // exists); if no def is found and AddIfNotFound is set, an implicit dead
  // def is appended.
  //
  // Only physical registers can alias other registers, so alias handling is
  // skipped entirely for virtual registers.
  bool isPhysReg = Register::isPhysicalRegister(Reg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of sub-register dead flags made redundant by the new dead def;
  // cleaned up after the scan.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() &&
               Register::isPhysicalRegister(MOReg)) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands.  Indices are processed back-to-front so that
  // RemoveOperand does not invalidate the indices still queued in DeadOps.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit() &&
        (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}
1955 
1956 void MachineInstr::clearRegisterDeads(Register Reg) {
1957   for (MachineOperand &MO : operands()) {
1958     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
1959       continue;
1960     MO.setIsDead(false);
1961   }
1962 }
1963 
1964 void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
1965   for (MachineOperand &MO : operands()) {
1966     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
1967       continue;
1968     MO.setIsUndef(IsUndef);
1969   }
1970 }
1971 
1972 void MachineInstr::addRegisterDefined(Register Reg,
1973                                       const TargetRegisterInfo *RegInfo) {
1974   if (Register::isPhysicalRegister(Reg)) {
1975     MachineOperand *MO = findRegisterDefOperand(Reg, false, false, RegInfo);
1976     if (MO)
1977       return;
1978   } else {
1979     for (const MachineOperand &MO : operands()) {
1980       if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
1981           MO.getSubReg() == 0)
1982         return;
1983     }
1984   }
1985   addOperand(MachineOperand::CreateReg(Reg,
1986                                        true  /*IsDef*/,
1987                                        true  /*IsImp*/));
1988 }
1989 
1990 void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
1991                                          const TargetRegisterInfo &TRI) {
1992   bool HasRegMask = false;
1993   for (MachineOperand &MO : operands()) {
1994     if (MO.isRegMask()) {
1995       HasRegMask = true;
1996       continue;
1997     }
1998     if (!MO.isReg() || !MO.isDef()) continue;
1999     Register Reg = MO.getReg();
2000     if (!Reg.isPhysical())
2001       continue;
2002     // If there are no uses, including partial uses, the def is dead.
2003     if (llvm::none_of(UsedRegs,
2004                       [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2005       MO.setIsDead();
2006   }
2007 
2008   // This is a call with a register mask operand.
2009   // Mask clobbers are always dead, so add defs for the non-dead defines.
2010   if (HasRegMask)
2011     for (ArrayRef<Register>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
2012          I != E; ++I)
2013       addRegisterDefined(*I, &TRI);
2014 }
2015 
2016 unsigned
2017 MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
2018   // Build up a buffer of hash code components.
2019   SmallVector<size_t, 16> HashComponents;
2020   HashComponents.reserve(MI->getNumOperands() + 1);
2021   HashComponents.push_back(MI->getOpcode());
2022   for (const MachineOperand &MO : MI->operands()) {
2023     if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
2024       continue;  // Skip virtual register defs.
2025 
2026     HashComponents.push_back(hash_value(MO));
2027   }
2028   return hash_combine_range(HashComponents.begin(), HashComponents.end());
2029 }
2030 
void MachineInstr::emitError(StringRef Msg) const {
  // Find the source location cookie.
  // Scan operands back-to-front for a non-empty metadata node whose first
  // operand is a ConstantInt; the last such node wins.
  unsigned LocCookie = 0;
  const MDNode *LocMD = nullptr;
  for (unsigned i = getNumOperands(); i != 0; --i) {
    if (getOperand(i-1).isMetadata() &&
        (LocMD = getOperand(i-1).getMetadata()) &&
        LocMD->getNumOperands() != 0) {
      if (const ConstantInt *CI =
              mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
        LocCookie = CI->getZExtValue();
        break;
      }
    }
  }

  // Prefer the LLVMContext error hook when the instruction is attached to a
  // function; otherwise all we can do is abort with the message.
  if (const MachineBasicBlock *MBB = getParent())
    if (const MachineFunction *MF = MBB->getParent())
      return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
  report_fatal_error(Msg);
}
2052 
MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
                                  const MCInstrDesc &MCID, bool IsIndirect,
                                  Register Reg, const MDNode *Variable,
                                  const MDNode *Expr) {
  // Build a debug-value instruction describing Reg.  Operand order matters:
  // (location register, offset-or-register, variable, expression).
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = BuildMI(MF, DL, MCID).addReg(Reg, RegState::Debug);
  // Second operand: immediate 0 for an indirect location, otherwise a
  // placeholder register 0.
  if (IsIndirect)
    MIB.addImm(0U);
  else
    MIB.addReg(0U, RegState::Debug);
  return MIB.addMetadata(Variable).addMetadata(Expr);
}
2068 
MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
                                  const MCInstrDesc &MCID, bool IsIndirect,
                                  MachineOperand &MO, const MDNode *Variable,
                                  const MDNode *Expr) {
  // Same as the Register-based overload, but accepts an arbitrary location
  // operand.  Register operands are delegated to that overload.
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  if (MO.isReg())
    return BuildMI(MF, DL, MCID, IsIndirect, MO.getReg(), Variable, Expr);

  auto MIB = BuildMI(MF, DL, MCID).add(MO);
  // Second operand: immediate 0 for an indirect location, otherwise a
  // placeholder register 0.
  if (IsIndirect)
    MIB.addImm(0U);
  else
    MIB.addReg(0U, RegState::Debug);
  return MIB.addMetadata(Variable).addMetadata(Expr);
}
2087 
2088 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2089                                   MachineBasicBlock::iterator I,
2090                                   const DebugLoc &DL, const MCInstrDesc &MCID,
2091                                   bool IsIndirect, Register Reg,
2092                                   const MDNode *Variable, const MDNode *Expr) {
2093   MachineFunction &MF = *BB.getParent();
2094   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2095   BB.insert(I, MI);
2096   return MachineInstrBuilder(MF, MI);
2097 }
2098 
2099 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2100                                   MachineBasicBlock::iterator I,
2101                                   const DebugLoc &DL, const MCInstrDesc &MCID,
2102                                   bool IsIndirect, MachineOperand &MO,
2103                                   const MDNode *Variable, const MDNode *Expr) {
2104   MachineFunction &MF = *BB.getParent();
2105   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, MO, Variable, Expr);
2106   BB.insert(I, MI);
2107   return MachineInstrBuilder(MF, *MI);
2108 }
2109 
/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
static const DIExpression *computeExprForSpill(const MachineInstr &MI) {
  assert(MI.getOperand(0).isReg() && "can't spill non-register");
  assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
         "Expected inlined-at fields to agree");

  const DIExpression *Expr = MI.getDebugExpression();
  if (MI.isIndirectDebugValue()) {
    // The slot address must be dereferenced first to recover the indirect
    // value; a nonzero offset has no representation here, hence the assert.
    assert(MI.getDebugOffset().getImm() == 0 &&
           "DBG_VALUE with nonzero offset");
    Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
  }
  return Expr;
}
2125 
MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
                                          MachineBasicBlock::iterator I,
                                          const MachineInstr &Orig,
                                          int FrameIndex) {
  // Build a new DBG_VALUE at I that refers to spill slot FrameIndex instead
  // of Orig's register location; Orig itself is left untouched.
  const DIExpression *Expr = computeExprForSpill(Orig);
  return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc())
      .addFrameIndex(FrameIndex)
      .addImm(0U)
      .addMetadata(Orig.getDebugVariable())
      .addMetadata(Expr);
}
2137 
void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) {
  // Rewrite Orig in place to describe the spill slot: the location becomes
  // the frame index, the offset operand is reset to 0, and the expression is
  // updated (gaining a deref when needed — see computeExprForSpill).
  const DIExpression *Expr = computeExprForSpill(Orig);
  Orig.getDebugOperand(0).ChangeToFrameIndex(FrameIndex);
  Orig.getDebugOffset().ChangeToImmediate(0U);
  Orig.getDebugExpressionOp().setMetadata(Expr);
}
2144 
void MachineInstr::collectDebugValues(
                                SmallVectorImpl<MachineInstr *> &DbgValues) {
  // Collect the DBG_VALUE instructions that immediately follow this
  // instruction and describe the register defined by operand 0.  The scan
  // stops at the first non-debug instruction, so only the contiguous run of
  // debug values directly after *this is considered.
  MachineInstr &MI = *this;
  if (!MI.getOperand(0).isReg())
    return;

  MachineBasicBlock::iterator DI = MI; ++DI;
  for (MachineBasicBlock::iterator DE = MI.getParent()->end();
       DI != DE; ++DI) {
    if (!DI->isDebugValue())
      return;
    if (DI->getDebugOperandForReg(MI.getOperand(0).getReg()))
      DbgValues.push_back(&*DI);
  }
}
2160 
2161 void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2162   // Collect matching debug values.
2163   SmallVector<MachineInstr *, 2> DbgValues;
2164 
2165   if (!getOperand(0).isReg())
2166     return;
2167 
2168   Register DefReg = getOperand(0).getReg();
2169   auto *MRI = getRegInfo();
2170   for (auto &MO : MRI->use_operands(DefReg)) {
2171     auto *DI = MO.getParent();
2172     if (!DI->isDebugValue())
2173       continue;
2174     if (DI->getDebugOperandForReg(DefReg)) {
2175       DbgValues.push_back(DI);
2176     }
2177   }
2178 
2179   // Propagate Reg to debug value instructions.
2180   for (auto *DBI : DbgValues)
2181     DBI->getDebugOperandForReg(DefReg)->setReg(Reg);
2182 }
2183 
2184 using MMOList = SmallVector<const MachineMemOperand *, 2>;
2185 
2186 static unsigned getSpillSlotSize(const MMOList &Accesses,
2187                                  const MachineFrameInfo &MFI) {
2188   unsigned Size = 0;
2189   for (auto A : Accesses)
2190     if (MFI.isSpillSlotObjectIndex(
2191             cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2192                 ->getFrameIndex()))
2193       Size += A->getSize();
2194   return Size;
2195 }
2196 
2197 Optional<unsigned>
2198 MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2199   int FI;
2200   if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2201     const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2202     if (MFI.isSpillSlotObjectIndex(FI))
2203       return (*memoperands_begin())->getSize();
2204   }
2205   return None;
2206 }
2207 
2208 Optional<unsigned>
2209 MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2210   MMOList Accesses;
2211   if (TII->hasStoreToStackSlot(*this, Accesses))
2212     return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2213   return None;
2214 }
2215 
2216 Optional<unsigned>
2217 MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2218   int FI;
2219   if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2220     const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2221     if (MFI.isSpillSlotObjectIndex(FI))
2222       return (*memoperands_begin())->getSize();
2223   }
2224   return None;
2225 }
2226 
2227 Optional<unsigned>
2228 MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2229   MMOList Accesses;
2230   if (TII->hasLoadFromStackSlot(*this, Accesses))
2231     return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2232   return None;
2233 }
2234