xref: /llvm-project/llvm/lib/CodeGen/MachineInstr.cpp (revision 06adfa17188859cab229fb977ec9dfe2b7dce6ed)
1 //===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Methods common to all machine instructions.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/MachineInstr.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/FoldingSet.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 #include "llvm/ADT/SmallString.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/Loads.h"
26 #include "llvm/Analysis/MemoryLocation.h"
27 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineInstrBundle.h"
32 #include "llvm/CodeGen/MachineMemOperand.h"
33 #include "llvm/CodeGen/MachineModuleInfo.h"
34 #include "llvm/CodeGen/MachineOperand.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/PseudoSourceValue.h"
37 #include "llvm/CodeGen/TargetInstrInfo.h"
38 #include "llvm/CodeGen/TargetRegisterInfo.h"
39 #include "llvm/CodeGen/TargetSubtargetInfo.h"
40 #include "llvm/Config/llvm-config.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DebugInfoMetadata.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/InlineAsm.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Intrinsics.h"
49 #include "llvm/IR/LLVMContext.h"
50 #include "llvm/IR/Metadata.h"
51 #include "llvm/IR/Module.h"
52 #include "llvm/IR/ModuleSlotTracker.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/Value.h"
55 #include "llvm/MC/MCInstrDesc.h"
56 #include "llvm/MC/MCRegisterInfo.h"
57 #include "llvm/MC/MCSymbol.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/LowLevelTypeImpl.h"
64 #include "llvm/Support/MathExtras.h"
65 #include "llvm/Support/raw_ostream.h"
66 #include "llvm/Target/TargetIntrinsicInfo.h"
67 #include "llvm/Target/TargetMachine.h"
68 #include <algorithm>
69 #include <cassert>
70 #include <cstddef>
71 #include <cstdint>
72 #include <cstring>
73 #include <iterator>
74 #include <utility>
75 
76 using namespace llvm;
77 
78 static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
79   if (const MachineBasicBlock *MBB = MI.getParent())
80     if (const MachineFunction *MF = MBB->getParent())
81       return MF;
82   return nullptr;
83 }
84 
85 // Try to crawl up to the machine function and get TRI and IntrinsicInfo from
86 // it.
87 static void tryToGetTargetInfo(const MachineInstr &MI,
88                                const TargetRegisterInfo *&TRI,
89                                const MachineRegisterInfo *&MRI,
90                                const TargetIntrinsicInfo *&IntrinsicInfo,
91                                const TargetInstrInfo *&TII) {
92 
93   if (const MachineFunction *MF = getMFIfAvailable(MI)) {
94     TRI = MF->getSubtarget().getRegisterInfo();
95     MRI = &MF->getRegInfo();
96     IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
97     TII = MF->getSubtarget().getInstrInfo();
98   }
99 }
100 
/// Append the implicit register operands described by this instruction's
/// MCInstrDesc: every implicit def first, then every implicit use. Both
/// lists are zero-terminated (the loops run until *Imp{Defs,Uses} is 0).
/// CreateReg's second and third arguments are (isDef, isImp).
101 void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
102   if (MCID->ImplicitDefs)
103     for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
104            ++ImpDefs)
105       addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
106   if (MCID->ImplicitUses)
107     for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
108            ++ImpUses)
109       addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
110 }
111 
112 /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
113 /// implicit operands. It reserves space for the number of operands specified by
114 /// the MCInstrDesc.
115 MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
116                            DebugLoc dl, bool NoImp)
117     : MCID(&tid), debugLoc(std::move(dl)) {
  // The DebugLoc member must never need a destructor call (instructions are
  // allocated/deallocated through the function, not deleted individually).
118   assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
119 
120   // Reserve space for the expected number of operands.
  // Explicit operands plus implicit defs/uses; zero means no allocation yet.
121   if (unsigned NumOps = MCID->getNumOperands() +
122     MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
123     CapOperands = OperandCapacity::get(NumOps);
124     Operands = MF.allocateOperandArray(CapOperands);
125   }
126 
  // Unless suppressed via NoImp, append the implicit defs/uses now.
127   if (!NoImp)
128     addImplicitDefUseOperands(MF);
129 }
130 
131 /// MachineInstr ctor - Copies MachineInstr arg exactly
132 ///
133 MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
134     : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) {
  // Info is copied wholesale, so memoperands and pre-/post-instruction
  // symbols are shared with MI rather than deep-cloned.
135   assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
136 
  // Allocate exactly enough operand slots for MI's current operand count.
137   CapOperands = OperandCapacity::get(MI.getNumOperands());
138   Operands = MF.allocateOperandArray(CapOperands);
139 
140   // Copy operands.
141   for (const MachineOperand &MO : MI.operands())
142     addOperand(MF, MO);
143 
144   // Copy all the sensible flags.
145   setFlags(MI.Flags);
146 }
147 
148 /// getRegInfo - If this instruction is embedded into a MachineFunction,
149 /// return the MachineRegisterInfo object for the current function, otherwise
150 /// return null.
151 MachineRegisterInfo *MachineInstr::getRegInfo() {
152   if (MachineBasicBlock *MBB = getParent())
153     return &MBB->getParent()->getRegInfo();
154   return nullptr;
155 }
156 
157 /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
158 /// this instruction from their respective use lists.  This requires that the
159 /// operands already be on their use lists.
160 void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
161   for (MachineOperand &MO : operands())
162     if (MO.isReg())
163       MRI.removeRegOperandFromUseList(&MO);
164 }
165 
166 /// AddRegOperandsToUseLists - Add all of the register operands in
167 /// this instruction to their respective use lists.  This requires that the
168 /// operands not be on their use lists yet.
169 void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
170   for (MachineOperand &MO : operands())
171     if (MO.isReg())
172       MRI.addRegOperandToUseList(&MO);
173 }
174 
/// Convenience overload of addOperand that looks up the owning
/// MachineFunction through the parent block. Only valid for instructions
/// already inserted into a block that is inserted into a function.
175 void MachineInstr::addOperand(const MachineOperand &Op) {
176   MachineBasicBlock *MBB = getParent();
177   assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
178   MachineFunction *MF = MBB->getParent();
179   assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
180   addOperand(*MF, Op);
181 }
182 
183 /// Move NumOps MachineOperands from Src to Dst, with support for overlapping
184 /// ranges. If MRI is non-null also update use-def chains.
185 static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
186                          unsigned NumOps, MachineRegisterInfo *MRI) {
  // MRI::moveOperands also repairs use-list pointers into the moved
  // operands; without an MRI there are no use lists to maintain.
187   if (MRI)
188     return MRI->moveOperands(Dst, Src, NumOps);
189 
190   // MachineOperand is a trivially copyable type so we can just use memmove.
  // memmove rather than memcpy because Src and Dst may overlap.
191   std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
192 }
193 
194 /// addOperand - Add the specified operand to the instruction.  If it is an
195 /// implicit operand, it is added to the end of the operand list.  If it is
196 /// an explicit operand it is added at the end of the explicit operand list
197 /// (before the first implicit operand).
198 void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
199   assert(MCID && "Cannot add operands before providing an instr descriptor");
200 
201   // Check if we're adding one of our existing operands.
202   if (&Op >= Operands && &Op < Operands + NumOperands) {
203     // This is unusual: MI->addOperand(MI->getOperand(i)).
204     // If adding Op requires reallocating or moving existing operands around,
205     // the Op reference could go stale. Support it by copying Op.
206     MachineOperand CopyOp(Op);
207     return addOperand(MF, CopyOp);
208   }
209 
210   // Find the insert location for the new operand.  Implicit registers go at
211   // the end, everything else goes before the implicit regs.
212   //
213   // FIXME: Allow mixed explicit and implicit operands on inline asm.
214   // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
215   // implicit-defs, but they must not be moved around.  See the FIXME in
216   // InstrEmitter.cpp.
217   unsigned OpNo = getNumOperands();
218   bool isImpReg = Op.isReg() && Op.isImplicit();
219   if (!isImpReg && !isInlineAsm()) {
    // Walk backwards over the trailing implicit registers so the explicit
    // operand is inserted just before them.
220     while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
221       --OpNo;
222       assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
223     }
224   }
225 
226 #ifndef NDEBUG
227   bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
228   // OpNo now points as the desired insertion point.  Unless this is a variadic
229   // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
230   // RegMask operands go between the explicit and implicit operands.
231   assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
232           OpNo < MCID->getNumOperands() || isMetaDataOp) &&
233          "Trying to add an operand to a machine instr that is already done!");
234 #endif
235 
  // Null when this instruction is not inserted into a function; in that
  // case no register use lists need updating.
236   MachineRegisterInfo *MRI = getRegInfo();
237 
238   // Determine if the Operands array needs to be reallocated.
239   // Save the old capacity and operand array.
240   OperandCapacity OldCap = CapOperands;
241   MachineOperand *OldOperands = Operands;
242   if (!OldOperands || OldCap.getSize() == getNumOperands()) {
243     CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
244     Operands = MF.allocateOperandArray(CapOperands);
245     // Move the operands before the insertion point.
246     if (OpNo)
247       moveOperands(Operands, OldOperands, OpNo, MRI);
248   }
249 
250   // Move the operands following the insertion point.
  // Without a reallocation, Operands == OldOperands and this shifts the
  // tail up one slot in place; moveOperands handles the overlap.
251   if (OpNo != NumOperands)
252     moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
253                  MRI);
254   ++NumOperands;
255 
256   // Deallocate the old operand array.
257   if (OldOperands != Operands && OldOperands)
258     MF.deallocateOperandArray(OldCap, OldOperands);
259 
260   // Copy Op into place. It still needs to be inserted into the MRI use lists.
261   MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
262   NewMO->ParentMI = this;
263 
264   // When adding a register operand, tell MRI about it.
265   if (NewMO->isReg()) {
266     // Ensure isOnRegUseList() returns false, regardless of Op's status.
267     NewMO->Contents.Reg.Prev = nullptr;
268     // Ignore existing ties. This is not a property that can be copied.
269     NewMO->TiedTo = 0;
270     // Add the new operand to MRI, but only for instructions in an MBB.
271     if (MRI)
272       MRI->addRegOperandToUseList(NewMO);
273     // The MCID operand information isn't accurate until we start adding
274     // explicit operands. The implicit operands are added first, then the
275     // explicits are inserted before them.
276     if (!isImpReg) {
277       // Tie uses to defs as indicated in MCInstrDesc.
278       if (NewMO->isUse()) {
279         int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
280         if (DefIdx != -1)
281           tieOperands(DefIdx, OpNo);
282       }
283       // If the register operand is flagged as early, mark the operand as such.
284       if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
285         NewMO->setIsEarlyClobber(true);
286     }
287   }
288 }
289 
290 /// RemoveOperand - Erase an operand  from an instruction, leaving it with one
291 /// fewer operand than it started with.
292 ///
293 void MachineInstr::RemoveOperand(unsigned OpNo) {
294   assert(OpNo < getNumOperands() && "Invalid operand number");
  // Break any tie on the operand being removed; ties on later operands
  // would be corrupted by the shift below (checked in debug builds).
295   untieRegOperand(OpNo);
296 
297 #ifndef NDEBUG
298   // Moving tied operands would break the ties.
299   for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
300     if (Operands[i].isReg())
301       assert(!Operands[i].isTied() && "Cannot move tied operands");
302 #endif
303 
  // Unlink the operand from its register's use list before moving memory.
304   MachineRegisterInfo *MRI = getRegInfo();
305   if (MRI && Operands[OpNo].isReg())
306     MRI->removeRegOperandFromUseList(Operands + OpNo);
307 
308   // Don't call the MachineOperand destructor. A lot of this code depends on
309   // MachineOperand having a trivial destructor anyway, and adding a call here
310   // wouldn't make it 'destructor-correct'.
311 
  // Shift the tail of the operand array down over the erased slot.
312   if (unsigned N = NumOperands - 1 - OpNo)
313     moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
314   --NumOperands;
315 }
316 
/// Drop all memory operands from this instruction, re-packing the extra-info
/// storage so any pre-/post-instruction symbols are preserved.
317 void MachineInstr::dropMemRefs(MachineFunction &MF) {
318   if (memoperands_empty())
319     return;
320 
321   // See if we can just drop all of our extra info.
322   if (!getPreInstrSymbol() && !getPostInstrSymbol()) {
323     Info.clear();
324     return;
325   }
  // A single remaining symbol can be stored inline in Info.
326   if (!getPostInstrSymbol()) {
327     Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
328     return;
329   }
330   if (!getPreInstrSymbol()) {
331     Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
332     return;
333   }
334 
335   // Otherwise allocate a fresh extra info with just these symbols.
336   Info.set<EIIK_OutOfLine>(
337       MF.createMIExtraInfo({}, getPreInstrSymbol(), getPostInstrSymbol()));
338 }
339 
/// Replace this instruction's memory operands with MMOs. A single MMO with
/// no attached symbols is stored inline; anything larger goes out-of-line.
340 void MachineInstr::setMemRefs(MachineFunction &MF,
341                               ArrayRef<MachineMemOperand *> MMOs) {
342   if (MMOs.empty()) {
343     dropMemRefs(MF);
344     return;
345   }
346 
347   // Try to store a single MMO inline.
348   if (MMOs.size() == 1 && !getPreInstrSymbol() && !getPostInstrSymbol()) {
349     Info.set<EIIK_MMO>(MMOs[0]);
350     return;
351   }
352 
353   // Otherwise create an extra info struct with all of our info.
354   Info.set<EIIK_OutOfLine>(
355       MF.createMIExtraInfo(MMOs, getPreInstrSymbol(), getPostInstrSymbol()));
356 }
357 
358 void MachineInstr::addMemOperand(MachineFunction &MF,
359                                  MachineMemOperand *MO) {
360   SmallVector<MachineMemOperand *, 2> MMOs;
361   MMOs.append(memoperands_begin(), memoperands_end());
362   MMOs.push_back(MO);
363   setMemRefs(MF, MMOs);
364 }
365 
366 void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
367   if (this == &MI)
368     // Nothing to do for a self-clone!
369     return;
370 
371   assert(&MF == MI.getMF() &&
372          "Invalid machine functions when cloning memory refrences!");
373   // See if we can just steal the extra info already allocated for the
374   // instruction. We can do this whenever the pre- and post-instruction symbols
375   // are the same (including null).
376   if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
377       getPostInstrSymbol() == MI.getPostInstrSymbol()) {
378     Info = MI.Info;
379     return;
380   }
381 
382   // Otherwise, fall back on a copy-based clone.
383   setMemRefs(MF, MI.memoperands());
384 }
385 
386 /// Check to see if the MMOs pointed to by the two MemRefs arrays are
387 /// identical.
388 static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
389                              ArrayRef<MachineMemOperand *> RHS) {
390   if (LHS.size() != RHS.size())
391     return false;
392 
393   auto LHSPointees = make_pointee_range(LHS);
394   auto RHSPointees = make_pointee_range(RHS);
395   return std::equal(LHSPointees.begin(), LHSPointees.end(),
396                     RHSPointees.begin());
397 }
398 
/// Set this instruction's memoperands to the conservative merge of the
/// memoperands of all instructions in MIs. An empty memoperand list on any
/// input forces the merged result to be empty as well.
399 void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
400                                       ArrayRef<const MachineInstr *> MIs) {
401   // Try handling easy numbers of MIs with simpler mechanisms.
402   if (MIs.empty()) {
403     dropMemRefs(MF);
404     return;
405   }
406   if (MIs.size() == 1) {
407     cloneMemRefs(MF, *MIs[0]);
408     return;
409   }
410   // Because an empty memoperands list provides *no* information and must be
411   // handled conservatively (assuming the instruction can do anything), the only
412   // way to merge with it is to drop all other memoperands.
413   if (MIs[0]->memoperands_empty()) {
414     dropMemRefs(MF);
415     return;
416   }
417 
418   // Handle the general case.
419   SmallVector<MachineMemOperand *, 2> MergedMMOs;
420   // Start with the first instruction.
421   assert(&MF == MIs[0]->getMF() &&
422          "Invalid machine functions when cloning memory references!");
423   MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
424   // Now walk all the other instructions and accumulate any different MMOs.
425   for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
426     assert(&MF == MI.getMF() &&
427            "Invalid machine functions when cloning memory references!");
428 
429     // Skip MIs with identical operands to the first. This is a somewhat
430     // arbitrary hack but will catch common cases without being quadratic.
431     // TODO: We could fully implement merge semantics here if needed.
432     if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
433       continue;
434 
435     // Because an empty memoperands list provides *no* information and must be
436     // handled conservatively (assuming the instruction can do anything), the
437     // only way to merge with it is to drop all other memoperands.
438     if (MI.memoperands_empty()) {
439       dropMemRefs(MF);
440       return;
441     }
442 
443     // Otherwise accumulate these into our temporary buffer of the merged state.
444     MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
445   }
446 
447   setMemRefs(MF, MergedMMOs);
448 }
449 
/// Set (or clear, when Symbol is null) the pre-instruction MCSymbol,
/// shrinking the extra-info storage back to an inline form when possible.
450 void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
451   MCSymbol *OldSymbol = getPreInstrSymbol();
452   if (OldSymbol == Symbol)
453     return;
454   if (OldSymbol && !Symbol) {
455     // We're removing a symbol rather than adding one. Try to clean up any
456     // extra info carried around.
457     if (Info.is<EIIK_PreInstrSymbol>()) {
458       Info.clear();
459       return;
460     }
461 
462     if (memoperands_empty()) {
463       assert(getPostInstrSymbol() &&
464              "Should never have only a single symbol allocated out-of-line!");
465       Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
466       return;
467     }
468 
469     // Otherwise fallback on the generic update.
470   } else if (!Info || Info.is<EIIK_PreInstrSymbol>()) {
471     // If we don't have any other extra info, we can store this inline.
472     Info.set<EIIK_PreInstrSymbol>(Symbol);
473     return;
474   }
475 
476   // Otherwise, allocate a full new set of extra info.
477   // FIXME: Maybe we should make the symbols in the extra info mutable?
478   Info.set<EIIK_OutOfLine>(
479       MF.createMIExtraInfo(memoperands(), Symbol, getPostInstrSymbol()));
480 }
481 
/// Set (or clear, when Symbol is null) the post-instruction MCSymbol; mirror
/// image of setPreInstrSymbol, with the same inline-storage optimizations.
482 void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
483   MCSymbol *OldSymbol = getPostInstrSymbol();
484   if (OldSymbol == Symbol)
485     return;
486   if (OldSymbol && !Symbol) {
487     // We're removing a symbol rather than adding one. Try to clean up any
488     // extra info carried around.
489     if (Info.is<EIIK_PostInstrSymbol>()) {
490       Info.clear();
491       return;
492     }
493 
494     if (memoperands_empty()) {
495       assert(getPreInstrSymbol() &&
496              "Should never have only a single symbol allocated out-of-line!");
497       Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
498       return;
499     }
500 
501     // Otherwise fallback on the generic update.
502   } else if (!Info || Info.is<EIIK_PostInstrSymbol>()) {
503     // If we don't have any other extra info, we can store this inline.
504     Info.set<EIIK_PostInstrSymbol>(Symbol);
505     return;
506   }
507 
508   // Otherwise, allocate a full new set of extra info.
509   // FIXME: Maybe we should make the symbols in the extra info mutable?
510   Info.set<EIIK_OutOfLine>(
511       MF.createMIExtraInfo(memoperands(), getPreInstrSymbol(), Symbol));
512 }
513 
/// Compute the flag set appropriate for an instruction that merges this one
/// with Other.
514 uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
515   // For now, just return the union of the flags. If the flags get more
516   // complicated over time, we might need more logic here.
517   return getFlags() | Other.getFlags();
518 }
519 
/// Scan the bundle headed by this instruction, testing each instruction's
/// MCInstrDesc flags against Mask. AnyInBundle succeeds on the first match;
/// AllInBundle requires every non-BUNDLE instruction to match.
520 bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
521   assert(!isBundledWithPred() && "Must be called on bundle header");
522   for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
523     if (MII->getDesc().getFlags() & Mask) {
524       if (Type == AnyInBundle)
525         return true;
526     } else {
  // The BUNDLE pseudo itself is exempt from the AllInBundle requirement.
527       if (Type == AllInBundle && !MII->isBundle())
528         return false;
529     }
530     // This was the last instruction in the bundle.
531     if (!MII->isBundledWithSucc())
532       return Type == AllInBundle;
533   }
534 }
535 
/// Return true if this instruction is identical to Other: same opcode and
/// operand count, operand-wise identical (modulo the Check policy for defs
/// and kill/dead flags), recursing into bundles, and — for debug
/// instructions — matching DebugLocs.
536 bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
537                                  MICheckType Check) const {
538   // If opcodes or number of operands are not the same then the two
539   // instructions are obviously not identical.
540   if (Other.getOpcode() != getOpcode() ||
541       Other.getNumOperands() != getNumOperands())
542     return false;
543 
544   if (isBundle()) {
545     // We have passed the test above that both instructions have the same
546     // opcode, so we know that both instructions are bundles here. Let's compare
547     // MIs inside the bundle.
548     assert(Other.isBundle() && "Expected that both instructions are bundles.");
549     MachineBasicBlock::const_instr_iterator I1 = getIterator();
550     MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
551     // Loop until we analysed the last instruction inside at least one of the
552     // bundles.
553     while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
554       ++I1;
555       ++I2;
556       if (!I1->isIdenticalTo(*I2, Check))
557         return false;
558     }
559     // If we've reached the end of just one of the two bundles, but not both,
560     // the instructions are not identical.
561     if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
562       return false;
563   }
564 
565   // Check operands to make sure they match.
566   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
567     const MachineOperand &MO = getOperand(i);
568     const MachineOperand &OMO = Other.getOperand(i);
569     if (!MO.isReg()) {
570       if (!MO.isIdenticalTo(OMO))
571         return false;
572       continue;
573     }
574 
575     // Clients may or may not want to ignore defs when testing for equality.
576     // For example, machine CSE pass only cares about finding common
577     // subexpressions, so it's safe to ignore virtual register defs.
578     if (MO.isDef()) {
579       if (Check == IgnoreDefs)
580         continue;
581       else if (Check == IgnoreVRegDefs) {
  // Only physical-register defs still need to match under IgnoreVRegDefs.
582         if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
583             !TargetRegisterInfo::isVirtualRegister(OMO.getReg()))
584           if (!MO.isIdenticalTo(OMO))
585             return false;
586       } else {
587         if (!MO.isIdenticalTo(OMO))
588           return false;
589         if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
590           return false;
591       }
592     } else {
593       if (!MO.isIdenticalTo(OMO))
594         return false;
595       if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
596         return false;
597     }
598   }
599   // If DebugLoc does not match then two debug instructions are not identical.
600   if (isDebugInstr())
601     if (getDebugLoc() && Other.getDebugLoc() &&
602         getDebugLoc() != Other.getDebugLoc())
603       return false;
604   return true;
605 }
606 
/// Return the function containing this instruction. Requires the instruction
/// to be inserted into a block that is itself inserted into a function.
607 const MachineFunction *MachineInstr::getMF() const {
608   return getParent()->getParent();
609 }
610 
/// Unlink this instruction from its basic block without deleting it; returns
/// the instruction so the caller can re-insert it elsewhere.
611 MachineInstr *MachineInstr::removeFromParent() {
612   assert(getParent() && "Not embedded in a basic block!");
613   return getParent()->remove(this);
614 }
615 
/// Like removeFromParent, but uses the instr_iterator list so the
/// instruction is unlinked even from inside a bundle.
616 MachineInstr *MachineInstr::removeFromBundle() {
617   assert(getParent() && "Not embedded in a basic block!");
618   return getParent()->remove_instr(this);
619 }
620 
/// Unlink this instruction from its basic block and delete it.
621 void MachineInstr::eraseFromParent() {
622   assert(getParent() && "Not embedded in a basic block!");
623   getParent()->erase(this);
624 }
625 
/// Erase this instruction, first marking every DBG_VALUE that uses one of
/// its virtual-register defs as undef so debug info doesn't reference the
/// deleted definition.
626 void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
627   assert(getParent() && "Not embedded in a basic block!");
628   MachineBasicBlock *MBB = getParent();
629   MachineFunction *MF = MBB->getParent();
630   assert(MF && "Not embedded in a function!");
631 
632   MachineInstr *MI = (MachineInstr *)this;
633   MachineRegisterInfo &MRI = MF->getRegInfo();
634 
  // Invalidate debug uses of every virtual register this instruction defines.
635   for (const MachineOperand &MO : MI->operands()) {
636     if (!MO.isReg() || !MO.isDef())
637       continue;
638     unsigned Reg = MO.getReg();
639     if (!TargetRegisterInfo::isVirtualRegister(Reg))
640       continue;
641     MRI.markUsesInDebugValueAsUndef(Reg);
642   }
643   MI->eraseFromParent();
644 }
645 
/// Like eraseFromParent, but uses the instr_iterator list so instructions
/// inside bundles are erased correctly.
646 void MachineInstr::eraseFromBundle() {
647   assert(getParent() && "Not embedded in a basic block!");
648   getParent()->erase_instr(this);
649 }
650 
/// Count the explicit operands: the MCInstrDesc's fixed operand count, plus
/// — for variadic instructions — trailing operands up to the first implicit
/// register operand.
651 unsigned MachineInstr::getNumExplicitOperands() const {
652   unsigned NumOperands = MCID->getNumOperands();
653   if (!MCID->isVariadic())
654     return NumOperands;
655 
656   for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
657     const MachineOperand &MO = getOperand(I);
658     // The operands must always be in the following order:
659     // - explicit reg defs,
660     // - other explicit operands (reg uses, immediates, etc.),
661     // - implicit reg defs
662     // - implicit reg uses
663     if (MO.isReg() && MO.isImplicit())
664       break;
665     ++NumOperands;
666   }
667   return NumOperands;
668 }
669 
/// Count the explicit register defs: the MCInstrDesc's fixed def count,
/// plus — for variadic instructions — trailing explicit reg defs up to the
/// first operand that is not an explicit register def.
670 unsigned MachineInstr::getNumExplicitDefs() const {
671   unsigned NumDefs = MCID->getNumDefs();
672   if (!MCID->isVariadic())
673     return NumDefs;
674 
675   for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
676     const MachineOperand &MO = getOperand(I);
677     if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
678       break;
679     ++NumDefs;
680   }
681   return NumDefs;
682 }
683 
/// Bundle this instruction with its predecessor, setting the matching
/// BundledPred/BundledSucc flag pair on both instructions.
684 void MachineInstr::bundleWithPred() {
685   assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
686   setFlag(BundledPred);
687   MachineBasicBlock::instr_iterator Pred = getIterator();
688   --Pred;
689   assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
690   Pred->setFlag(BundledSucc);
691 }
692 
/// Bundle this instruction with its successor, setting the matching
/// BundledSucc/BundledPred flag pair on both instructions.
693 void MachineInstr::bundleWithSucc() {
694   assert(!isBundledWithSucc() && "MI is already bundled with its successor");
695   setFlag(BundledSucc);
696   MachineBasicBlock::instr_iterator Succ = getIterator();
697   ++Succ;
698   assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
699   Succ->setFlag(BundledPred);
700 }
701 
/// Detach this instruction from its bundle predecessor, clearing the flag
/// pair on both instructions.
702 void MachineInstr::unbundleFromPred() {
703   assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
704   clearFlag(BundledPred);
705   MachineBasicBlock::instr_iterator Pred = getIterator();
706   --Pred;
707   assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
708   Pred->clearFlag(BundledSucc);
709 }
710 
/// Detach this instruction from its bundle successor, clearing the flag
/// pair on both instructions.
711 void MachineInstr::unbundleFromSucc() {
712   assert(isBundledWithSucc() && "MI isn't bundled with its successor");
713   clearFlag(BundledSucc);
714   MachineBasicBlock::instr_iterator Succ = getIterator();
715   ++Succ;
716   assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
717   Succ->clearFlag(BundledPred);
718 }
719 
720 bool MachineInstr::isStackAligningInlineAsm() const {
721   if (isInlineAsm()) {
722     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
723     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
724       return true;
725   }
726   return false;
727 }
728 
/// Decode the asm dialect from the Extra_AsmDialect bit of the inline asm
/// extra-info immediate operand.
729 InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
730   assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
731   unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
732   return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
733 }
734 
/// For an inline asm operand OpIdx, find the index of the flag-word operand
/// describing its operand group; optionally also return the group number.
/// Returns -1 for the fixed initial operands or for implicit operands past
/// the flagged groups.
735 int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
736                                        unsigned *GroupNo) const {
737   assert(isInlineAsm() && "Expected an inline asm instruction");
738   assert(OpIdx < getNumOperands() && "OpIdx out of range");
739 
740   // Ignore queries about the initial operands.
741   if (OpIdx < InlineAsm::MIOp_FirstOperand)
742     return -1;
743 
  // Walk the operand groups: each group is one immediate flag operand
  // followed by the number of register operands encoded in that flag.
744   unsigned Group = 0;
745   unsigned NumOps;
746   for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
747        i += NumOps) {
748     const MachineOperand &FlagMO = getOperand(i);
749     // If we reach the implicit register operands, stop looking.
750     if (!FlagMO.isImm())
751       return -1;
752     NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
753     if (i + NumOps > OpIdx) {
754       if (GroupNo)
755         *GroupNo = Group;
756       return i;
757     }
758     ++Group;
759   }
760   return -1;
761 }
762 
/// Return the DILabel metadata attached to a DBG_LABEL (operand 0).
763 const DILabel *MachineInstr::getDebugLabel() const {
764   assert(isDebugLabel() && "not a DBG_LABEL");
765   return cast<DILabel>(getOperand(0).getMetadata());
766 }
767 
/// Return the DILocalVariable metadata of a DBG_VALUE (operand 2).
768 const DILocalVariable *MachineInstr::getDebugVariable() const {
769   assert(isDebugValue() && "not a DBG_VALUE");
770   return cast<DILocalVariable>(getOperand(2).getMetadata());
771 }
772 
/// Return the DIExpression metadata of a DBG_VALUE (operand 3).
773 const DIExpression *MachineInstr::getDebugExpression() const {
774   assert(isDebugValue() && "not a DBG_VALUE");
775   return cast<DIExpression>(getOperand(3).getMetadata());
776 }
777 
/// Return the register class constrained by operand OpIdx, or nullptr when
/// the operand is unconstrained. Ordinary instructions consult the
/// MCInstrDesc; inline asm decodes the constraint from its flag words.
778 const TargetRegisterClass*
779 MachineInstr::getRegClassConstraint(unsigned OpIdx,
780                                     const TargetInstrInfo *TII,
781                                     const TargetRegisterInfo *TRI) const {
782   assert(getParent() && "Can't have an MBB reference here!");
783   assert(getMF() && "Can't have an MF reference here!");
784   const MachineFunction &MF = *getMF();
785 
786   // Most opcodes have fixed constraints in their MCInstrDesc.
787   if (!isInlineAsm())
788     return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
789 
790   if (!getOperand(OpIdx).isReg())
791     return nullptr;
792 
793   // For tied uses on inline asm, get the constraint from the def.
794   unsigned DefIdx;
795   if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
796     OpIdx = DefIdx;
797 
798   // Inline asm stores register class constraints in the flag word.
799   int FlagIdx = findInlineAsmFlagIdx(OpIdx);
800   if (FlagIdx < 0)
801     return nullptr;
802 
803   unsigned Flag = getOperand(FlagIdx).getImm();
804   unsigned RCID;
  // Register-kind groups may carry an explicit register class id.
805   if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
806        InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
807        InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
808       InlineAsm::hasRegClassConstraint(Flag, RCID))
809     return TRI->getRegClass(RCID);
810 
811   // Assume that all registers in a memory operand are pointers.
812   if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
813     return TRI->getPointerRegClass(MF);
814 
815   return nullptr;
816 }
817 
818 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
819     unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
820     const TargetRegisterInfo *TRI, bool ExploreBundle) const {
821   // Check every operands inside the bundle if we have
822   // been asked to.
823   if (ExploreBundle)
824     for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
825          ++OpndIt)
826       CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
827           OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
828   else
829     // Otherwise, just check the current operands.
830     for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
831       CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
832   return CurRC;
833 }
834 
835 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
836     unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
837     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
838   assert(CurRC && "Invalid initial register class");
839   // Check if Reg is constrained by some of its use/def from MI.
840   const MachineOperand &MO = getOperand(OpIdx);
841   if (!MO.isReg() || MO.getReg() != Reg)
842     return CurRC;
843   // If yes, accumulate the constraints through the operand.
844   return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
845 }
846 
847 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
848     unsigned OpIdx, const TargetRegisterClass *CurRC,
849     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
850   const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
851   const MachineOperand &MO = getOperand(OpIdx);
852   assert(MO.isReg() &&
853          "Cannot get register constraints for non-register operand");
854   assert(CurRC && "Invalid initial register class");
855   if (unsigned SubIdx = MO.getSubReg()) {
856     if (OpRC)
857       CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
858     else
859       CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
860   } else if (OpRC)
861     CurRC = TRI->getCommonSubClass(CurRC, OpRC);
862   return CurRC;
863 }
864 
865 /// Return the number of instructions inside the MI bundle, not counting the
866 /// header instruction.
867 unsigned MachineInstr::getBundleSize() const {
868   MachineBasicBlock::const_instr_iterator I = getIterator();
869   unsigned Size = 0;
870   while (I->isBundledWithSucc()) {
871     ++Size;
872     ++I;
873   }
874   return Size;
875 }
876 
877 /// Returns true if the MachineInstr has an implicit-use operand of exactly
878 /// the given register (not considering sub/super-registers).
879 bool MachineInstr::hasRegisterImplicitUseOperand(unsigned Reg) const {
880   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
881     const MachineOperand &MO = getOperand(i);
882     if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
883       return true;
884   }
885   return false;
886 }
887 
888 /// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
889 /// the specific register or -1 if it is not found. It further tightens
890 /// the search criteria to a use that kills the register if isKill is true.
891 int MachineInstr::findRegisterUseOperandIdx(
892     unsigned Reg, bool isKill, const TargetRegisterInfo *TRI) const {
893   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
894     const MachineOperand &MO = getOperand(i);
895     if (!MO.isReg() || !MO.isUse())
896       continue;
897     unsigned MOReg = MO.getReg();
898     if (!MOReg)
899       continue;
900     if (MOReg == Reg || (TRI && TargetRegisterInfo::isPhysicalRegister(MOReg) &&
901                          TargetRegisterInfo::isPhysicalRegister(Reg) &&
902                          TRI->isSubRegister(MOReg, Reg)))
903       if (!isKill || MO.isKill())
904         return i;
905   }
906   return -1;
907 }
908 
/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines.
/// If Ops is not null, the index of every operand referencing Reg is
/// appended to it.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(unsigned Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      // An undef use doesn't read the register's current value.
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial def undef doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}
936 
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
/// When Overlap is true, any physical def overlapping Reg matches, and
/// regmask operands that clobber Reg are accepted as well; otherwise only an
/// exact or super-register def matches.
int
MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
                                        const TargetRegisterInfo *TRI) const {
  bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    // For physical registers, optionally widen the match to overlapping or
    // super-registers, depending on the Overlap flag.
    if (!Found && TRI && isPhys &&
        TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}
967 
968 /// findFirstPredOperandIdx() - Find the index of the first operand in the
969 /// operand list that is used to represent the predicate. It returns -1 if
970 /// none is found.
971 int MachineInstr::findFirstPredOperandIdx() const {
972   // Don't call MCID.findFirstPredOperandIdx() because this variant
973   // is sometimes called on an instruction that's not yet complete, and
974   // so the number of operands is less than the MCID indicates. In
975   // particular, the PTX target does this.
976   const MCInstrDesc &MCID = getDesc();
977   if (MCID.isPredicable()) {
978     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
979       if (MCID.OpInfo[i].isPredicate())
980         return i;
981   }
982 
983   return -1;
984 }
985 
// MachineOperand::TiedTo is 4 bits wide.
const unsigned TiedMax = 15;

/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0:              Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax:        Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  if (DefIdx < TiedMax)
    UseMO.TiedTo = DefIdx + 1;
  else {
    // Inline asm can use the group descriptors to find tied operands, but on
    // normal instruction, the tied def must be within the first TiedMax
    // operands.
    assert(isInlineAsm() && "DefIdx out of range");
    // Saturate; findTiedOperandIdx() recovers the real index from the inline
    // asm operand group descriptors.
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
1022 
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm()) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    // Each group consists of the flag operand plus its register operands.
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}
1080 
1081 /// clearKillInfo - Clears kill flags on all operands.
1082 ///
1083 void MachineInstr::clearKillInfo() {
1084   for (MachineOperand &MO : operands()) {
1085     if (MO.isReg() && MO.isUse())
1086       MO.setIsKill(false);
1087   }
1088 }
1089 
1090 void MachineInstr::substituteRegister(unsigned FromReg, unsigned ToReg,
1091                                       unsigned SubIdx,
1092                                       const TargetRegisterInfo &RegInfo) {
1093   if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
1094     if (SubIdx)
1095       ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1096     for (MachineOperand &MO : operands()) {
1097       if (!MO.isReg() || MO.getReg() != FromReg)
1098         continue;
1099       MO.substPhysReg(ToReg, RegInfo);
1100     }
1101   } else {
1102     for (MachineOperand &MO : operands()) {
1103       if (!MO.isReg() || MO.getReg() != FromReg)
1104         continue;
1105       MO.substVirtReg(ToReg, SubIdx, RegInfo);
1106     }
1107   }
1108 }
1109 
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    SawStore = true;
    return false;
  }

  if (isPosition() || isDebugInstr() || isTerminator() ||
      hasUnmodeledSideEffects())
    return false;

  // See if this instruction does a load.  If so, we have to guarantee that the
  // loaded value doesn't change between the load and its intended
  // destination. The check for isInvariantLoad gives the target the chance to
  // classify the load as always returning a constant, e.g. a constant pool
  // load.
  if (mayLoad() && !isDereferenceableInvariantLoad(AA))
    // Otherwise, this is a real load.  If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}
1141 
/// Returns true if this instruction's memory access may alias Other's.
/// Conservatively answers true whenever the question cannot be decided from
/// the available memory operands and alias analysis.
bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other,
                            bool UseTBAA) {
  const MachineFunction *MF = getMF();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  // If neither instruction stores to memory, they can't alias in any
  // meaningful way, even if they read from the same address.
  if (!mayStore() && !Other.mayStore())
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA))
    return false;

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!hasOneMemOperand() || !Other.hasOneMemOperand())
    return true;

  MachineMemOperand *MMOa = *memoperands_begin();
  MachineMemOperand *MMOb = *Other.memoperands_begin();

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offset with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  int64_t OffsetA = MMOa->getOffset();
  int64_t OffsetB = MMOb->getOffset();
  int64_t MinOffset = std::min(OffsetA, OffsetB);

  uint64_t WidthA = MMOa->getSize();
  uint64_t WidthB = MMOb->getSize();
  bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
  bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;

  const Value *ValA = MMOa->getValue();
  const Value *ValB = MMOb->getValue();
  bool SameVal = (ValA && ValB && (ValA == ValB));
  if (!SameVal) {
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    // A pseudo source value that cannot alias IR values rules out overlap
    // with an access that has a known IR value, and vice versa.
    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
      return false;
    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
      return false;
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal) {
    // Same underlying object: the accesses alias iff their
    // [offset, offset+width) ranges overlap.
    if (!KnownWidthA || !KnownWidthB)
      return true;
    int64_t MaxOffset = std::max(OffsetA, OffsetB);
    int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
    return (MinOffset + LowWidth > MaxOffset);
  }

  if (!AA)
    return true;

  if (!ValA || !ValB)
    return true;

  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");

  // Extend each width to cover both offsets so AA sees overlapping queries
  // rooted at the common minimum offset.
  int64_t OverlapA = KnownWidthA ? WidthA + OffsetA - MinOffset
                                 : MemoryLocation::UnknownSize;
  int64_t OverlapB = KnownWidthB ? WidthB + OffsetB - MinOffset
                                 : MemoryLocation::UnknownSize;

  AliasResult AAResult = AA->alias(
      MemoryLocation(ValA, OverlapA,
                     UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      MemoryLocation(ValB, OverlapB,
                     UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));

  return (AAResult != NoAlias);
}
1233 
1234 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1235 /// or volatile memory reference, or if the information describing the memory
1236 /// reference is not available. Return false if it is known to have no ordered
1237 /// memory references.
1238 bool MachineInstr::hasOrderedMemoryRef() const {
1239   // An instruction known never to access memory won't have a volatile access.
1240   if (!mayStore() &&
1241       !mayLoad() &&
1242       !isCall() &&
1243       !hasUnmodeledSideEffects())
1244     return false;
1245 
1246   // Otherwise, if the instruction has no memory reference information,
1247   // conservatively assume it wasn't preserved.
1248   if (memoperands_empty())
1249     return true;
1250 
1251   // Check if any of our memory operands are ordered.
1252   return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1253     return !MMO->isUnordered();
1254   });
1255 }
1256 
1257 /// isDereferenceableInvariantLoad - Return true if this instruction will never
1258 /// trap and is loading from a location whose value is invariant across a run of
1259 /// this function.
1260 bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const {
1261   // If the instruction doesn't load at all, it isn't an invariant load.
1262   if (!mayLoad())
1263     return false;
1264 
1265   // If the instruction has lost its memoperands, conservatively assume that
1266   // it may not be an invariant load.
1267   if (memoperands_empty())
1268     return false;
1269 
1270   const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1271 
1272   for (MachineMemOperand *MMO : memoperands()) {
1273     if (MMO->isVolatile()) return false;
1274     if (MMO->isStore()) return false;
1275     if (MMO->isInvariant() && MMO->isDereferenceable())
1276       continue;
1277 
1278     // A load from a constant PseudoSourceValue is invariant.
1279     if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
1280       if (PSV->isConstant(&MFI))
1281         continue;
1282 
1283     if (const Value *V = MMO->getValue()) {
1284       // If we have an AliasAnalysis, ask it whether the memory is constant.
1285       if (AA &&
1286           AA->pointsToConstantMemory(
1287               MemoryLocation(V, MMO->getSize(), MMO->getAAInfo())))
1288         continue;
1289     }
1290 
1291     // Otherwise assume conservatively.
1292     return false;
1293   }
1294 
1295   // Everything checks out.
1296   return true;
1297 }
1298 
1299 /// isConstantValuePHI - If the specified instruction is a PHI that always
1300 /// merges together the same virtual register, return the register, otherwise
1301 /// return 0.
1302 unsigned MachineInstr::isConstantValuePHI() const {
1303   if (!isPHI())
1304     return 0;
1305   assert(getNumOperands() >= 3 &&
1306          "It's illegal to have a PHI without source operands");
1307 
1308   unsigned Reg = getOperand(1).getReg();
1309   for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1310     if (getOperand(i).getReg() != Reg)
1311       return 0;
1312   return Reg;
1313 }
1314 
1315 bool MachineInstr::hasUnmodeledSideEffects() const {
1316   if (hasProperty(MCID::UnmodeledSideEffects))
1317     return true;
1318   if (isInlineAsm()) {
1319     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1320     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1321       return true;
1322   }
1323 
1324   return false;
1325 }
1326 
/// Returns true if loads cannot safely be folded across this instruction:
/// it may write memory, make a call, or have unmodeled side effects.
bool MachineInstr::isLoadFoldBarrier() const {
  return mayStore() || isCall() || hasUnmodeledSideEffects();
}
1330 
1331 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
1332 ///
1333 bool MachineInstr::allDefsAreDead() const {
1334   for (const MachineOperand &MO : operands()) {
1335     if (!MO.isReg() || MO.isUse())
1336       continue;
1337     if (!MO.isDead())
1338       return false;
1339   }
1340   return true;
1341 }
1342 
1343 /// copyImplicitOps - Copy implicit register operands from specified
1344 /// instruction to this instruction.
1345 void MachineInstr::copyImplicitOps(MachineFunction &MF,
1346                                    const MachineInstr &MI) {
1347   for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands();
1348        i != e; ++i) {
1349     const MachineOperand &MO = MI.getOperand(i);
1350     if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1351       addOperand(MF, MO);
1352   }
1353 }
1354 
/// Returns true if this instruction's tied-register relationships differ from
/// what the static MCInstrDesc TIED_TO constraints describe, meaning the
/// printer must emit the ties explicitly.
bool MachineInstr::hasComplexRegisterTies() const {
  const MCInstrDesc &MCID = getDesc();
  for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
    const auto &Operand = getOperand(I);
    if (!Operand.isReg() || Operand.isDef())
      // Ignore the defined registers as MCID marks only the uses as tied.
      continue;
    // -1 means "not tied" on both sides of the comparison.
    int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
    int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
    if (ExpectedTiedIdx != TiedIdx)
      return true;
  }
  return false;
}
1369 
/// Return the LLT to print for operand OpIdx, or an invalid LLT when no type
/// should be printed. PrintedTypes records which generic type indices have
/// already been emitted so each is printed at most once.
LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
                                 const MachineRegisterInfo &MRI) const {
  const MachineOperand &Op = getOperand(OpIdx);
  if (!Op.isReg())
    return LLT{};

  // Variadic and implicit operands have no OpInfo entry to consult; just
  // print the register's type.
  if (isVariadic() || OpIdx >= getNumExplicitOperands())
    return MRI.getType(Op.getReg());

  auto &OpInfo = getDesc().OpInfo[OpIdx];
  if (!OpInfo.isGenericType())
    return MRI.getType(Op.getReg());

  // This generic type index was already printed for an earlier operand.
  if (PrintedTypes[OpInfo.getGenericTypeIndex()])
    return LLT{};

  LLT TypeToPrint = MRI.getType(Op.getReg());
  // Don't mark the type index printed if it wasn't actually printed: maybe
  // another operand with the same type index has an actual type attached:
  if (TypeToPrint.isValid())
    PrintedTypes.set(OpInfo.getGenericTypeIndex());
  return TypeToPrint;
}
1393 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print this instruction to dbgs() with a two-space indent; available in
/// debug builds (or when LLVM_ENABLE_DUMP is defined).
LLVM_DUMP_METHOD void MachineInstr::dump() const {
  dbgs() << "  ";
  print(dbgs());
}
#endif
1400 
1401 void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1402                          bool SkipDebugLoc, bool AddNewLine,
1403                          const TargetInstrInfo *TII) const {
1404   const Module *M = nullptr;
1405   const Function *F = nullptr;
1406   if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1407     F = &MF->getFunction();
1408     M = F->getParent();
1409     if (!TII)
1410       TII = MF->getSubtarget().getInstrInfo();
1411   }
1412 
1413   ModuleSlotTracker MST(M);
1414   if (F)
1415     MST.incorporateFunction(*F);
1416   print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, TII);
1417 }
1418 
1419 void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1420                          bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1421                          bool AddNewLine, const TargetInstrInfo *TII) const {
1422   // We can be a bit tidier if we know the MachineFunction.
1423   const MachineFunction *MF = nullptr;
1424   const TargetRegisterInfo *TRI = nullptr;
1425   const MachineRegisterInfo *MRI = nullptr;
1426   const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1427   tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1428 
1429   if (isCFIInstruction())
1430     assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1431 
1432   SmallBitVector PrintedTypes(8);
1433   bool ShouldPrintRegisterTies = hasComplexRegisterTies();
1434   auto getTiedOperandIdx = [&](unsigned OpIdx) {
1435     if (!ShouldPrintRegisterTies)
1436       return 0U;
1437     const MachineOperand &MO = getOperand(OpIdx);
1438     if (MO.isReg() && MO.isTied() && !MO.isDef())
1439       return findTiedOperandIdx(OpIdx);
1440     return 0U;
1441   };
1442   unsigned StartOp = 0;
1443   unsigned e = getNumOperands();
1444 
1445   // Print explicitly defined operands on the left of an assignment syntax.
1446   while (StartOp < e) {
1447     const MachineOperand &MO = getOperand(StartOp);
1448     if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1449       break;
1450 
1451     if (StartOp != 0)
1452       OS << ", ";
1453 
1454     LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1455     unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1456     MO.print(OS, MST, TypeToPrint, /*PrintDef=*/false, IsStandalone,
1457              ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1458     ++StartOp;
1459   }
1460 
1461   if (StartOp != 0)
1462     OS << " = ";
1463 
1464   if (getFlag(MachineInstr::FrameSetup))
1465     OS << "frame-setup ";
1466   if (getFlag(MachineInstr::FrameDestroy))
1467     OS << "frame-destroy ";
1468   if (getFlag(MachineInstr::FmNoNans))
1469     OS << "nnan ";
1470   if (getFlag(MachineInstr::FmNoInfs))
1471     OS << "ninf ";
1472   if (getFlag(MachineInstr::FmNsz))
1473     OS << "nsz ";
1474   if (getFlag(MachineInstr::FmArcp))
1475     OS << "arcp ";
1476   if (getFlag(MachineInstr::FmContract))
1477     OS << "contract ";
1478   if (getFlag(MachineInstr::FmAfn))
1479     OS << "afn ";
1480   if (getFlag(MachineInstr::FmReassoc))
1481     OS << "reassoc ";
1482 
1483   // Print the opcode name.
1484   if (TII)
1485     OS << TII->getName(getOpcode());
1486   else
1487     OS << "UNKNOWN";
1488 
1489   if (SkipOpers)
1490     return;
1491 
1492   // Print the rest of the operands.
1493   bool FirstOp = true;
1494   unsigned AsmDescOp = ~0u;
1495   unsigned AsmOpCount = 0;
1496 
1497   if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1498     // Print asm string.
1499     OS << " ";
1500     const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1501     LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1502     unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1503     getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1504                             ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1505                             IntrinsicInfo);
1506 
1507     // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1508     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1509     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1510       OS << " [sideeffect]";
1511     if (ExtraInfo & InlineAsm::Extra_MayLoad)
1512       OS << " [mayload]";
1513     if (ExtraInfo & InlineAsm::Extra_MayStore)
1514       OS << " [maystore]";
1515     if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1516       OS << " [isconvergent]";
1517     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1518       OS << " [alignstack]";
1519     if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1520       OS << " [attdialect]";
1521     if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1522       OS << " [inteldialect]";
1523 
1524     StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1525     FirstOp = false;
1526   }
1527 
1528   for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1529     const MachineOperand &MO = getOperand(i);
1530 
1531     if (FirstOp) FirstOp = false; else OS << ",";
1532     OS << " ";
1533 
1534     if (isDebugValue() && MO.isMetadata()) {
1535       // Pretty print DBG_VALUE instructions.
1536       auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1537       if (DIV && !DIV->getName().empty())
1538         OS << "!\"" << DIV->getName() << '\"';
1539       else {
1540         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1541         unsigned TiedOperandIdx = getTiedOperandIdx(i);
1542         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1543                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1544       }
1545     } else if (isDebugLabel() && MO.isMetadata()) {
1546       // Pretty print DBG_LABEL instructions.
1547       auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1548       if (DIL && !DIL->getName().empty())
1549         OS << "\"" << DIL->getName() << '\"';
1550       else {
1551         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1552         unsigned TiedOperandIdx = getTiedOperandIdx(i);
1553         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1554                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1555       }
1556     } else if (i == AsmDescOp && MO.isImm()) {
1557       // Pretty print the inline asm operand descriptor.
1558       OS << '$' << AsmOpCount++;
1559       unsigned Flag = MO.getImm();
1560       switch (InlineAsm::getKind(Flag)) {
1561       case InlineAsm::Kind_RegUse:             OS << ":[reguse"; break;
1562       case InlineAsm::Kind_RegDef:             OS << ":[regdef"; break;
1563       case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
1564       case InlineAsm::Kind_Clobber:            OS << ":[clobber"; break;
1565       case InlineAsm::Kind_Imm:                OS << ":[imm"; break;
1566       case InlineAsm::Kind_Mem:                OS << ":[mem"; break;
1567       default: OS << ":[??" << InlineAsm::getKind(Flag); break;
1568       }
1569 
1570       unsigned RCID = 0;
1571       if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
1572           InlineAsm::hasRegClassConstraint(Flag, RCID)) {
1573         if (TRI) {
1574           OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1575         } else
1576           OS << ":RC" << RCID;
1577       }
1578 
1579       if (InlineAsm::isMemKind(Flag)) {
1580         unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
1581         switch (MCID) {
1582         case InlineAsm::Constraint_es: OS << ":es"; break;
1583         case InlineAsm::Constraint_i:  OS << ":i"; break;
1584         case InlineAsm::Constraint_m:  OS << ":m"; break;
1585         case InlineAsm::Constraint_o:  OS << ":o"; break;
1586         case InlineAsm::Constraint_v:  OS << ":v"; break;
1587         case InlineAsm::Constraint_Q:  OS << ":Q"; break;
1588         case InlineAsm::Constraint_R:  OS << ":R"; break;
1589         case InlineAsm::Constraint_S:  OS << ":S"; break;
1590         case InlineAsm::Constraint_T:  OS << ":T"; break;
1591         case InlineAsm::Constraint_Um: OS << ":Um"; break;
1592         case InlineAsm::Constraint_Un: OS << ":Un"; break;
1593         case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
1594         case InlineAsm::Constraint_Us: OS << ":Us"; break;
1595         case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
1596         case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
1597         case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
1598         case InlineAsm::Constraint_X:  OS << ":X"; break;
1599         case InlineAsm::Constraint_Z:  OS << ":Z"; break;
1600         case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
1601         case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
1602         default: OS << ":?"; break;
1603         }
1604       }
1605 
1606       unsigned TiedTo = 0;
1607       if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
1608         OS << " tiedto:$" << TiedTo;
1609 
1610       OS << ']';
1611 
1612       // Compute the index of the next operand descriptor.
1613       AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
1614     } else {
1615       LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1616       unsigned TiedOperandIdx = getTiedOperandIdx(i);
1617       if (MO.isImm() && isOperandSubregIdx(i))
1618         MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1619       else
1620         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1621                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1622     }
1623   }
1624 
1625   // Print any optional symbols attached to this instruction as-if they were
1626   // operands.
1627   if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1628     if (!FirstOp) {
1629       FirstOp = false;
1630       OS << ',';
1631     }
1632     OS << " pre-instr-symbol ";
1633     MachineOperand::printSymbol(OS, *PreInstrSymbol);
1634   }
1635   if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
1636     if (!FirstOp) {
1637       FirstOp = false;
1638       OS << ',';
1639     }
1640     OS << " post-instr-symbol ";
1641     MachineOperand::printSymbol(OS, *PostInstrSymbol);
1642   }
1643 
1644   if (!SkipDebugLoc) {
1645     if (const DebugLoc &DL = getDebugLoc()) {
1646       if (!FirstOp)
1647         OS << ',';
1648       OS << " debug-location ";
1649       DL->printAsOperand(OS, MST);
1650     }
1651   }
1652 
1653   if (!memoperands_empty()) {
1654     SmallVector<StringRef, 0> SSNs;
1655     const LLVMContext *Context = nullptr;
1656     std::unique_ptr<LLVMContext> CtxPtr;
1657     const MachineFrameInfo *MFI = nullptr;
1658     if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1659       MFI = &MF->getFrameInfo();
1660       Context = &MF->getFunction().getContext();
1661     } else {
1662       CtxPtr = llvm::make_unique<LLVMContext>();
1663       Context = CtxPtr.get();
1664     }
1665 
1666     OS << " :: ";
1667     bool NeedComma = false;
1668     for (const MachineMemOperand *Op : memoperands()) {
1669       if (NeedComma)
1670         OS << ", ";
1671       Op->print(OS, MST, SSNs, *Context, MFI, TII);
1672       NeedComma = true;
1673     }
1674   }
1675 
1676   if (SkipDebugLoc)
1677     return;
1678 
1679   bool HaveSemi = false;
1680 
1681   // Print debug location information.
1682   if (const DebugLoc &DL = getDebugLoc()) {
1683     if (!HaveSemi) {
1684       OS << ';';
1685       HaveSemi = true;
1686     }
1687     OS << ' ';
1688     DL.print(OS);
1689   }
1690 
1691   // Print extra comments for DEBUG_VALUE.
1692   if (isDebugValue() && getOperand(e - 2).isMetadata()) {
1693     if (!HaveSemi) {
1694       OS << ";";
1695       HaveSemi = true;
1696     }
1697     auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
1698     OS << " line no:" <<  DV->getLine();
1699     if (auto *InlinedAt = debugLoc->getInlinedAt()) {
1700       DebugLoc InlinedAtDL(InlinedAt);
1701       if (InlinedAtDL && MF) {
1702         OS << " inlined @[ ";
1703         InlinedAtDL.print(OS);
1704         OS << " ]";
1705       }
1706     }
1707     if (isIndirectDebugValue())
1708       OS << " indirect";
1709   }
1710   // TODO: DBG_LABEL
1711 
1712   if (AddNewLine)
1713     OS << '\n';
1714 }
1715 
/// Mark the first matching use operand of IncomingReg on this instruction as
/// killed, and drop kill flags on sub-registers that the new kill makes
/// redundant. Returns true when a kill is (or already was) recorded. If
/// AddIfNotFound is set and no matching use operand exists, an implicit
/// killed use is appended and true is returned.
bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  // Alias handling is only meaningful for physical registers that actually
  // have aliases.
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of operands whose kill flags become redundant below.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;

    // DEBUG_VALUE nodes do not contribute to code generation and should
    // always be ignored. Failure to do so may result in trying to modify
    // KILL flags on DEBUG_VALUE nodes.
    if (MO.isDebug())
      continue;

    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() &&
               TargetRegisterInfo::isPhysicalRegister(Reg)) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      // A sub-register kill is made redundant by the wider kill; queue it
      // for removal below.
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands. Walk DeadOps back-to-front so removing an
  // operand does not shift the indices still queued.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      // Explicit operands must stay in place; just clear the flag.
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}
1781 
1782 void MachineInstr::clearRegisterKills(unsigned Reg,
1783                                       const TargetRegisterInfo *RegInfo) {
1784   if (!TargetRegisterInfo::isPhysicalRegister(Reg))
1785     RegInfo = nullptr;
1786   for (MachineOperand &MO : operands()) {
1787     if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1788       continue;
1789     unsigned OpReg = MO.getReg();
1790     if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
1791       MO.setIsKill(false);
1792   }
1793 }
1794 
/// Mark every def operand of Reg on this instruction as dead, dropping dead
/// flags on sub-register defs that the wider dead def makes redundant.
/// Returns true when a dead def is (or already was) present. If
/// AddIfNotFound is set and Reg is not defined here, an implicit dead def is
/// appended and true is returned.
bool MachineInstr::addRegisterDead(unsigned Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  // Alias handling only applies to physical registers that have aliases.
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of sub-register dead defs that become redundant below.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() &&
               TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands. Process back-to-front so removals do not
  // invalidate the remaining queued indices.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      // Keep explicit operands; just clear their dead flag.
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}
1846 
1847 void MachineInstr::clearRegisterDeads(unsigned Reg) {
1848   for (MachineOperand &MO : operands()) {
1849     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
1850       continue;
1851     MO.setIsDead(false);
1852   }
1853 }
1854 
1855 void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) {
1856   for (MachineOperand &MO : operands()) {
1857     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
1858       continue;
1859     MO.setIsUndef(IsUndef);
1860   }
1861 }
1862 
1863 void MachineInstr::addRegisterDefined(unsigned Reg,
1864                                       const TargetRegisterInfo *RegInfo) {
1865   if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1866     MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
1867     if (MO)
1868       return;
1869   } else {
1870     for (const MachineOperand &MO : operands()) {
1871       if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
1872           MO.getSubReg() == 0)
1873         return;
1874     }
1875   }
1876   addOperand(MachineOperand::CreateReg(Reg,
1877                                        true  /*IsDef*/,
1878                                        true  /*IsImp*/));
1879 }
1880 
1881 void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
1882                                          const TargetRegisterInfo &TRI) {
1883   bool HasRegMask = false;
1884   for (MachineOperand &MO : operands()) {
1885     if (MO.isRegMask()) {
1886       HasRegMask = true;
1887       continue;
1888     }
1889     if (!MO.isReg() || !MO.isDef()) continue;
1890     unsigned Reg = MO.getReg();
1891     if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
1892     // If there are no uses, including partial uses, the def is dead.
1893     if (llvm::none_of(UsedRegs,
1894                       [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
1895       MO.setIsDead();
1896   }
1897 
1898   // This is a call with a register mask operand.
1899   // Mask clobbers are always dead, so add defs for the non-dead defines.
1900   if (HasRegMask)
1901     for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
1902          I != E; ++I)
1903       addRegisterDefined(*I, &TRI);
1904 }
1905 
1906 unsigned
1907 MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
1908   // Build up a buffer of hash code components.
1909   SmallVector<size_t, 8> HashComponents;
1910   HashComponents.reserve(MI->getNumOperands() + 1);
1911   HashComponents.push_back(MI->getOpcode());
1912   for (const MachineOperand &MO : MI->operands()) {
1913     if (MO.isReg() && MO.isDef() &&
1914         TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1915       continue;  // Skip virtual register defs.
1916 
1917     HashComponents.push_back(hash_value(MO));
1918   }
1919   return hash_combine_range(HashComponents.begin(), HashComponents.end());
1920 }
1921 
1922 void MachineInstr::emitError(StringRef Msg) const {
1923   // Find the source location cookie.
1924   unsigned LocCookie = 0;
1925   const MDNode *LocMD = nullptr;
1926   for (unsigned i = getNumOperands(); i != 0; --i) {
1927     if (getOperand(i-1).isMetadata() &&
1928         (LocMD = getOperand(i-1).getMetadata()) &&
1929         LocMD->getNumOperands() != 0) {
1930       if (const ConstantInt *CI =
1931               mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
1932         LocCookie = CI->getZExtValue();
1933         break;
1934       }
1935     }
1936   }
1937 
1938   if (const MachineBasicBlock *MBB = getParent())
1939     if (const MachineFunction *MF = MBB->getParent())
1940       return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
1941   report_fatal_error(Msg);
1942 }
1943 
1944 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
1945                                   const MCInstrDesc &MCID, bool IsIndirect,
1946                                   unsigned Reg, const MDNode *Variable,
1947                                   const MDNode *Expr) {
1948   assert(isa<DILocalVariable>(Variable) && "not a variable");
1949   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1950   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
1951          "Expected inlined-at fields to agree");
1952   auto MIB = BuildMI(MF, DL, MCID).addReg(Reg, RegState::Debug);
1953   if (IsIndirect)
1954     MIB.addImm(0U);
1955   else
1956     MIB.addReg(0U, RegState::Debug);
1957   return MIB.addMetadata(Variable).addMetadata(Expr);
1958 }
1959 
1960 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
1961                                   const MCInstrDesc &MCID, bool IsIndirect,
1962                                   MachineOperand &MO, const MDNode *Variable,
1963                                   const MDNode *Expr) {
1964   assert(isa<DILocalVariable>(Variable) && "not a variable");
1965   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1966   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
1967          "Expected inlined-at fields to agree");
1968   if (MO.isReg())
1969     return BuildMI(MF, DL, MCID, IsIndirect, MO.getReg(), Variable, Expr);
1970 
1971   auto MIB = BuildMI(MF, DL, MCID).add(MO);
1972   if (IsIndirect)
1973     MIB.addImm(0U);
1974   else
1975     MIB.addReg(0U, RegState::Debug);
1976   return MIB.addMetadata(Variable).addMetadata(Expr);
1977  }
1978 
1979 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
1980                                   MachineBasicBlock::iterator I,
1981                                   const DebugLoc &DL, const MCInstrDesc &MCID,
1982                                   bool IsIndirect, unsigned Reg,
1983                                   const MDNode *Variable, const MDNode *Expr) {
1984   MachineFunction &MF = *BB.getParent();
1985   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
1986   BB.insert(I, MI);
1987   return MachineInstrBuilder(MF, MI);
1988 }
1989 
1990 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
1991                                   MachineBasicBlock::iterator I,
1992                                   const DebugLoc &DL, const MCInstrDesc &MCID,
1993                                   bool IsIndirect, MachineOperand &MO,
1994                                   const MDNode *Variable, const MDNode *Expr) {
1995   MachineFunction &MF = *BB.getParent();
1996   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, MO, Variable, Expr);
1997   BB.insert(I, MI);
1998   return MachineInstrBuilder(MF, *MI);
1999 }
2000 
2001 /// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2002 /// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2003 static const DIExpression *computeExprForSpill(const MachineInstr &MI) {
2004   assert(MI.getOperand(0).isReg() && "can't spill non-register");
2005   assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2006          "Expected inlined-at fields to agree");
2007 
2008   const DIExpression *Expr = MI.getDebugExpression();
2009   if (MI.isIndirectDebugValue()) {
2010     assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
2011     Expr = DIExpression::prepend(Expr, DIExpression::WithDeref);
2012   }
2013   return Expr;
2014 }
2015 
2016 MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2017                                           MachineBasicBlock::iterator I,
2018                                           const MachineInstr &Orig,
2019                                           int FrameIndex) {
2020   const DIExpression *Expr = computeExprForSpill(Orig);
2021   return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc())
2022       .addFrameIndex(FrameIndex)
2023       .addImm(0U)
2024       .addMetadata(Orig.getDebugVariable())
2025       .addMetadata(Expr);
2026 }
2027 
2028 void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) {
2029   const DIExpression *Expr = computeExprForSpill(Orig);
2030   Orig.getOperand(0).ChangeToFrameIndex(FrameIndex);
2031   Orig.getOperand(1).ChangeToImmediate(0U);
2032   Orig.getOperand(3).setMetadata(Expr);
2033 }
2034 
2035 void MachineInstr::collectDebugValues(
2036                                 SmallVectorImpl<MachineInstr *> &DbgValues) {
2037   MachineInstr &MI = *this;
2038   if (!MI.getOperand(0).isReg())
2039     return;
2040 
2041   MachineBasicBlock::iterator DI = MI; ++DI;
2042   for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2043        DI != DE; ++DI) {
2044     if (!DI->isDebugValue())
2045       return;
2046     if (DI->getOperand(0).isReg() &&
2047         DI->getOperand(0).getReg() == MI.getOperand(0).getReg())
2048       DbgValues.push_back(&*DI);
2049   }
2050 }
2051