xref: /llvm-project/llvm/lib/CodeGen/MachineInstr.cpp (revision c73c0307fe71a4f1a98d99dbc5d7852d44c30fff)
1 //===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Methods common to all machine instructions.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/MachineInstr.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/FoldingSet.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 #include "llvm/ADT/SmallString.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/Loads.h"
26 #include "llvm/Analysis/MemoryLocation.h"
27 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineInstrBundle.h"
32 #include "llvm/CodeGen/MachineMemOperand.h"
33 #include "llvm/CodeGen/MachineModuleInfo.h"
34 #include "llvm/CodeGen/MachineOperand.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/PseudoSourceValue.h"
37 #include "llvm/CodeGen/TargetInstrInfo.h"
38 #include "llvm/CodeGen/TargetRegisterInfo.h"
39 #include "llvm/CodeGen/TargetSubtargetInfo.h"
40 #include "llvm/Config/llvm-config.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DebugInfoMetadata.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/InlineAsm.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Intrinsics.h"
49 #include "llvm/IR/LLVMContext.h"
50 #include "llvm/IR/Metadata.h"
51 #include "llvm/IR/Module.h"
52 #include "llvm/IR/ModuleSlotTracker.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/Value.h"
55 #include "llvm/MC/MCInstrDesc.h"
56 #include "llvm/MC/MCRegisterInfo.h"
57 #include "llvm/MC/MCSymbol.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/LowLevelTypeImpl.h"
64 #include "llvm/Support/MathExtras.h"
65 #include "llvm/Support/raw_ostream.h"
66 #include "llvm/Target/TargetIntrinsicInfo.h"
67 #include "llvm/Target/TargetMachine.h"
68 #include <algorithm>
69 #include <cassert>
70 #include <cstddef>
71 #include <cstdint>
72 #include <cstring>
73 #include <iterator>
74 #include <utility>
75 
76 using namespace llvm;
77 
78 static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
79   if (const MachineBasicBlock *MBB = MI.getParent())
80     if (const MachineFunction *MF = MBB->getParent())
81       return MF;
82   return nullptr;
83 }
84 
85 // Try to crawl up to the machine function and get TRI and IntrinsicInfo from
86 // it.
87 static void tryToGetTargetInfo(const MachineInstr &MI,
88                                const TargetRegisterInfo *&TRI,
89                                const MachineRegisterInfo *&MRI,
90                                const TargetIntrinsicInfo *&IntrinsicInfo,
91                                const TargetInstrInfo *&TII) {
92 
93   if (const MachineFunction *MF = getMFIfAvailable(MI)) {
94     TRI = MF->getSubtarget().getRegisterInfo();
95     MRI = &MF->getRegInfo();
96     IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
97     TII = MF->getSubtarget().getInstrInfo();
98   }
99 }
100 
101 void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
102   if (MCID->ImplicitDefs)
103     for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
104            ++ImpDefs)
105       addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
106   if (MCID->ImplicitUses)
107     for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
108            ++ImpUses)
109       addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
110 }
111 
/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
                           DebugLoc dl, bool NoImp)
    : MCID(&tid), debugLoc(std::move(dl)) {
  // MachineInstrs are never destroyed one-by-one, so the DebugLoc must not
  // own anything that requires a destructor call.
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands.
  if (unsigned NumOps = MCID->getNumOperands() +
    MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  // Unless the caller intends to add the implicit operands itself (NoImp),
  // append them from the descriptor now.
  if (!NoImp)
    addImplicitDefUseOperands(MF);
}
130 
/// MachineInstr ctor - Copies MachineInstr arg exactly.
///
/// Copies the descriptor, extra info (Info), debug location, operands, and
/// flags. The new instruction is not inserted into any basic block.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) {
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Allocate exactly as many operand slots as the source instruction uses.
  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}
147 
148 /// getRegInfo - If this instruction is embedded into a MachineFunction,
149 /// return the MachineRegisterInfo object for the current function, otherwise
150 /// return null.
151 MachineRegisterInfo *MachineInstr::getRegInfo() {
152   if (MachineBasicBlock *MBB = getParent())
153     return &MBB->getParent()->getRegInfo();
154   return nullptr;
155 }
156 
157 /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
158 /// this instruction from their respective use lists.  This requires that the
159 /// operands already be on their use lists.
160 void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
161   for (MachineOperand &MO : operands())
162     if (MO.isReg())
163       MRI.removeRegOperandFromUseList(&MO);
164 }
165 
166 /// AddRegOperandsToUseLists - Add all of the register operands in
167 /// this instruction from their respective use lists.  This requires that the
168 /// operands not be on their use lists yet.
169 void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
170   for (MachineOperand &MO : operands())
171     if (MO.isReg())
172       MRI.addRegOperandToUseList(&MO);
173 }
174 
175 void MachineInstr::addOperand(const MachineOperand &Op) {
176   MachineBasicBlock *MBB = getParent();
177   assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
178   MachineFunction *MF = MBB->getParent();
179   assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
180   addOperand(*MF, Op);
181 }
182 
183 /// Move NumOps MachineOperands from Src to Dst, with support for overlapping
184 /// ranges. If MRI is non-null also update use-def chains.
185 static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
186                          unsigned NumOps, MachineRegisterInfo *MRI) {
187   if (MRI)
188     return MRI->moveOperands(Dst, Src, NumOps);
189 
190   // MachineOperand is a trivially copyable type so we can just use memmove.
191   std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
192 }
193 
/// addOperand - Add the specified operand to the instruction.  If it is an
/// implicit operand, it is added to the end of the operand list.  If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand.  Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around.  See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    // Walk back over the trailing implicit register operands; the new
    // explicit operand is inserted in front of them.
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

#ifndef NDEBUG
  bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
  // OpNo now points as the desired insertion point.  Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
          OpNo < MCID->getNumOperands() || isMetaDataOp) &&
         "Trying to add an operand to a machine instr that is already done!");
#endif

  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point. Note: if no realloc
  // happened above, OldOperands == Operands and this shifts in place.
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
  }
}
289 
/// RemoveOperand - Erase an operand from an instruction, leaving it with one
/// fewer operand than it started with.
///
void MachineInstr::RemoveOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  // Break any tie involving this operand before anything moves.
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  // Unlink the operand from its register's use list before its slot is
  // overwritten.
  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  // Slide any trailing operands down over the removed slot.
  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}
316 
/// Drop all memory references from this instruction, while preserving any
/// pre-/post-instruction symbols stored in the extra info.
void MachineInstr::dropMemRefs(MachineFunction &MF) {
  if (memoperands_empty())
    return;

  // See if we can just drop all of our extra info.
  if (!getPreInstrSymbol() && !getPostInstrSymbol()) {
    Info.clear();
    return;
  }
  // Exactly one symbol present: fold it back into the inline representation.
  if (!getPostInstrSymbol()) {
    Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
    return;
  }
  if (!getPreInstrSymbol()) {
    Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
    return;
  }

  // Otherwise allocate a fresh extra info with just these symbols.
  Info.set<EIIK_OutOfLine>(
      MF.createMIExtraInfo({}, getPreInstrSymbol(), getPostInstrSymbol()));
}
339 
340 void MachineInstr::setMemRefs(MachineFunction &MF,
341                               ArrayRef<MachineMemOperand *> MMOs) {
342   if (MMOs.empty()) {
343     dropMemRefs(MF);
344     return;
345   }
346 
347   // Try to store a single MMO inline.
348   if (MMOs.size() == 1 && !getPreInstrSymbol() && !getPostInstrSymbol()) {
349     Info.set<EIIK_MMO>(MMOs[0]);
350     return;
351   }
352 
353   // Otherwise create an extra info struct with all of our info.
354   Info.set<EIIK_OutOfLine>(
355       MF.createMIExtraInfo(MMOs, getPreInstrSymbol(), getPostInstrSymbol()));
356 }
357 
358 void MachineInstr::addMemOperand(MachineFunction &MF,
359                                  MachineMemOperand *MO) {
360   SmallVector<MachineMemOperand *, 2> MMOs;
361   MMOs.append(memoperands_begin(), memoperands_end());
362   MMOs.push_back(MO);
363   setMemRefs(MF, MMOs);
364 }
365 
366 void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
367   if (this == &MI)
368     // Nothing to do for a self-clone!
369     return;
370 
371   assert(&MF == MI.getMF() &&
372          "Invalid machine functions when cloning memory refrences!");
373   // See if we can just steal the extra info already allocated for the
374   // instruction. We can do this whenever the pre- and post-instruction symbols
375   // are the same (including null).
376   if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
377       getPostInstrSymbol() == MI.getPostInstrSymbol()) {
378     Info = MI.Info;
379     return;
380   }
381 
382   // Otherwise, fall back on a copy-based clone.
383   setMemRefs(MF, MI.memoperands());
384 }
385 
386 /// Check to see if the MMOs pointed to by the two MemRefs arrays are
387 /// identical.
388 static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
389                              ArrayRef<MachineMemOperand *> RHS) {
390   if (LHS.size() != RHS.size())
391     return false;
392 
393   auto LHSPointees = make_pointee_range(LHS);
394   auto RHSPointees = make_pointee_range(RHS);
395   return std::equal(LHSPointees.begin(), LHSPointees.end(),
396                     RHSPointees.begin());
397 }
398 
/// Set this instruction's memory references to the merge of the memoperands
/// of all instructions in MIs. An empty memoperand list on any input forces
/// the conservative result (no memoperands at all).
void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
                                      ArrayRef<const MachineInstr *> MIs) {
  // Try handling easy numbers of MIs with simpler mechanisms.
  if (MIs.empty()) {
    dropMemRefs(MF);
    return;
  }
  if (MIs.size() == 1) {
    cloneMemRefs(MF, *MIs[0]);
    return;
  }
  // Because an empty memoperands list provides *no* information and must be
  // handled conservatively (assuming the instruction can do anything), the only
  // way to merge with it is to drop all other memoperands.
  if (MIs[0]->memoperands_empty()) {
    dropMemRefs(MF);
    return;
  }

  // Handle the general case.
  SmallVector<MachineMemOperand *, 2> MergedMMOs;
  // Start with the first instruction.
  assert(&MF == MIs[0]->getMF() &&
         "Invalid machine functions when cloning memory references!");
  MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
  // Now walk all the other instructions and accumulate any different MMOs.
  for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
    assert(&MF == MI.getMF() &&
           "Invalid machine functions when cloning memory references!");

    // Skip MIs with identical operands to the first. This is a somewhat
    // arbitrary hack but will catch common cases without being quadratic.
    // TODO: We could fully implement merge semantics here if needed.
    if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
      continue;

    // Because an empty memoperands list provides *no* information and must be
    // handled conservatively (assuming the instruction can do anything), the
    // only way to merge with it is to drop all other memoperands.
    if (MI.memoperands_empty()) {
      dropMemRefs(MF);
      return;
    }

    // Otherwise accumulate these into our temporary buffer of the merged state.
    MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
  }

  setMemRefs(MF, MergedMMOs);
}
449 
/// Return the pre-instruction symbol, creating a fresh temp symbol in MCCtx
/// and attaching it to this instruction if none exists yet.
MCSymbol *MachineInstr::getOrCreatePreInstrTempSymbol(MCContext &MCCtx) {
  // Reuse an existing symbol if one is already attached.
  MCSymbol *S = getPreInstrSymbol();
  if (S)
    return S;

  // Create a new temp symbol.
  S = MCCtx.createTempSymbol();

  if (!Info) {
    // If we don't have any other extra info, we can store this inline.
    Info.set<EIIK_PreInstrSymbol>(S);
    return S;
  }

  // Otherwise, allocate a full set of extra info.
  Info.set<EIIK_OutOfLine>(
      getMF()->createMIExtraInfo(memoperands(), S, getPostInstrSymbol()));

  return S;
}
470 
/// Return the post-instruction symbol, creating a fresh temp symbol in MCCtx
/// and attaching it to this instruction if none exists yet.
MCSymbol *MachineInstr::getOrCreatePostInstrTempSymbol(MCContext &MCCtx) {
  // Reuse an existing symbol if one is already attached.
  MCSymbol *S = getPostInstrSymbol();
  if (S)
    return S;

  // Create a new temp symbol.
  S = MCCtx.createTempSymbol();

  if (!Info) {
    // If we don't have any other extra info, we can store this inline.
    Info.set<EIIK_PostInstrSymbol>(S);
    return S;
  }

  // Otherwise, allocate a full set of extra info.
  Info.set<EIIK_OutOfLine>(
      getMF()->createMIExtraInfo(memoperands(), getPreInstrSymbol(), S));
  return S;
}
490 
491 uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
492   // For now, the just return the union of the flags. If the flags get more
493   // complicated over time, we might need more logic here.
494   return getFlags() | Other.getFlags();
495 }
496 
/// Check whether the MCInstrDesc flags of the instructions in this bundle
/// satisfy Mask, either for any instruction (AnyInBundle) or for all of them
/// (AllInBundle). Must be called on the bundle header.
bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
  assert(!isBundledWithPred() && "Must be called on bundle header");
  // Walk every instruction in the bundle, including this header.
  for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
    if (MII->getDesc().getFlags() & Mask) {
      // AnyInBundle is satisfied by the first match.
      if (Type == AnyInBundle)
        return true;
    } else {
      // AllInBundle fails on the first miss, except that the BUNDLE
      // pseudo-instruction itself is exempt from the check.
      if (Type == AllInBundle && !MII->isBundle())
        return false;
    }
    // This was the last instruction in the bundle.
    if (!MII->isBundledWithSucc())
      return Type == AllInBundle;
  }
}
512 
/// Compare this instruction with Other for equality, with Check controlling
/// how register defs and kill/dead flags are treated. Bundles are compared
/// member-wise.
bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other.getOpcode() != getOpcode() ||
      Other.getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // We have passed the test above that both instructions have the same
    // opcode, so we know that both instructions are bundles here. Let's compare
    // MIs inside the bundle.
    assert(Other.isBundle() && "Expected that both instructions are bundles.");
    MachineBasicBlock::const_instr_iterator I1 = getIterator();
    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
    // Loop until we have analyzed the last instruction inside at least one of
    // the bundles.
    while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
      ++I1;
      ++I2;
      if (!I1->isIdenticalTo(*I2, Check))
        return false;
    }
    // If we've reached the end of just one of the two bundles, but not both,
    // the instructions are not identical.
    if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
      return false;
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other.getOperand(i);
    if (!MO.isReg()) {
      // Non-register operands must simply be identical.
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        // Only compare defs when at least one side is a physical register.
        if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
            !TargetRegisterInfo::isVirtualRegister(OMO.getReg()))
          if (!MO.isIdenticalTo(OMO))
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two debug instructions are not identical.
  if (isDebugInstr())
    if (getDebugLoc() && Other.getDebugLoc() &&
        getDebugLoc() != Other.getDebugLoc())
      return false;
  return true;
}
583 
584 const MachineFunction *MachineInstr::getMF() const {
585   return getParent()->getParent();
586 }
587 
588 MachineInstr *MachineInstr::removeFromParent() {
589   assert(getParent() && "Not embedded in a basic block!");
590   return getParent()->remove(this);
591 }
592 
593 MachineInstr *MachineInstr::removeFromBundle() {
594   assert(getParent() && "Not embedded in a basic block!");
595   return getParent()->remove_instr(this);
596 }
597 
598 void MachineInstr::eraseFromParent() {
599   assert(getParent() && "Not embedded in a basic block!");
600   getParent()->erase(this);
601 }
602 
603 void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
604   assert(getParent() && "Not embedded in a basic block!");
605   MachineBasicBlock *MBB = getParent();
606   MachineFunction *MF = MBB->getParent();
607   assert(MF && "Not embedded in a function!");
608 
609   MachineInstr *MI = (MachineInstr *)this;
610   MachineRegisterInfo &MRI = MF->getRegInfo();
611 
612   for (const MachineOperand &MO : MI->operands()) {
613     if (!MO.isReg() || !MO.isDef())
614       continue;
615     unsigned Reg = MO.getReg();
616     if (!TargetRegisterInfo::isVirtualRegister(Reg))
617       continue;
618     MRI.markUsesInDebugValueAsUndef(Reg);
619   }
620   MI->eraseFromParent();
621 }
622 
623 void MachineInstr::eraseFromBundle() {
624   assert(getParent() && "Not embedded in a basic block!");
625   getParent()->erase_instr(this);
626 }
627 
628 unsigned MachineInstr::getNumExplicitOperands() const {
629   unsigned NumOperands = MCID->getNumOperands();
630   if (!MCID->isVariadic())
631     return NumOperands;
632 
633   for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
634     const MachineOperand &MO = getOperand(I);
635     // The operands must always be in the following order:
636     // - explicit reg defs,
637     // - other explicit operands (reg uses, immediates, etc.),
638     // - implicit reg defs
639     // - implicit reg uses
640     if (MO.isReg() && MO.isImplicit())
641       break;
642     ++NumOperands;
643   }
644   return NumOperands;
645 }
646 
647 unsigned MachineInstr::getNumExplicitDefs() const {
648   unsigned NumDefs = MCID->getNumDefs();
649   if (!MCID->isVariadic())
650     return NumDefs;
651 
652   for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
653     const MachineOperand &MO = getOperand(I);
654     if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
655       break;
656     ++NumDefs;
657   }
658   return NumDefs;
659 }
660 
661 void MachineInstr::bundleWithPred() {
662   assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
663   setFlag(BundledPred);
664   MachineBasicBlock::instr_iterator Pred = getIterator();
665   --Pred;
666   assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
667   Pred->setFlag(BundledSucc);
668 }
669 
670 void MachineInstr::bundleWithSucc() {
671   assert(!isBundledWithSucc() && "MI is already bundled with its successor");
672   setFlag(BundledSucc);
673   MachineBasicBlock::instr_iterator Succ = getIterator();
674   ++Succ;
675   assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
676   Succ->setFlag(BundledPred);
677 }
678 
679 void MachineInstr::unbundleFromPred() {
680   assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
681   clearFlag(BundledPred);
682   MachineBasicBlock::instr_iterator Pred = getIterator();
683   --Pred;
684   assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
685   Pred->clearFlag(BundledSucc);
686 }
687 
688 void MachineInstr::unbundleFromSucc() {
689   assert(isBundledWithSucc() && "MI isn't bundled with its successor");
690   clearFlag(BundledSucc);
691   MachineBasicBlock::instr_iterator Succ = getIterator();
692   ++Succ;
693   assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
694   Succ->clearFlag(BundledPred);
695 }
696 
697 bool MachineInstr::isStackAligningInlineAsm() const {
698   if (isInlineAsm()) {
699     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
700     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
701       return true;
702   }
703   return false;
704 }
705 
706 InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
707   assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
708   unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
709   return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
710 }
711 
/// Find the index of the inline asm flag operand that describes the operand
/// group containing OpIdx, or -1 if OpIdx is not inside any group. If GroupNo
/// is non-null, also report the zero-based group number.
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  // Walk the operand groups: each group is one immediate flag operand
  // followed by that group's register operands.
  unsigned Group = 0;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    // OpIdx falls inside this group: return the flag operand's index.
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}
739 
740 const DILabel *MachineInstr::getDebugLabel() const {
741   assert(isDebugLabel() && "not a DBG_LABEL");
742   return cast<DILabel>(getOperand(0).getMetadata());
743 }
744 
745 const DILocalVariable *MachineInstr::getDebugVariable() const {
746   assert(isDebugValue() && "not a DBG_VALUE");
747   return cast<DILocalVariable>(getOperand(2).getMetadata());
748 }
749 
750 const DIExpression *MachineInstr::getDebugExpression() const {
751   assert(isDebugValue() && "not a DBG_VALUE");
752   return cast<DIExpression>(getOperand(3).getMetadata());
753 }
754 
/// Return the register class this instruction constrains operand OpIdx to,
/// or null if there is no constraint (or the operand is not a register).
/// Handles both normal instructions (via MCInstrDesc) and inline asm (via the
/// flag word of the operand's group).
const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getMF();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  // Register operand groups may carry an explicit register-class ID.
  unsigned Flag = getOperand(FlagIdx).getImm();
  unsigned RCID;
  if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
    return TRI->getPointerRegClass(MF);

  return nullptr;
}
794 
/// Accumulate into CurRC the register-class constraints that this instruction
/// (and, if ExploreBundle is set, every instruction in its bundle) imposes on
/// Reg. Returns the narrowed class, or null if the constraints conflict.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operand inside the bundle if we have
  // been asked to.
  if (ExploreBundle)
    for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
      CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
  return CurRC;
}
811 
/// Helper for getRegClassConstraintEffectForVReg: apply the constraint of a
/// single operand, but only if that operand actually mentions Reg.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
    unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  assert(CurRC && "Invalid initial register class");
  // Check if Reg is constrained by some of its use/def from MI.
  const MachineOperand &MO = getOperand(OpIdx);
  if (!MO.isReg() || MO.getReg() != Reg)
    return CurRC;
  // If yes, accumulate the constraints through the operand.
  return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
}
823 
824 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
825     unsigned OpIdx, const TargetRegisterClass *CurRC,
826     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
827   const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
828   const MachineOperand &MO = getOperand(OpIdx);
829   assert(MO.isReg() &&
830          "Cannot get register constraints for non-register operand");
831   assert(CurRC && "Invalid initial register class");
832   if (unsigned SubIdx = MO.getSubReg()) {
833     if (OpRC)
834       CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
835     else
836       CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
837   } else if (OpRC)
838     CurRC = TRI->getCommonSubClass(CurRC, OpRC);
839   return CurRC;
840 }
841 
842 /// Return the number of instructions inside the MI bundle, not counting the
843 /// header instruction.
844 unsigned MachineInstr::getBundleSize() const {
845   MachineBasicBlock::const_instr_iterator I = getIterator();
846   unsigned Size = 0;
847   while (I->isBundledWithSucc()) {
848     ++Size;
849     ++I;
850   }
851   return Size;
852 }
853 
854 /// Returns true if the MachineInstr has an implicit-use operand of exactly
855 /// the given register (not considering sub/super-registers).
856 bool MachineInstr::hasRegisterImplicitUseOperand(unsigned Reg) const {
857   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
858     const MachineOperand &MO = getOperand(i);
859     if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
860       return true;
861   }
862   return false;
863 }
864 
865 /// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
866 /// the specific register or -1 if it is not found. It further tightens
867 /// the search criteria to a use that kills the register if isKill is true.
868 int MachineInstr::findRegisterUseOperandIdx(
869     unsigned Reg, bool isKill, const TargetRegisterInfo *TRI) const {
870   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
871     const MachineOperand &MO = getOperand(i);
872     if (!MO.isReg() || !MO.isUse())
873       continue;
874     unsigned MOReg = MO.getReg();
875     if (!MOReg)
876       continue;
877     if (MOReg == Reg || (TRI && TargetRegisterInfo::isPhysicalRegister(MOReg) &&
878                          TargetRegisterInfo::isPhysicalRegister(Reg) &&
879                          TRI->isSubRegister(MOReg, Reg)))
880       if (!isKill || MO.isKill())
881         return i;
882   }
883   return -1;
884 }
885 
886 /// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
887 /// indicating if this instruction reads or writes Reg. This also considers
888 /// partial defines.
889 std::pair<bool,bool>
890 MachineInstr::readsWritesVirtualRegister(unsigned Reg,
891                                          SmallVectorImpl<unsigned> *Ops) const {
892   bool PartDef = false; // Partial redefine.
893   bool FullDef = false; // Full define.
894   bool Use = false;
895 
896   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
897     const MachineOperand &MO = getOperand(i);
898     if (!MO.isReg() || MO.getReg() != Reg)
899       continue;
900     if (Ops)
901       Ops->push_back(i);
902     if (MO.isUse())
903       Use |= !MO.isUndef();
904     else if (MO.getSubReg() && !MO.isUndef())
905       // A partial def undef doesn't count as reading the register.
906       PartDef = true;
907     else
908       FullDef = true;
909   }
910   // A partial redefine uses Reg unless there is also a full define.
911   return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
912 }
913 
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
int
MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
                                        const TargetRegisterInfo *TRI) const {
  bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    // With TRI, a def of a different physical register may still count: any
    // overlapping register when Overlap is set, otherwise only a
    // super-register of Reg.
    if (!Found && TRI && isPhys &&
        TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    // When isDead is set, only dead defs are accepted.
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}
944 
945 /// findFirstPredOperandIdx() - Find the index of the first operand in the
946 /// operand list that is used to represent the predicate. It returns -1 if
947 /// none is found.
948 int MachineInstr::findFirstPredOperandIdx() const {
949   // Don't call MCID.findFirstPredOperandIdx() because this variant
950   // is sometimes called on an instruction that's not yet complete, and
951   // so the number of operands is less than the MCID indicates. In
952   // particular, the PTX target does this.
953   const MCInstrDesc &MCID = getDesc();
954   if (MCID.isPredicable()) {
955     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
956       if (MCID.OpInfo[i].isPredicate())
957         return i;
958   }
959 
960   return -1;
961 }
962 
// MachineOperand::TiedTo is 4 bits wide, so 15 is the largest encodable
// value; it doubles as the "saturated" marker (see tieOperands below).
const unsigned TiedMax = 15;
965 
/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0:              Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax:        Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  // Store DefIdx in the use's 4-bit TiedTo field, biased by one so that zero
  // keeps its "not tied" meaning.
  if (DefIdx < TiedMax)
    UseMO.TiedTo = DefIdx + 1;
  else {
    // Inline asm can use the group descriptors to find tied operands, but on
    // normal instruction, the tied def must be within the first TiedMax
    // operands.
    assert(isInlineAsm() && "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
999 
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm()) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use. A saturated encoding means the
    // use index was >= TiedMax - 1 (see tieOperands), so start the scan there.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    // A group is the flag word plus the registers it describes.
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}
1057 
1058 /// clearKillInfo - Clears kill flags on all operands.
1059 ///
1060 void MachineInstr::clearKillInfo() {
1061   for (MachineOperand &MO : operands()) {
1062     if (MO.isReg() && MO.isUse())
1063       MO.setIsKill(false);
1064   }
1065 }
1066 
1067 void MachineInstr::substituteRegister(unsigned FromReg, unsigned ToReg,
1068                                       unsigned SubIdx,
1069                                       const TargetRegisterInfo &RegInfo) {
1070   if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
1071     if (SubIdx)
1072       ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1073     for (MachineOperand &MO : operands()) {
1074       if (!MO.isReg() || MO.getReg() != FromReg)
1075         continue;
1076       MO.substPhysReg(ToReg, RegInfo);
1077     }
1078   } else {
1079     for (MachineOperand &MO : operands()) {
1080       if (!MO.isReg() || MO.getReg() != FromReg)
1081         continue;
1082       MO.substVirtReg(ToReg, SubIdx, RegInfo);
1083     }
1084   }
1085 }
1086 
1087 /// isSafeToMove - Return true if it is safe to move this instruction. If
1088 /// SawStore is set to true, it means that there is a store (or call) between
1089 /// the instruction's location and its intended destination.
1090 bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
1091   // Ignore stuff that we obviously can't move.
1092   //
1093   // Treat volatile loads as stores. This is not strictly necessary for
1094   // volatiles, but it is required for atomic loads. It is not allowed to move
1095   // a load across an atomic load with Ordering > Monotonic.
1096   if (mayStore() || isCall() || isPHI() ||
1097       (mayLoad() && hasOrderedMemoryRef())) {
1098     SawStore = true;
1099     return false;
1100   }
1101 
1102   if (isPosition() || isDebugInstr() || isTerminator() ||
1103       hasUnmodeledSideEffects())
1104     return false;
1105 
1106   // See if this instruction does a load.  If so, we have to guarantee that the
1107   // loaded value doesn't change between the load and the its intended
1108   // destination. The check for isInvariantLoad gives the targe the chance to
1109   // classify the load as always returning a constant, e.g. a constant pool
1110   // load.
1111   if (mayLoad() && !isDereferenceableInvariantLoad(AA))
1112     // Otherwise, this is a real load.  If there is a store between the load and
1113     // end of block, we can't move it.
1114     return !SawStore;
1115 
1116   return true;
1117 }
1118 
bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other,
                            bool UseTBAA) {
  const MachineFunction *MF = getMF();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  // If neither instruction stores to memory, they can't alias in any
  // meaningful way, even if they read from the same address.
  if (!mayStore() && !Other.mayStore())
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA))
    return false;

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!hasOneMemOperand() || !Other.hasOneMemOperand())
    return true;

  MachineMemOperand *MMOa = *memoperands_begin();
  MachineMemOperand *MMOb = *Other.memoperands_begin();

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offset with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  int64_t OffsetA = MMOa->getOffset();
  int64_t OffsetB = MMOb->getOffset();

  int64_t MinOffset = std::min(OffsetA, OffsetB);
  int64_t WidthA = MMOa->getSize();
  int64_t WidthB = MMOb->getSize();
  const Value *ValA = MMOa->getValue();
  const Value *ValB = MMOb->getValue();
  bool SameVal = (ValA && ValB && (ValA == ValB));
  if (!SameVal) {
    // Try pseudo source values: if one side is a pseudo value that reports it
    // cannot alias IR-addressable memory, the accesses are disjoint.
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
      return false;
    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
      return false;
    // Identical pseudo values are the same underlying object.
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal) {
    // Same underlying object: a plain interval-overlap test on
    // [Offset, Offset+Width) decides aliasing exactly.
    int64_t MaxOffset = std::max(OffsetA, OffsetB);
    int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
    return (MinOffset + LowWidth > MaxOffset);
  }

  // Different objects and no AA to consult: be conservative.
  if (!AA)
    return true;

  // AA needs IR values on both sides; pseudo-only operands can't be queried.
  if (!ValA || !ValB)
    return true;

  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");

  // Query AA with extents measured from the smaller offset so both locations
  // cover their access relative to a common base.
  int64_t Overlapa = WidthA + OffsetA - MinOffset;
  int64_t Overlapb = WidthB + OffsetB - MinOffset;

  AliasResult AAResult = AA->alias(
      MemoryLocation(ValA, Overlapa,
                     UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      MemoryLocation(ValB, Overlapb,
                     UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));

  return (AAResult != NoAlias);
}
1203 
1204 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1205 /// or volatile memory reference, or if the information describing the memory
1206 /// reference is not available. Return false if it is known to have no ordered
1207 /// memory references.
1208 bool MachineInstr::hasOrderedMemoryRef() const {
1209   // An instruction known never to access memory won't have a volatile access.
1210   if (!mayStore() &&
1211       !mayLoad() &&
1212       !isCall() &&
1213       !hasUnmodeledSideEffects())
1214     return false;
1215 
1216   // Otherwise, if the instruction has no memory reference information,
1217   // conservatively assume it wasn't preserved.
1218   if (memoperands_empty())
1219     return true;
1220 
1221   // Check if any of our memory operands are ordered.
1222   return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1223     return !MMO->isUnordered();
1224   });
1225 }
1226 
/// isDereferenceableInvariantLoad - Return true if this instruction will never
/// trap and is loading from a location whose value is invariant across a run of
/// this function.
bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const {
  // If the instruction doesn't load at all, it isn't an invariant load.
  if (!mayLoad())
    return false;

  // If the instruction has lost its memoperands, conservatively assume that
  // it may not be an invariant load.
  if (memoperands_empty())
    return false;

  const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();

  // Every memory operand must be shown invariant and dereferenceable by one
  // of the checks below, otherwise the whole instruction fails.
  for (MachineMemOperand *MMO : memoperands()) {
    if (MMO->isVolatile()) return false;
    if (MMO->isStore()) return false;
    if (MMO->isInvariant() && MMO->isDereferenceable())
      continue;

    // A load from a constant PseudoSourceValue is invariant.
    if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
      if (PSV->isConstant(&MFI))
        continue;

    if (const Value *V = MMO->getValue()) {
      // If we have an AliasAnalysis, ask it whether the memory is constant.
      if (AA &&
          AA->pointsToConstantMemory(
              MemoryLocation(V, MMO->getSize(), MMO->getAAInfo())))
        continue;
    }

    // Otherwise assume conservatively.
    return false;
  }

  // Everything checks out.
  return true;
}
1268 
1269 /// isConstantValuePHI - If the specified instruction is a PHI that always
1270 /// merges together the same virtual register, return the register, otherwise
1271 /// return 0.
1272 unsigned MachineInstr::isConstantValuePHI() const {
1273   if (!isPHI())
1274     return 0;
1275   assert(getNumOperands() >= 3 &&
1276          "It's illegal to have a PHI without source operands");
1277 
1278   unsigned Reg = getOperand(1).getReg();
1279   for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1280     if (getOperand(i).getReg() != Reg)
1281       return 0;
1282   return Reg;
1283 }
1284 
1285 bool MachineInstr::hasUnmodeledSideEffects() const {
1286   if (hasProperty(MCID::UnmodeledSideEffects))
1287     return true;
1288   if (isInlineAsm()) {
1289     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1290     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1291       return true;
1292   }
1293 
1294   return false;
1295 }
1296 
/// Anything that may write memory, make a call, or have unmodeled side
/// effects prevents folding a load across this instruction.
bool MachineInstr::isLoadFoldBarrier() const {
  return mayStore() || isCall() || hasUnmodeledSideEffects();
}
1300 
1301 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
1302 ///
1303 bool MachineInstr::allDefsAreDead() const {
1304   for (const MachineOperand &MO : operands()) {
1305     if (!MO.isReg() || MO.isUse())
1306       continue;
1307     if (!MO.isDead())
1308       return false;
1309   }
1310   return true;
1311 }
1312 
1313 /// copyImplicitOps - Copy implicit register operands from specified
1314 /// instruction to this instruction.
1315 void MachineInstr::copyImplicitOps(MachineFunction &MF,
1316                                    const MachineInstr &MI) {
1317   for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands();
1318        i != e; ++i) {
1319     const MachineOperand &MO = MI.getOperand(i);
1320     if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1321       addOperand(MF, MO);
1322   }
1323 }
1324 
1325 bool MachineInstr::hasComplexRegisterTies() const {
1326   const MCInstrDesc &MCID = getDesc();
1327   for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1328     const auto &Operand = getOperand(I);
1329     if (!Operand.isReg() || Operand.isDef())
1330       // Ignore the defined registers as MCID marks only the uses as tied.
1331       continue;
1332     int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1333     int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1334     if (ExpectedTiedIdx != TiedIdx)
1335       return true;
1336   }
1337   return false;
1338 }
1339 
/// Decide which LLT (if any) the printer should show next to operand OpIdx.
/// Returns an invalid LLT for non-register operands and for generic type
/// indices that have already been printed (tracked in PrintedTypes).
LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
                                 const MachineRegisterInfo &MRI) const {
  const MachineOperand &Op = getOperand(OpIdx);
  if (!Op.isReg())
    return LLT{};

  // Variadic instructions and implicit operands have no OpInfo entry to
  // consult, so always print the register's type.
  if (isVariadic() || OpIdx >= getNumExplicitOperands())
    return MRI.getType(Op.getReg());

  auto &OpInfo = getDesc().OpInfo[OpIdx];
  if (!OpInfo.isGenericType())
    return MRI.getType(Op.getReg());

  // Each generic type index is printed at most once.
  if (PrintedTypes[OpInfo.getGenericTypeIndex()])
    return LLT{};

  LLT TypeToPrint = MRI.getType(Op.getReg());
  // Don't mark the type index printed if it wasn't actually printed: maybe
  // another operand with the same type index has an actual type attached:
  if (TypeToPrint.isValid())
    PrintedTypes.set(OpInfo.getGenericTypeIndex());
  return TypeToPrint;
}
1363 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger helper: print this instruction to the debug stream with a small
// leading indent. Only compiled into builds that enable dump methods.
LLVM_DUMP_METHOD void MachineInstr::dump() const {
  dbgs() << "  ";
  print(dbgs());
}
#endif
1370 
1371 void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1372                          bool SkipDebugLoc, bool AddNewLine,
1373                          const TargetInstrInfo *TII) const {
1374   const Module *M = nullptr;
1375   const Function *F = nullptr;
1376   if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1377     F = &MF->getFunction();
1378     M = F->getParent();
1379     if (!TII)
1380       TII = MF->getSubtarget().getInstrInfo();
1381   }
1382 
1383   ModuleSlotTracker MST(M);
1384   if (F)
1385     MST.incorporateFunction(*F);
1386   print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, TII);
1387 }
1388 
1389 void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1390                          bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1391                          bool AddNewLine, const TargetInstrInfo *TII) const {
1392   // We can be a bit tidier if we know the MachineFunction.
1393   const MachineFunction *MF = nullptr;
1394   const TargetRegisterInfo *TRI = nullptr;
1395   const MachineRegisterInfo *MRI = nullptr;
1396   const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1397   tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1398 
1399   if (isCFIInstruction())
1400     assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1401 
1402   SmallBitVector PrintedTypes(8);
1403   bool ShouldPrintRegisterTies = hasComplexRegisterTies();
1404   auto getTiedOperandIdx = [&](unsigned OpIdx) {
1405     if (!ShouldPrintRegisterTies)
1406       return 0U;
1407     const MachineOperand &MO = getOperand(OpIdx);
1408     if (MO.isReg() && MO.isTied() && !MO.isDef())
1409       return findTiedOperandIdx(OpIdx);
1410     return 0U;
1411   };
1412   unsigned StartOp = 0;
1413   unsigned e = getNumOperands();
1414 
1415   // Print explicitly defined operands on the left of an assignment syntax.
1416   while (StartOp < e) {
1417     const MachineOperand &MO = getOperand(StartOp);
1418     if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1419       break;
1420 
1421     if (StartOp != 0)
1422       OS << ", ";
1423 
1424     LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1425     unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1426     MO.print(OS, MST, TypeToPrint, /*PrintDef=*/false, IsStandalone,
1427              ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1428     ++StartOp;
1429   }
1430 
1431   if (StartOp != 0)
1432     OS << " = ";
1433 
1434   if (getFlag(MachineInstr::FrameSetup))
1435     OS << "frame-setup ";
1436   if (getFlag(MachineInstr::FrameDestroy))
1437     OS << "frame-destroy ";
1438   if (getFlag(MachineInstr::FmNoNans))
1439     OS << "nnan ";
1440   if (getFlag(MachineInstr::FmNoInfs))
1441     OS << "ninf ";
1442   if (getFlag(MachineInstr::FmNsz))
1443     OS << "nsz ";
1444   if (getFlag(MachineInstr::FmArcp))
1445     OS << "arcp ";
1446   if (getFlag(MachineInstr::FmContract))
1447     OS << "contract ";
1448   if (getFlag(MachineInstr::FmAfn))
1449     OS << "afn ";
1450   if (getFlag(MachineInstr::FmReassoc))
1451     OS << "reassoc ";
1452 
1453   // Print the opcode name.
1454   if (TII)
1455     OS << TII->getName(getOpcode());
1456   else
1457     OS << "UNKNOWN";
1458 
1459   if (SkipOpers)
1460     return;
1461 
1462   // Print the rest of the operands.
1463   bool FirstOp = true;
1464   unsigned AsmDescOp = ~0u;
1465   unsigned AsmOpCount = 0;
1466 
1467   if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1468     // Print asm string.
1469     OS << " ";
1470     const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1471     LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1472     unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1473     getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1474                             ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1475                             IntrinsicInfo);
1476 
1477     // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1478     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1479     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1480       OS << " [sideeffect]";
1481     if (ExtraInfo & InlineAsm::Extra_MayLoad)
1482       OS << " [mayload]";
1483     if (ExtraInfo & InlineAsm::Extra_MayStore)
1484       OS << " [maystore]";
1485     if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1486       OS << " [isconvergent]";
1487     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1488       OS << " [alignstack]";
1489     if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1490       OS << " [attdialect]";
1491     if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1492       OS << " [inteldialect]";
1493 
1494     StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1495     FirstOp = false;
1496   }
1497 
1498   for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1499     const MachineOperand &MO = getOperand(i);
1500 
1501     if (FirstOp) FirstOp = false; else OS << ",";
1502     OS << " ";
1503 
1504     if (isDebugValue() && MO.isMetadata()) {
1505       // Pretty print DBG_VALUE instructions.
1506       auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1507       if (DIV && !DIV->getName().empty())
1508         OS << "!\"" << DIV->getName() << '\"';
1509       else {
1510         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1511         unsigned TiedOperandIdx = getTiedOperandIdx(i);
1512         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1513                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1514       }
1515     } else if (isDebugLabel() && MO.isMetadata()) {
1516       // Pretty print DBG_LABEL instructions.
1517       auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1518       if (DIL && !DIL->getName().empty())
1519         OS << "\"" << DIL->getName() << '\"';
1520       else {
1521         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1522         unsigned TiedOperandIdx = getTiedOperandIdx(i);
1523         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1524                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1525       }
1526     } else if (i == AsmDescOp && MO.isImm()) {
1527       // Pretty print the inline asm operand descriptor.
1528       OS << '$' << AsmOpCount++;
1529       unsigned Flag = MO.getImm();
1530       switch (InlineAsm::getKind(Flag)) {
1531       case InlineAsm::Kind_RegUse:             OS << ":[reguse"; break;
1532       case InlineAsm::Kind_RegDef:             OS << ":[regdef"; break;
1533       case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
1534       case InlineAsm::Kind_Clobber:            OS << ":[clobber"; break;
1535       case InlineAsm::Kind_Imm:                OS << ":[imm"; break;
1536       case InlineAsm::Kind_Mem:                OS << ":[mem"; break;
1537       default: OS << ":[??" << InlineAsm::getKind(Flag); break;
1538       }
1539 
1540       unsigned RCID = 0;
1541       if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
1542           InlineAsm::hasRegClassConstraint(Flag, RCID)) {
1543         if (TRI) {
1544           OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1545         } else
1546           OS << ":RC" << RCID;
1547       }
1548 
1549       if (InlineAsm::isMemKind(Flag)) {
1550         unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
1551         switch (MCID) {
1552         case InlineAsm::Constraint_es: OS << ":es"; break;
1553         case InlineAsm::Constraint_i:  OS << ":i"; break;
1554         case InlineAsm::Constraint_m:  OS << ":m"; break;
1555         case InlineAsm::Constraint_o:  OS << ":o"; break;
1556         case InlineAsm::Constraint_v:  OS << ":v"; break;
1557         case InlineAsm::Constraint_Q:  OS << ":Q"; break;
1558         case InlineAsm::Constraint_R:  OS << ":R"; break;
1559         case InlineAsm::Constraint_S:  OS << ":S"; break;
1560         case InlineAsm::Constraint_T:  OS << ":T"; break;
1561         case InlineAsm::Constraint_Um: OS << ":Um"; break;
1562         case InlineAsm::Constraint_Un: OS << ":Un"; break;
1563         case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
1564         case InlineAsm::Constraint_Us: OS << ":Us"; break;
1565         case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
1566         case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
1567         case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
1568         case InlineAsm::Constraint_X:  OS << ":X"; break;
1569         case InlineAsm::Constraint_Z:  OS << ":Z"; break;
1570         case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
1571         case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
1572         default: OS << ":?"; break;
1573         }
1574       }
1575 
1576       unsigned TiedTo = 0;
1577       if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
1578         OS << " tiedto:$" << TiedTo;
1579 
1580       OS << ']';
1581 
1582       // Compute the index of the next operand descriptor.
1583       AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
1584     } else {
1585       LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1586       unsigned TiedOperandIdx = getTiedOperandIdx(i);
1587       if (MO.isImm() && isOperandSubregIdx(i))
1588         MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1589       else
1590         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1591                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1592     }
1593   }
1594 
1595   if (!SkipDebugLoc) {
1596     if (const DebugLoc &DL = getDebugLoc()) {
1597       if (!FirstOp)
1598         OS << ',';
1599       OS << " debug-location ";
1600       DL->printAsOperand(OS, MST);
1601     }
1602   }
1603 
1604   if (!memoperands_empty()) {
1605     SmallVector<StringRef, 0> SSNs;
1606     const LLVMContext *Context = nullptr;
1607     std::unique_ptr<LLVMContext> CtxPtr;
1608     const MachineFrameInfo *MFI = nullptr;
1609     if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1610       MFI = &MF->getFrameInfo();
1611       Context = &MF->getFunction().getContext();
1612     } else {
1613       CtxPtr = llvm::make_unique<LLVMContext>();
1614       Context = CtxPtr.get();
1615     }
1616 
1617     OS << " :: ";
1618     bool NeedComma = false;
1619     for (const MachineMemOperand *Op : memoperands()) {
1620       if (NeedComma)
1621         OS << ", ";
1622       Op->print(OS, MST, SSNs, *Context, MFI, TII);
1623       NeedComma = true;
1624     }
1625   }
1626 
1627   if (SkipDebugLoc)
1628     return;
1629 
1630   bool HaveSemi = false;
1631 
1632   // Print debug location information.
1633   if (const DebugLoc &DL = getDebugLoc()) {
1634     if (!HaveSemi) {
1635       OS << ';';
1636       HaveSemi = true;
1637     }
1638     OS << ' ';
1639     DL.print(OS);
1640   }
1641 
1642   // Print extra comments for DEBUG_VALUE.
1643   if (isDebugValue() && getOperand(e - 2).isMetadata()) {
1644     if (!HaveSemi) {
1645       OS << ";";
1646       HaveSemi = true;
1647     }
1648     auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
1649     OS << " line no:" <<  DV->getLine();
1650     if (auto *InlinedAt = debugLoc->getInlinedAt()) {
1651       DebugLoc InlinedAtDL(InlinedAt);
1652       if (InlinedAtDL && MF) {
1653         OS << " inlined @[ ";
1654         InlinedAtDL.print(OS);
1655         OS << " ]";
1656       }
1657     }
1658     if (isIndirectDebugValue())
1659       OS << " indirect";
1660   }
1661   // TODO: DBG_LABEL
1662 
1663   if (AddNewLine)
1664     OS << '\n';
1665 }
1666 
bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  // Alias handling only applies to physical registers; a virtual register
  // can only be killed by an operand naming it exactly.
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of operands whose kill flags become redundant; cleaned up below.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;

    // DEBUG_VALUE nodes do not contribute to code generation and should
    // always be ignored. Failure to do so may result in trying to modify
    // KILL flags on DEBUG_VALUE nodes.
    if (MO.isDebug())
      continue;

    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() &&
               TargetRegisterInfo::isPhysicalRegister(Reg)) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      // A kill of a sub-register is subsumed by the kill of IncomingReg;
      // remember its index so the redundant flag can be stripped.
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands.
  // Process indices highest-first so RemoveOperand does not invalidate the
  // indices still queued in DeadOps.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}
1732 
1733 void MachineInstr::clearRegisterKills(unsigned Reg,
1734                                       const TargetRegisterInfo *RegInfo) {
1735   if (!TargetRegisterInfo::isPhysicalRegister(Reg))
1736     RegInfo = nullptr;
1737   for (MachineOperand &MO : operands()) {
1738     if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1739       continue;
1740     unsigned OpReg = MO.getReg();
1741     if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
1742       MO.setIsKill(false);
1743   }
1744 }
1745 
bool MachineInstr::addRegisterDead(unsigned Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  // Alias handling only applies to physical registers.
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of operands whose dead flags become redundant; cleaned up below.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      // Exact def of Reg: mark it dead.
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() &&
               TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      // Dead flags on sub-registers of Reg are redundant; queue the operand
      // index for cleanup.
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands.
  // Process indices highest-first so RemoveOperand does not invalidate the
  // indices still queued in DeadOps.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}
1797 
1798 void MachineInstr::clearRegisterDeads(unsigned Reg) {
1799   for (MachineOperand &MO : operands()) {
1800     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
1801       continue;
1802     MO.setIsDead(false);
1803   }
1804 }
1805 
1806 void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) {
1807   for (MachineOperand &MO : operands()) {
1808     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
1809       continue;
1810     MO.setIsUndef(IsUndef);
1811   }
1812 }
1813 
1814 void MachineInstr::addRegisterDefined(unsigned Reg,
1815                                       const TargetRegisterInfo *RegInfo) {
1816   if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1817     MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
1818     if (MO)
1819       return;
1820   } else {
1821     for (const MachineOperand &MO : operands()) {
1822       if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
1823           MO.getSubReg() == 0)
1824         return;
1825     }
1826   }
1827   addOperand(MachineOperand::CreateReg(Reg,
1828                                        true  /*IsDef*/,
1829                                        true  /*IsImp*/));
1830 }
1831 
1832 void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
1833                                          const TargetRegisterInfo &TRI) {
1834   bool HasRegMask = false;
1835   for (MachineOperand &MO : operands()) {
1836     if (MO.isRegMask()) {
1837       HasRegMask = true;
1838       continue;
1839     }
1840     if (!MO.isReg() || !MO.isDef()) continue;
1841     unsigned Reg = MO.getReg();
1842     if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
1843     // If there are no uses, including partial uses, the def is dead.
1844     if (llvm::none_of(UsedRegs,
1845                       [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
1846       MO.setIsDead();
1847   }
1848 
1849   // This is a call with a register mask operand.
1850   // Mask clobbers are always dead, so add defs for the non-dead defines.
1851   if (HasRegMask)
1852     for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
1853          I != E; ++I)
1854       addRegisterDefined(*I, &TRI);
1855 }
1856 
1857 unsigned
1858 MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
1859   // Build up a buffer of hash code components.
1860   SmallVector<size_t, 8> HashComponents;
1861   HashComponents.reserve(MI->getNumOperands() + 1);
1862   HashComponents.push_back(MI->getOpcode());
1863   for (const MachineOperand &MO : MI->operands()) {
1864     if (MO.isReg() && MO.isDef() &&
1865         TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1866       continue;  // Skip virtual register defs.
1867 
1868     HashComponents.push_back(hash_value(MO));
1869   }
1870   return hash_combine_range(HashComponents.begin(), HashComponents.end());
1871 }
1872 
1873 void MachineInstr::emitError(StringRef Msg) const {
1874   // Find the source location cookie.
1875   unsigned LocCookie = 0;
1876   const MDNode *LocMD = nullptr;
1877   for (unsigned i = getNumOperands(); i != 0; --i) {
1878     if (getOperand(i-1).isMetadata() &&
1879         (LocMD = getOperand(i-1).getMetadata()) &&
1880         LocMD->getNumOperands() != 0) {
1881       if (const ConstantInt *CI =
1882               mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
1883         LocCookie = CI->getZExtValue();
1884         break;
1885       }
1886     }
1887   }
1888 
1889   if (const MachineBasicBlock *MBB = getParent())
1890     if (const MachineFunction *MF = MBB->getParent())
1891       return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
1892   report_fatal_error(Msg);
1893 }
1894 
1895 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
1896                                   const MCInstrDesc &MCID, bool IsIndirect,
1897                                   unsigned Reg, const MDNode *Variable,
1898                                   const MDNode *Expr) {
1899   assert(isa<DILocalVariable>(Variable) && "not a variable");
1900   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1901   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
1902          "Expected inlined-at fields to agree");
1903   auto MIB = BuildMI(MF, DL, MCID).addReg(Reg, RegState::Debug);
1904   if (IsIndirect)
1905     MIB.addImm(0U);
1906   else
1907     MIB.addReg(0U, RegState::Debug);
1908   return MIB.addMetadata(Variable).addMetadata(Expr);
1909 }
1910 
1911 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
1912                                   const MCInstrDesc &MCID, bool IsIndirect,
1913                                   MachineOperand &MO, const MDNode *Variable,
1914                                   const MDNode *Expr) {
1915   assert(isa<DILocalVariable>(Variable) && "not a variable");
1916   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1917   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
1918          "Expected inlined-at fields to agree");
1919   if (MO.isReg())
1920     return BuildMI(MF, DL, MCID, IsIndirect, MO.getReg(), Variable, Expr);
1921 
1922   auto MIB = BuildMI(MF, DL, MCID).add(MO);
1923   if (IsIndirect)
1924     MIB.addImm(0U);
1925   else
1926     MIB.addReg(0U, RegState::Debug);
1927   return MIB.addMetadata(Variable).addMetadata(Expr);
1928  }
1929 
1930 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
1931                                   MachineBasicBlock::iterator I,
1932                                   const DebugLoc &DL, const MCInstrDesc &MCID,
1933                                   bool IsIndirect, unsigned Reg,
1934                                   const MDNode *Variable, const MDNode *Expr) {
1935   MachineFunction &MF = *BB.getParent();
1936   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
1937   BB.insert(I, MI);
1938   return MachineInstrBuilder(MF, MI);
1939 }
1940 
1941 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
1942                                   MachineBasicBlock::iterator I,
1943                                   const DebugLoc &DL, const MCInstrDesc &MCID,
1944                                   bool IsIndirect, MachineOperand &MO,
1945                                   const MDNode *Variable, const MDNode *Expr) {
1946   MachineFunction &MF = *BB.getParent();
1947   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, MO, Variable, Expr);
1948   BB.insert(I, MI);
1949   return MachineInstrBuilder(MF, *MI);
1950 }
1951 
1952 /// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
1953 /// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
1954 static const DIExpression *computeExprForSpill(const MachineInstr &MI) {
1955   assert(MI.getOperand(0).isReg() && "can't spill non-register");
1956   assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
1957          "Expected inlined-at fields to agree");
1958 
1959   const DIExpression *Expr = MI.getDebugExpression();
1960   if (MI.isIndirectDebugValue()) {
1961     assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
1962     Expr = DIExpression::prepend(Expr, DIExpression::WithDeref);
1963   }
1964   return Expr;
1965 }
1966 
1967 MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
1968                                           MachineBasicBlock::iterator I,
1969                                           const MachineInstr &Orig,
1970                                           int FrameIndex) {
1971   const DIExpression *Expr = computeExprForSpill(Orig);
1972   return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc())
1973       .addFrameIndex(FrameIndex)
1974       .addImm(0U)
1975       .addMetadata(Orig.getDebugVariable())
1976       .addMetadata(Expr);
1977 }
1978 
1979 void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) {
1980   const DIExpression *Expr = computeExprForSpill(Orig);
1981   Orig.getOperand(0).ChangeToFrameIndex(FrameIndex);
1982   Orig.getOperand(1).ChangeToImmediate(0U);
1983   Orig.getOperand(3).setMetadata(Expr);
1984 }
1985