//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineVerifier.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConvergenceVerifier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

/// Used by the ReportedErrors class to guarantee that only one error is
/// reported at a time.
static ManagedStatic<sys::SmartMutex<true>> ReportedErrorsLock;

struct MachineVerifier {
  MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b,
                  raw_ostream *OS, bool AbortOnError = true)
      : MFAM(&MFAM), OS(OS ? *OS : nulls()), Banner(b),
        ReportedErrs(AbortOnError) {}

  MachineVerifier(Pass *pass, const char *b, raw_ostream *OS,
                  bool AbortOnError = true)
      : PASS(pass), OS(OS ? *OS : nulls()), Banner(b),
        ReportedErrs(AbortOnError) {}

  MachineVerifier(const char *b, LiveVariables *LiveVars,
                  LiveIntervals *LiveInts, LiveStacks *LiveStks,
                  SlotIndexes *Indexes, raw_ostream *OS,
                  bool AbortOnError = true)
      : OS(OS ? *OS : nulls()), Banner(b), LiveVars(LiveVars),
        LiveInts(LiveInts), LiveStks(LiveStks), Indexes(Indexes),
        ReportedErrs(AbortOnError) {}

  /// \returns true if no problems were found.
  bool verify(const MachineFunction &MF);

  MachineFunctionAnalysisManager *MFAM = nullptr;
  Pass *const PASS = nullptr;
  raw_ostream &OS;
  const char *Banner;
  const MachineFunction *MF = nullptr;
  const TargetMachine *TM = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const RegisterBankInfo *RBI = nullptr;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected = false;
  bool isFunctionSelected = false;
  bool isFunctionTracksDebugUserValues = false;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  using RegMap = DenseMap<Register, const MachineInstr *>;
  using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

  const MachineInstr *FirstNonPHI = nullptr;
  const MachineInstr *FirstTerminator = nullptr;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }

  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that only are used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };

  // Extra register info per MBB.
  DenseMap<const MachineBasicBlock *, BBInfo> MBBInfoMap;

  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars = nullptr;
  LiveIntervals *LiveInts = nullptr;
  LiveStacks *LiveStks = nullptr;
  SlotIndexes *Indexes = nullptr;

  /// A class to track the number of reported errors and to guarantee that
  /// only one error is reported at a time.
  class ReportedErrors {
    unsigned NumReported = 0;
    bool AbortOnError;

  public:
    /// \param AbortOnError -- If set, abort after printing the first error.
    ReportedErrors(bool AbortOnError) : AbortOnError(AbortOnError) {}

    ~ReportedErrors() {
      if (!hasError())
        return;
      if (AbortOnError)
        report_fatal_error("Found " + Twine(NumReported) +
                           " machine code errors.");
      // Since we haven't aborted, release the lock to allow other threads to
      // report errors.
      ReportedErrorsLock->unlock();
    }

    /// Increment the number of reported errors.
    /// \returns true if this is the first reported error.
    bool increment() {
      // If this is the first error this thread has encountered, grab the lock
      // to prevent other threads from reporting errors at the same time.
      // Otherwise we assume we already have the lock.
      if (!hasError())
        ReportedErrorsLock->lock();
      ++NumReported;
      return NumReported == 1;
    }

    /// \returns true if an error was reported.
    bool hasError() { return NumReported; }
  };
  ReportedErrors ReportedErrs;

  // This is calculated only when trying to verify convergence control tokens.
  // Similar to the LLVM IR verifier, we calculate this locally instead of
  // relying on the pass manager.
  MachineDominatorTree DT;

  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

  bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
  bool verifyGIntrinsicConvergence(const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);

  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, Register VRegUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(Register VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          Register VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          Register VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval &);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                            LaneBitmask);
  void verifyLiveRangeSegment(const LiveRange &,
                              const LiveRange::const_iterator I, Register,
                              LaneBitmask);
  void verifyLiveRange(const LiveRange &, Register,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};

struct MachineVerifierLegacyPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  const std::string Banner;

  MachineVerifierLegacyPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
    initializeMachineVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addUsedIfAvailable<LiveStacksWrapperLegacy>();
    AU.addUsedIfAvailable<LiveVariablesWrapperPass>();
    AU.addUsedIfAvailable<SlotIndexesWrapperPass>();
    AU.addUsedIfAvailable<LiveIntervalsWrapperPass>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::FailsVerification))
      return false;

    MachineVerifier(this, Banner.c_str(), &errs()).verify(MF);
    return false;
  }
};

} // end anonymous namespace

PreservedAnalyses
MachineVerifierPass::run(MachineFunction &MF,
                         MachineFunctionAnalysisManager &MFAM) {
  // Skip functions that have known verification problems.
  // FIXME: Remove this mechanism when all problematic passes have been
  // fixed.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailsVerification))
    return PreservedAnalyses::all();
  MachineVerifier(MFAM, Banner.c_str(), &errs()).verify(MF);
  return PreservedAnalyses::all();
}

char MachineVerifierLegacyPass::ID = 0;

INITIALIZE_PASS(MachineVerifierLegacyPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierLegacyPass(Banner);
}

void llvm::verifyMachineFunction(const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  MachineVerifier(nullptr, Banner.c_str(), &errs()).verify(MF);
}

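// Example use (a sketch; "After MyPass" is just a placeholder banner): from a
// legacy pass, calling
//   MF.verify(this, "After MyPass", &errs());
// runs the verifier against whatever analyses (LiveIntervals, SlotIndexes,
// ...) that pass happens to have available.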
bool MachineFunction::verify(Pass *p, const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(p, Banner, OS, AbortOnError).verify(*this);
}

bool MachineFunction::verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
                             const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(Banner, /*LiveVars=*/nullptr, LiveInts,
                         /*LiveStks=*/nullptr, Indexes, OS, AbortOnError)
      .verify(*this);
}

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

bool MachineVerifier::verify(const MachineFunction &MF) {
  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return true;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  if (PASS) {
    auto *LISWrapper = PASS->getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
    LiveInts = LISWrapper ? &LISWrapper->getLIS() : nullptr;
    // We don't want to verify LiveVariables if LiveIntervals is available.
    auto *LVWrapper = PASS->getAnalysisIfAvailable<LiveVariablesWrapperPass>();
    if (!LiveInts)
      LiveVars = LVWrapper ? &LVWrapper->getLV() : nullptr;
    auto *LSWrapper = PASS->getAnalysisIfAvailable<LiveStacksWrapperLegacy>();
    LiveStks = LSWrapper ? &LSWrapper->getLS() : nullptr;
    auto *SIWrapper = PASS->getAnalysisIfAvailable<SlotIndexesWrapperPass>();
    Indexes = SIWrapper ? &SIWrapper->getSI() : nullptr;
  }
  if (MFAM) {
    MachineFunction &Func = const_cast<MachineFunction &>(MF);
    LiveInts = MFAM->getCachedResult<LiveIntervalsAnalysis>(Func);
    if (!LiveInts)
      LiveVars = MFAM->getCachedResult<LiveVariablesAnalysis>(Func);
    // TODO: LiveStks = MFAM->getCachedResult<LiveStacksAnalysis>(Func);
    Indexes = MFAM->getCachedResult<SlotIndexesAnalysis>(Func);
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

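    // A bundle is a header instruction (isBundledWithPred() == false)
    // followed by instructions that have isBundledWithPred() set; every
    // bundled instruction except the last also has isBundledWithSucc() set.
    // The flag checks below enforce exactly these invariants.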
    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        OS << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return !ReportedErrs.hasError();
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  OS << '\n';
  if (ReportedErrs.increment()) {
    if (Banner)
      OS << "# " << Banner << '\n';

    if (LiveInts != nullptr)
      LiveInts->print(OS);
    else
      MF->print(OS, Indexes);
  }

  OS << "*** Bad machine code: " << msg << " ***\n"
     << "- function:    " << MF->getName() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  OS << "- basic block: " << printMBBReference(*MBB) << ' ' << MBB->getName()
     << " (" << (const void *)MBB << ')';
  if (Indexes)
    OS << " [" << Indexes->getMBBStartIdx(MBB) << ';'
       << Indexes->getMBBEndIdx(MBB) << ')';
  OS << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  OS << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    OS << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(OS, /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  OS << "- operand " << MONum << ":   ";
  MO->print(OS, MOVRegType, TRI);
  OS << '\n';
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  OS << "- at:          " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  OS << "- interval:    " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  OS << "- segment:     " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  OS << "- ValNo:       " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  OS << "- liverange:   " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  OS << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  OS << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (VRegOrUnit.isVirtual()) {
    report_context_vreg(VRegOrUnit);
  } else {
    OS << "- regunit:     " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  OS << "- lanemask:    " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block, a landing pad, or an inlineasm_br indirect
    // target.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          !MBB->isInlineAsmBrIndirectTarget()) {
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with basic block not used by "
             "a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the predecessor list of the successor "
         << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the successor list of the predecessor "
         << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

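  // Pristine registers (callee-saved registers that the function must
  // preserve and that are in effect live across the whole function) are
  // treated as live here as well, together with their sub-registers.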
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      OS << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      OS << "First terminator was:\t" << *FirstTerminator;
    }
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16, and
  // Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

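  // The operands after the fixed prefix come in groups, each headed by a
  // flag immediate describing the group. For example, a flag whose
  // getNumOperandRegisters() is 2 is followed by exactly two register
  // operands, so that group spans three MI operands in total.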
  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    const InlineAsm::Flag F(MO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check the successor & predecessor lists look ok, assume they are
      // not. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
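/// For example, <4 x s32> and <4 x s16> are consistent (same element count),
/// while <4 x s32> and <2 x s64>, or <4 x s32> and s32, are not.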
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message, let's
    // avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}

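// Verify that G_INTRINSIC / G_INTRINSIC_CONVERGENT are only used with
// intrinsics that don't access memory, and that the _W_SIDE_EFFECTS
// variants are only used with intrinsics that do.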
bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}

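// Likewise, verify that G_INTRINSIC / G_INTRINSIC_W_SIDE_EFFECTS are only
// used with non-convergent intrinsics, and that the _CONVERGENT variants
// are only used with intrinsics declared convergent.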
bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
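    // For example, a G_ZEXTLOAD producing s32 must read strictly fewer than
    // 32 bits from memory (say, an s16 access); a same-width access would be
    // a plain G_LOAD.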
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (TypeSize::isKnownGE(MMO.getSizeInBits().getValue(),
                                ValTy.getSizeInBits()))
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (TypeSize::isKnownGT(MMO.getSize().getValue(),
                                ValTy.getSizeInBytes()))
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (TypeSize::isKnownLT(ValTy.getSizeInBytes(),
                                MMO.getSize().getValue()))
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.isPointerOrPointerVector())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.isPointerOrPointerVector())
      report("gep offset operand must not be a pointer", MI);

    if (PtrTy.isPointerOrPointerVector()) {
      const DataLayout &DL = MF->getDataLayout();
      unsigned AS = PtrTy.getAddressSpace();
      unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
      if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
        report("gep offset operand must match index size for address space",
               MI);
      }
    }

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.isPointerOrPointerVector())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
1414     // Number of operands and presence of types is already checked (and
1415     // reported in case of any issues), so no need to report them again.
1416     // However, since we try to report as many issues as possible at once,
1417     // the instructions aren't guaranteed to have the right number of
1418     // operands or types attached to them at this point.
1419     assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1420     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1421     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1422     if (!DstTy.isValid() || !SrcTy.isValid())
1423       break;
1424 
1425     if (DstTy.isPointerOrPointerVector() || SrcTy.isPointerOrPointerVector())
1426       report("Generic extend/truncate can not operate on pointers", MI);
1427 
1428     verifyVectorElementMatch(DstTy, SrcTy, MI);
1429 
1430     unsigned DstSize = DstTy.getScalarSizeInBits();
1431     unsigned SrcSize = SrcTy.getScalarSizeInBits();
1432     switch (MI->getOpcode()) {
1433     default:
1434       if (DstSize <= SrcSize)
1435         report("Generic extend has destination type no larger than source", MI);
1436       break;
1437     case TargetOpcode::G_TRUNC:
1438     case TargetOpcode::G_FPTRUNC:
1439       if (DstSize >= SrcSize)
1440         report("Generic truncate has destination type no smaller than source",
1441                MI);
1442       break;
1443     }
1444     break;
1445   }
1446   case TargetOpcode::G_SELECT: {
1447     LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1448     LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1449     if (!SelTy.isValid() || !CondTy.isValid())
1450       break;
1451 
1452     // A scalar condition selecting between vector values is valid.
1453     if (CondTy.isVector())
1454       verifyVectorElementMatch(SelTy, CondTy, MI);
1455     break;
1456   }
1457   case TargetOpcode::G_MERGE_VALUES: {
1458     // G_MERGE_VALUES should only be used to merge scalars into a larger
1459     // scalar, e.g. s2N = MERGE sN, sN.
1460     // Merging multiple scalars into a vector is not allowed; use
1461     // G_BUILD_VECTOR for that.
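         // Concretely (illustrative): %0:_(s64) = G_MERGE_VALUES %1:_(s32),
         // %2:_(s32).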
1462     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1463     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1464     if (DstTy.isVector() || SrcTy.isVector())
1465       report("G_MERGE_VALUES cannot operate on vectors", MI);
1466 
1467     const unsigned NumOps = MI->getNumOperands();
1468     if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1469       report("G_MERGE_VALUES result size is inconsistent", MI);
1470 
1471     for (unsigned I = 2; I != NumOps; ++I) {
1472       if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1473         report("G_MERGE_VALUES source types do not match", MI);
1474     }
1475 
1476     break;
1477   }
1478   case TargetOpcode::G_UNMERGE_VALUES: {
1479     unsigned NumDsts = MI->getNumOperands() - 1;
1480     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1481     for (unsigned i = 1; i < NumDsts; ++i) {
1482       if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1483         report("G_UNMERGE_VALUES destination types do not match", MI);
1484         break;
1485       }
1486     }
1487 
1488     LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1489     if (DstTy.isVector()) {
1490       // This case is the converse of G_CONCAT_VECTORS.
1491       if (!SrcTy.isVector() ||
1492           (SrcTy.getScalarType() != DstTy.getScalarType() &&
1493            !SrcTy.isPointerVector()) ||
1494           SrcTy.isScalableVector() != DstTy.isScalableVector() ||
1495           SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1496         report("G_UNMERGE_VALUES source operand does not match vector "
1497                "destination operands",
1498                MI);
1499     } else if (SrcTy.isVector()) {
1500       // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1501       // mismatched types as long as the total size matches:
1502       //   %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1503       if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1504         report("G_UNMERGE_VALUES vector source operand does not match scalar "
1505                "destination operands",
1506                MI);
1507     } else {
1508       // This case is the converse of G_MERGE_VALUES.
1509       if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1510         report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1511                "destination operands",
1512                MI);
1513       }
1514     }
1515     break;
1516   }
1517   case TargetOpcode::G_BUILD_VECTOR: {
1518     // Source types must be scalars, dest type a vector. Total size of scalars
1519     // must match the dest vector size.
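         // e.g. %0:_(<2 x s32>) = G_BUILD_VECTOR %1:_(s32), %2:_(s32)
         // (illustrative).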
1520     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1521     LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1522     if (!DstTy.isVector() || SrcEltTy.isVector()) {
1523       report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1524       break;
1525     }
1526 
1527     if (DstTy.getElementType() != SrcEltTy)
1528       report("G_BUILD_VECTOR result element type must match source type", MI);
1529 
1530     if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1531       report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1532 
1533     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1534       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1535         report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1536 
1537     break;
1538   }
1539   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1540     // Source types must be scalars, dest type a vector. Scalar types must be
1541     // larger than the dest vector elt type, as this is a truncating operation.
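         // e.g. %0:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %1:_(s32), %2:_(s32)
         // (illustrative).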
1542     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1543     LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1544     if (!DstTy.isVector() || SrcEltTy.isVector())
1545       report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1546              MI);
1547     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1548       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1549         report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1550                MI);
1551     if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1552       report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1553              "dest elt type",
1554              MI);
1555     break;
1556   }
1557   case TargetOpcode::G_CONCAT_VECTORS: {
1558     // Source types should be vectors, and total size should match the dest
1559     // vector size.
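         // e.g. %0:_(<4 x s32>) = G_CONCAT_VECTORS %1:_(<2 x s32>), %2:_(<2 x s32>)
         // (illustrative).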
1560     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1561     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1562     if (!DstTy.isVector() || !SrcTy.isVector())
1563       report("G_CONCAT_VECTOR requires vector source and destination operands",
1564              MI);
1565 
1566     if (MI->getNumOperands() < 3)
1567       report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1568 
1569     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1570       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1571         report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1572     if (DstTy.getElementCount() !=
1573         SrcTy.getElementCount() * (MI->getNumOperands() - 1))
1574       report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1575     break;
1576   }
1577   case TargetOpcode::G_ICMP:
1578   case TargetOpcode::G_FCMP: {
1579     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1580     LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1581 
1582     if ((DstTy.isVector() != SrcTy.isVector()) ||
1583         (DstTy.isVector() &&
1584          DstTy.getElementCount() != SrcTy.getElementCount()))
1585       report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1586 
1587     break;
1588   }
1589   case TargetOpcode::G_SCMP:
1590   case TargetOpcode::G_UCMP: {
1591     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1592     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1593 
1594     if (SrcTy.isPointerOrPointerVector()) {
1595       report("Generic scmp/ucmp does not support pointers as operands", MI);
1596       break;
1597     }
1598 
1599     if (DstTy.isPointerOrPointerVector()) {
1600       report("Generic scmp/ucmp does not support pointers as a result", MI);
1601       break;
1602     }
1603 
1604     if (DstTy.getScalarSizeInBits() < 2) {
1605       report("Result type must be at least 2 bits wide", MI);
1606       break;
1607     }
1608 
1609     if ((DstTy.isVector() != SrcTy.isVector()) ||
1610         (DstTy.isVector() &&
1611          DstTy.getElementCount() != SrcTy.getElementCount())) {
1612       report("Generic vector scmp/ucmp must preserve number of lanes", MI);
1613       break;
1614     }
1615 
1616     break;
1617   }
1618   case TargetOpcode::G_EXTRACT: {
1619     const MachineOperand &SrcOp = MI->getOperand(1);
1620     if (!SrcOp.isReg()) {
1621       report("extract source must be a register", MI);
1622       break;
1623     }
1624 
1625     const MachineOperand &OffsetOp = MI->getOperand(2);
1626     if (!OffsetOp.isImm()) {
1627       report("extract offset must be a constant", MI);
1628       break;
1629     }
1630 
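         // For example (illustrative): %1:_(s32) = G_EXTRACT %0:_(s64), 32 reads
         // bits [32, 64) of the source; the offset plus the result size must not
         // exceed the source size.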
1631     unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1632     unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1633     if (SrcSize == DstSize)
1634       report("extract source must be larger than result", MI);
1635 
1636     if (DstSize + OffsetOp.getImm() > SrcSize)
1637       report("extract reads past end of register", MI);
1638     break;
1639   }
1640   case TargetOpcode::G_INSERT: {
1641     const MachineOperand &SrcOp = MI->getOperand(2);
1642     if (!SrcOp.isReg()) {
1643       report("insert source must be a register", MI);
1644       break;
1645     }
1646 
1647     const MachineOperand &OffsetOp = MI->getOperand(3);
1648     if (!OffsetOp.isImm()) {
1649       report("insert offset must be a constant", MI);
1650       break;
1651     }
1652 
1653     unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1654     unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1655 
1656     if (DstSize <= SrcSize)
1657       report("inserted size must be smaller than total register", MI);
1658 
1659     if (SrcSize + OffsetOp.getImm() > DstSize)
1660       report("insert writes past end of register", MI);
1661 
1662     break;
1663   }
1664   case TargetOpcode::G_JUMP_TABLE: {
1665     if (!MI->getOperand(1).isJTI())
1666       report("G_JUMP_TABLE source operand must be a jump table index", MI);
1667     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1668     if (!DstTy.isPointer())
1669       report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1670     break;
1671   }
1672   case TargetOpcode::G_BRJT: {
1673     if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1674       report("G_BRJT src operand 0 must be a pointer type", MI);
1675 
1676     if (!MI->getOperand(1).isJTI())
1677       report("G_BRJT src operand 1 must be a jump table index", MI);
1678 
1679     const auto &IdxOp = MI->getOperand(2);
1680     if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1681       report("G_BRJT src operand 2 must be a scalar reg type", MI);
1682     break;
1683   }
1684   case TargetOpcode::G_INTRINSIC:
1685   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1686   case TargetOpcode::G_INTRINSIC_CONVERGENT:
1687   case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1688     // TODO: Should verify number of def and use operands, but the current
1689     // interface requires passing in IR types for mangling.
1690     const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1691     if (!IntrIDOp.isIntrinsicID()) {
1692       report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1693       break;
1694     }
1695 
1696     if (!verifyGIntrinsicSideEffects(MI))
1697       break;
1698     if (!verifyGIntrinsicConvergence(MI))
1699       break;
1700 
1701     break;
1702   }
1703   case TargetOpcode::G_SEXT_INREG: {
1704     if (!MI->getOperand(2).isImm()) {
1705       report("G_SEXT_INREG expects an immediate operand #2", MI);
1706       break;
1707     }
1708 
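         // For example (illustrative): %1:_(s32) = G_SEXT_INREG %0:_(s32), 8
         // sign-extends from bit 8; the immediate must lie in [1, 31] for an
         // s32 source.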
1709     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1710     int64_t Imm = MI->getOperand(2).getImm();
1711     if (Imm <= 0)
1712       report("G_SEXT_INREG size must be >= 1", MI);
1713     if (Imm >= SrcTy.getScalarSizeInBits())
1714       report("G_SEXT_INREG size must be less than source bit width", MI);
1715     break;
1716   }
1717   case TargetOpcode::G_BSWAP: {
1718     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1719     if (DstTy.getScalarSizeInBits() % 16 != 0)
1720       report("G_BSWAP size must be a multiple of 16 bits", MI);
1721     break;
1722   }
1723   case TargetOpcode::G_VSCALE: {
1724     if (!MI->getOperand(1).isCImm()) {
1725       report("G_VSCALE operand must be cimm", MI);
1726       break;
1727     }
1728     if (MI->getOperand(1).getCImm()->isZero()) {
1729       report("G_VSCALE immediate cannot be zero", MI);
1730       break;
1731     }
1732     break;
1733   }
1734   case TargetOpcode::G_STEP_VECTOR: {
1735     if (!MI->getOperand(1).isCImm()) {
1736       report("operand must be cimm", MI);
1737       break;
1738     }
1739 
1740     if (!MI->getOperand(1).getCImm()->getValue().isStrictlyPositive()) {
1741       report("step must be > 0", MI);
1742       break;
1743     }
1744 
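         // For example (illustrative): %0:_(<vscale x 2 x s64>) = G_STEP_VECTOR
         // i64 1 produces the sequence <0, 1, 2, ...>.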
1745     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1746     if (!DstTy.isScalableVector()) {
1747       report("Destination type must be a scalable vector", MI);
1748       break;
1749     }
1750 
1751     // Reject pointer element types, e.g. <vscale x 2 x p0>.
1752     if (!DstTy.getElementType().isScalar()) {
1753       report("Destination element type must be scalar", MI);
1754       break;
1755     }
1756 
1757     if (MI->getOperand(1).getCImm()->getBitWidth() !=
1758         DstTy.getElementType().getScalarSizeInBits()) {
1759       report("step bitwidth differs from result type element bitwidth", MI);
1760       break;
1761     }
1762     break;
1763   }
1764   case TargetOpcode::G_INSERT_SUBVECTOR: {
1765     const MachineOperand &Src0Op = MI->getOperand(1);
1766     if (!Src0Op.isReg()) {
1767       report("G_INSERT_SUBVECTOR first source must be a register", MI);
1768       break;
1769     }
1770 
1771     const MachineOperand &Src1Op = MI->getOperand(2);
1772     if (!Src1Op.isReg()) {
1773       report("G_INSERT_SUBVECTOR second source must be a register", MI);
1774       break;
1775     }
1776 
1777     const MachineOperand &IndexOp = MI->getOperand(3);
1778     if (!IndexOp.isImm()) {
1779       report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1780       break;
1781     }
1782 
1783     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1784     LLT Src1Ty = MRI->getType(Src1Op.getReg());
1785 
1786     if (!DstTy.isVector()) {
1787       report("Destination type must be a vector", MI);
1788       break;
1789     }
1790 
1791     if (!Src1Ty.isVector()) {
1792       report("Second source must be a vector", MI);
1793       break;
1794     }
1795 
1796     if (DstTy.getElementType() != Src1Ty.getElementType()) {
1797       report("Element type of vectors must be the same", MI);
1798       break;
1799     }
1800 
1801     if (Src1Ty.isScalable() != DstTy.isScalable()) {
1802       report("Vector types must both be fixed or both be scalable", MI);
1803       break;
1804     }
1805 
1806     if (ElementCount::isKnownGT(Src1Ty.getElementCount(),
1807                                 DstTy.getElementCount())) {
1808       report("Second source must be smaller than destination vector", MI);
1809       break;
1810     }
1811 
1812     uint64_t Idx = IndexOp.getImm();
1813     uint64_t Src1MinLen = Src1Ty.getElementCount().getKnownMinValue();
1814     if (Idx % Src1MinLen != 0) {
1815       report("Index must be a multiple of the second source vector's "
1816              "minimum vector length",
1817              MI);
1818       break;
1819     }
1820 
1821     uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1822     if (Idx >= DstMinLen || Idx + Src1MinLen > DstMinLen) {
1823       report("Subvector type and index must not cause insert to overrun the "
1824              "vector being inserted into",
1825              MI);
1826       break;
1827     }
1828 
1829     break;
1830   }
1831   case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1832     const MachineOperand &SrcOp = MI->getOperand(1);
1833     if (!SrcOp.isReg()) {
1834       report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1835       break;
1836     }
1837 
1838     const MachineOperand &IndexOp = MI->getOperand(2);
1839     if (!IndexOp.isImm()) {
1840       report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1841       break;
1842     }
1843 
1844     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1845     LLT SrcTy = MRI->getType(SrcOp.getReg());
1846 
1847     if (!DstTy.isVector()) {
1848       report("Destination type must be a vector", MI);
1849       break;
1850     }
1851 
1852     if (!SrcTy.isVector()) {
1853       report("Source must be a vector", MI);
1854       break;
1855     }
1856 
1857     if (DstTy.getElementType() != SrcTy.getElementType()) {
1858       report("Element type of vectors must be the same", MI);
1859       break;
1860     }
1861 
1862     if (SrcTy.isScalable() != DstTy.isScalable()) {
1863       report("Vector types must both be fixed or both be scalable", MI);
1864       break;
1865     }
1866 
1867     if (ElementCount::isKnownGT(DstTy.getElementCount(),
1868                                 SrcTy.getElementCount())) {
1869       report("Destination vector must be smaller than source vector", MI);
1870       break;
1871     }
1872 
1873     uint64_t Idx = IndexOp.getImm();
1874     uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1875     if (Idx % DstMinLen != 0) {
1876       report("Index must be a multiple of the destination vector's minimum "
1877              "vector length",
1878              MI);
1879       break;
1880     }
1881 
1882     uint64_t SrcMinLen = SrcTy.getElementCount().getKnownMinValue();
1883     if (Idx >= SrcMinLen || Idx + DstMinLen > SrcMinLen) {
1884       report("Destination type and index must not cause extract to overrun the "
1885              "source vector",
1886              MI);
1887       break;
1888     }
1889 
1890     break;
1891   }
1892   case TargetOpcode::G_SHUFFLE_VECTOR: {
1893     const MachineOperand &MaskOp = MI->getOperand(3);
1894     if (!MaskOp.isShuffleMask()) {
1895       report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1896       break;
1897     }
1898 
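         // For example (illustrative): %0:_(<2 x s32>) = G_SHUFFLE_VECTOR
         // %1:_(<2 x s32>), %2:_(<2 x s32>), shufflemask(0, 3) picks lane 0 of
         // the first source and lane 1 of the second; a negative index denotes
         // an undef lane.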
1899     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1900     LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1901     LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1902 
1903     if (Src0Ty != Src1Ty)
1904       report("Source operands must be the same type", MI);
1905 
1906     if (Src0Ty.getScalarType() != DstTy.getScalarType())
1907       report("G_SHUFFLE_VECTOR cannot change element type", MI);
1908 
1909     // Don't check that all operands are vector because scalars are used in
1910     // place of 1 element vectors.
1911     int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1912     int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1913 
1914     ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1915 
1916     if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1917       report("Wrong result type for shufflemask", MI);
1918 
1919     for (int Idx : MaskIdxes) {
1920       if (Idx < 0)
1921         continue;
1922 
1923       if (Idx >= 2 * SrcNumElts)
1924         report("Out of bounds shuffle index", MI);
1925     }
1926 
1927     break;
1928   }
1929 
1930   case TargetOpcode::G_SPLAT_VECTOR: {
1931     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1932     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1933 
1934     if (!DstTy.isScalableVector()) {
1935       report("Destination type must be a scalable vector", MI);
1936       break;
1937     }
1938 
1939     if (!SrcTy.isScalar() && !SrcTy.isPointer()) {
1940       report("Source type must be a scalar or pointer", MI);
1941       break;
1942     }
1943 
1944     if (TypeSize::isKnownGT(DstTy.getElementType().getSizeInBits(),
1945                             SrcTy.getSizeInBits())) {
1946       report("Element type of the destination must be the same size or smaller "
1947              "than the source type",
1948              MI);
1949       break;
1950     }
1951 
1952     break;
1953   }
1954   case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1955     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1956     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1957     LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1958 
1959     if (!DstTy.isScalar() && !DstTy.isPointer()) {
1960       report("Destination type must be a scalar or pointer", MI);
1961       break;
1962     }
1963 
1964     if (!SrcTy.isVector()) {
1965       report("First source must be a vector", MI);
1966       break;
1967     }
1968 
1969     auto TLI = MF->getSubtarget().getTargetLowering();
1970     if (IdxTy.getSizeInBits() !=
1971         TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1972       report("Index type must match VectorIdxTy", MI);
1973       break;
1974     }
1975 
1976     break;
1977   }
1978   case TargetOpcode::G_INSERT_VECTOR_ELT: {
1979     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1980     LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
1981     LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
1982     LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
1983 
1984     if (!DstTy.isVector()) {
1985       report("Destination type must be a vector", MI);
1986       break;
1987     }
1988 
1989     if (VecTy != DstTy) {
1990       report("Destination type and vector type must match", MI);
1991       break;
1992     }
1993 
1994     if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
1995       report("Inserted element must be a scalar or pointer", MI);
1996       break;
1997     }
1998 
1999     auto TLI = MF->getSubtarget().getTargetLowering();
2000     if (IdxTy.getSizeInBits() !=
2001         TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
2002       report("Index type must match VectorIdxTy", MI);
2003       break;
2004     }
2005 
2006     break;
2007   }
2008   case TargetOpcode::G_DYN_STACKALLOC: {
2009     const MachineOperand &DstOp = MI->getOperand(0);
2010     const MachineOperand &AllocOp = MI->getOperand(1);
2011     const MachineOperand &AlignOp = MI->getOperand(2);
2012 
2013     if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
2014       report("dst operand 0 must be a pointer type", MI);
2015       break;
2016     }
2017 
2018     if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
2019       report("src operand 1 must be a scalar reg type", MI);
2020       break;
2021     }
2022 
2023     if (!AlignOp.isImm()) {
2024       report("src operand 2 must be an immediate type", MI);
2025       break;
2026     }
2027     break;
2028   }
2029   case TargetOpcode::G_MEMCPY_INLINE:
2030   case TargetOpcode::G_MEMCPY:
2031   case TargetOpcode::G_MEMMOVE: {
2032     ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2033     if (MMOs.size() != 2) {
2034       report("memcpy/memmove must have 2 memory operands", MI);
2035       break;
2036     }
2037 
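         // Memory operand 0 must be the store to the destination pointer
         // (machine operand 0) and memory operand 1 the load from the source
         // pointer (machine operand 1), as the checks below enforce.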
2038     if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
2039         (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
2040       report("wrong memory operand types", MI);
2041       break;
2042     }
2043 
2044     if (MMOs[0]->getSize() != MMOs[1]->getSize())
2045       report("inconsistent memory operand sizes", MI);
2046 
2047     LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2048     LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
2049 
2050     if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
2051       report("memory instruction operand must be a pointer", MI);
2052       break;
2053     }
2054 
2055     if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2056       report("inconsistent store address space", MI);
2057     if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
2058       report("inconsistent load address space", MI);
2059 
2060     if (Opc != TargetOpcode::G_MEMCPY_INLINE)
2061       if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
2062         report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
2063 
2064     break;
2065   }
2066   case TargetOpcode::G_BZERO:
2067   case TargetOpcode::G_MEMSET: {
2068     ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2069     std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
2070     if (MMOs.size() != 1) {
2071       report(Twine(Name, " must have 1 memory operand"), MI);
2072       break;
2073     }
2074 
2075     if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
2076       report(Twine(Name, " memory operand must be a store"), MI);
2077       break;
2078     }
2079 
2080     LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2081     if (!DstPtrTy.isPointer()) {
2082       report(Twine(Name, " operand must be a pointer"), MI);
2083       break;
2084     }
2085 
2086     if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2087       report("inconsistent " + Twine(Name, " address space"), MI);
2088 
2089     if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
2090         (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
2091       report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
2092 
2093     break;
2094   }
2095   case TargetOpcode::G_UBSANTRAP: {
2096     const MachineOperand &KindOp = MI->getOperand(0);
2097     if (!KindOp.isImm()) {
2098       report("Crash kind must be an immediate", &KindOp, 0);
2099       break;
2100     }
2101     int64_t Kind = KindOp.getImm();
2102     if (!isInt<8>(Kind))
2103       report("Crash kind must be 8 bit wide", &KindOp, 0);
2104     break;
2105   }
2106   case TargetOpcode::G_VECREDUCE_SEQ_FADD:
2107   case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
2108     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2109     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2110     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
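         // For example (illustrative): %0:_(s32) = G_VECREDUCE_SEQ_FADD
         // %acc:_(s32), %v:_(<4 x s32>) -- a scalar accumulator plus a vector
         // operand.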
2111     if (!DstTy.isScalar())
2112       report("Vector reduction requires a scalar destination type", MI);
2113     if (!Src1Ty.isScalar())
2114       report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
2115     if (!Src2Ty.isVector())
2116       report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
2117     break;
2118   }
2119   case TargetOpcode::G_VECREDUCE_FADD:
2120   case TargetOpcode::G_VECREDUCE_FMUL:
2121   case TargetOpcode::G_VECREDUCE_FMAX:
2122   case TargetOpcode::G_VECREDUCE_FMIN:
2123   case TargetOpcode::G_VECREDUCE_FMAXIMUM:
2124   case TargetOpcode::G_VECREDUCE_FMINIMUM:
2125   case TargetOpcode::G_VECREDUCE_ADD:
2126   case TargetOpcode::G_VECREDUCE_MUL:
2127   case TargetOpcode::G_VECREDUCE_AND:
2128   case TargetOpcode::G_VECREDUCE_OR:
2129   case TargetOpcode::G_VECREDUCE_XOR:
2130   case TargetOpcode::G_VECREDUCE_SMAX:
2131   case TargetOpcode::G_VECREDUCE_SMIN:
2132   case TargetOpcode::G_VECREDUCE_UMAX:
2133   case TargetOpcode::G_VECREDUCE_UMIN: {
2134     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2135     if (!DstTy.isScalar())
2136       report("Vector reduction requires a scalar destination type", MI);
2137     break;
2138   }
2139 
2140   case TargetOpcode::G_SBFX:
2141   case TargetOpcode::G_UBFX: {
2142     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2143     if (DstTy.isVector()) {
2144       report("Bitfield extraction is not supported on vectors", MI);
2145       break;
2146     }
2147     break;
2148   }
2149   case TargetOpcode::G_SHL:
2150   case TargetOpcode::G_LSHR:
2151   case TargetOpcode::G_ASHR:
2152   case TargetOpcode::G_ROTR:
2153   case TargetOpcode::G_ROTL: {
2154     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2155     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2156     if (Src1Ty.isVector() != Src2Ty.isVector()) {
2157       report("Shifts and rotates require operands to be either all scalars or "
2158              "all vectors",
2159              MI);
2160       break;
2161     }
2162     break;
2163   }
2164   case TargetOpcode::G_LLROUND:
2165   case TargetOpcode::G_LROUND: {
2166     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2167     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2168     if (!DstTy.isValid() || !SrcTy.isValid())
2169       break;
2170     if (SrcTy.isPointer() || DstTy.isPointer()) {
2171       StringRef Op = SrcTy.isPointer() ? "Source" : "Destination";
2172       report(Twine(Op, " operand must not be a pointer type"), MI);
2173     } else if (SrcTy.isScalar()) {
2174       verifyAllRegOpsScalar(*MI, *MRI);
2175       break;
2176     } else if (SrcTy.isVector()) {
2177       verifyVectorElementMatch(SrcTy, DstTy, MI);
2178       break;
2179     }
2180     break;
2181   }
2182   case TargetOpcode::G_IS_FPCLASS: {
2183     LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2184     LLT DestEltTy = DestTy.getScalarType();
2185     if (!DestEltTy.isScalar()) {
2186       report("Destination must be a scalar or vector of scalars", MI);
2187       break;
2188     }
2189     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2190     LLT SrcEltTy = SrcTy.getScalarType();
2191     if (!SrcEltTy.isScalar()) {
2192       report("Source must be a scalar or vector of scalars", MI);
2193       break;
2194     }
2195     if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2196       break;
2197     const MachineOperand &TestMO = MI->getOperand(2);
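         // Operand 2 is an FPClassTest bitmask, e.g. fcNan | fcInf; bits
         // outside fcAllFlags are invalid.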
2198     if (!TestMO.isImm()) {
2199       report("floating-point class set (operand 2) must be an immediate", MI);
2200       break;
2201     }
2202     int64_t Test = TestMO.getImm();
2203     if (Test < 0 || Test > fcAllFlags) {
2204       report("Incorrect floating-point class set (operand 2)", MI);
2205       break;
2206     }
2207     break;
2208   }
2209   case TargetOpcode::G_PREFETCH: {
2210     const MachineOperand &AddrOp = MI->getOperand(0);
2211     if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2212       report("addr operand must be a pointer", &AddrOp, 0);
2213       break;
2214     }
2215     const MachineOperand &RWOp = MI->getOperand(1);
2216     if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2217       report("rw operand must be an immediate 0-1", &RWOp, 1);
2218       break;
2219     }
2220     const MachineOperand &LocalityOp = MI->getOperand(2);
2221     if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2222       report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2223       break;
2224     }
2225     const MachineOperand &CacheTypeOp = MI->getOperand(3);
2226     if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2227       report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2228       break;
2229     }
2230     break;
2231   }
2232   case TargetOpcode::G_ASSERT_ALIGN: {
2233     if (MI->getOperand(2).getImm() < 1)
2234       report("alignment immediate must be >= 1", MI);
2235     break;
2236   }
2237   case TargetOpcode::G_CONSTANT_POOL: {
2238     if (!MI->getOperand(1).isCPI())
2239       report("Src operand 1 must be a constant pool index", MI);
2240     if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2241       report("Dst operand 0 must be a pointer", MI);
2242     break;
2243   }
2244   case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
2245     const MachineOperand &AddrOp = MI->getOperand(1);
2246     if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
2247       report("addr operand must be a pointer", &AddrOp, 1);
2248     break;
2249   }
2250   default:
2251     break;
2252   }
2253 }
2254 
2255 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2256   const MCInstrDesc &MCID = MI->getDesc();
2257   if (MI->getNumOperands() < MCID.getNumOperands()) {
2258     report("Too few operands", MI);
2259     OS << MCID.getNumOperands() << " operands expected, but "
2260        << MI->getNumOperands() << " given.\n";
2261   }
2262 
2263   if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2264     report("NoConvergent flag expected only on convergent instructions.", MI);
2265 
2266   if (MI->isPHI()) {
2267     if (MF->getProperties().hasProperty(
2268             MachineFunctionProperties::Property::NoPHIs))
2269       report("Found PHI instruction with NoPHIs property set", MI);
2270 
2271     if (FirstNonPHI)
2272       report("Found PHI instruction after non-PHI", MI);
2273   } else if (FirstNonPHI == nullptr)
2274     FirstNonPHI = MI;
2275 
2276   // Check the tied operands.
2277   if (MI->isInlineAsm())
2278     verifyInlineAsm(MI);
2279 
2280   // Check that unspillable terminators define a reg and have at most one use.
2281   if (TII->isUnspillableTerminator(MI)) {
2282     if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2283       report("Unspillable Terminator does not define a reg", MI);
2284     Register Def = MI->getOperand(0).getReg();
2285     if (Def.isVirtual() &&
2286         !MF->getProperties().hasProperty(
2287             MachineFunctionProperties::Property::NoPHIs) &&
2288         std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2289       report("Unspillable Terminator expected to have at most one use!", MI);
2290   }
2291 
2292   // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2293   // DBG_VALUEs: these are convenient to use in tests, but should never get
2294   // generated.
2295   if (MI->isDebugValue() && MI->getNumOperands() == 4)
2296     if (!MI->getDebugLoc())
2297       report("Missing DebugLoc for debug instruction", MI);
2298 
2299   // Meta instructions should never be the subject of debug value tracking,
2300   // they don't create a value in the output program at all.
2301   if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2302     report("Metadata instruction should not have a value tracking number", MI);
2303 
2304   // Check the MachineMemOperands for basic consistency.
2305   for (MachineMemOperand *Op : MI->memoperands()) {
2306     if (Op->isLoad() && !MI->mayLoad())
2307       report("Missing mayLoad flag", MI);
2308     if (Op->isStore() && !MI->mayStore())
2309       report("Missing mayStore flag", MI);
2310   }
2311 
2312   // Debug values must not have a slot index.
2313   // Other instructions must have one, unless they are inside a bundle.
2314   if (LiveInts) {
2315     bool mapped = !LiveInts->isNotInMIMap(*MI);
2316     if (MI->isDebugOrPseudoInstr()) {
2317       if (mapped)
2318         report("Debug instruction has a slot index", MI);
2319     } else if (MI->isInsideBundle()) {
2320       if (mapped)
2321         report("Instruction inside bundle has a slot index", MI);
2322     } else {
2323       if (!mapped)
2324         report("Missing slot index", MI);
2325     }
2326   }
2327 
2328   unsigned Opc = MCID.getOpcode();
2329   if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
2330     verifyPreISelGenericInstruction(MI);
2331     return;
2332   }
2333 
2334   StringRef ErrorInfo;
2335   if (!TII->verifyInstruction(*MI, ErrorInfo))
2336     report(ErrorInfo.data(), MI);
2337 
2338   // Verify properties of various specific instruction types
2339   switch (MI->getOpcode()) {
2340   case TargetOpcode::COPY: {
2341     const MachineOperand &DstOp = MI->getOperand(0);
2342     const MachineOperand &SrcOp = MI->getOperand(1);
2343     const Register SrcReg = SrcOp.getReg();
2344     const Register DstReg = DstOp.getReg();
2345 
2346     LLT DstTy = MRI->getType(DstReg);
2347     LLT SrcTy = MRI->getType(SrcReg);
2348     if (SrcTy.isValid() && DstTy.isValid()) {
2349       // If both types are valid, check that the types are the same.
2350       if (SrcTy != DstTy) {
2351         report("Copy Instruction is illegal with mismatching types", MI);
2352         OS << "Def = " << DstTy << ", Src = " << SrcTy << '\n';
2353       }
2354 
2355       break;
2356     }
2357 
2358     if (!SrcTy.isValid() && !DstTy.isValid())
2359       break;
2360 
2361     // If we have only one valid type, this is likely a copy between a virtual
2362     // and physical register.
2363     TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2364     TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2365     if (SrcReg.isPhysical() && DstTy.isValid()) {
2366       const TargetRegisterClass *SrcRC =
2367           TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2368       if (SrcRC)
2369         SrcSize = TRI->getRegSizeInBits(*SrcRC);
2370     }
2371 
2372     if (DstReg.isPhysical() && SrcTy.isValid()) {
2373       const TargetRegisterClass *DstRC =
2374           TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2375       if (DstRC)
2376         DstSize = TRI->getRegSizeInBits(*DstRC);
2377     }
2378 
2379     // The next two checks allow COPY between physical and virtual registers,
2380     // when the virtual register has a scalable size and the physical register
2381     // has a fixed size. These checks allow COPY between *potentially* mismatched
2382     // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
2383     // be able to resolve a fixed size for the scalable vector, and at that
2384     // point this function will know for sure whether the sizes are mismatched
2385     // and correctly report a size mismatch.
2386     if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2387         !SrcSize.isScalable())
2388       break;
2389     if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2390         !DstSize.isScalable())
2391       break;
2392 
2393     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2394       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2395         report("Copy Instruction is illegal with mismatching sizes", MI);
2396         OS << "Def Size = " << DstSize << ", Src Size = " << SrcSize << '\n';
2397       }
2398     }
2399     break;
2400   }
2401   case TargetOpcode::STATEPOINT: {
2402     StatepointOpers SO(MI);
2403     if (!MI->getOperand(SO.getIDPos()).isImm() ||
2404         !MI->getOperand(SO.getNBytesPos()).isImm() ||
2405         !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2406       report("meta operands to STATEPOINT not constant!", MI);
2407       break;
2408     }
2409 
2410     auto VerifyStackMapConstant = [&](unsigned Offset) {
2411       if (Offset >= MI->getNumOperands()) {
2412         report("stack map constant to STATEPOINT is out of range!", MI);
2413         return;
2414       }
2415       if (!MI->getOperand(Offset - 1).isImm() ||
2416           MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2417           !MI->getOperand(Offset).isImm())
2418         report("stack map constant to STATEPOINT not well formed!", MI);
2419     };
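         // Each stack map constant is encoded as an immediate pair: a
         // StackMaps::ConstantOp marker at Offset - 1 followed by the value
         // itself at Offset.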
2420     VerifyStackMapConstant(SO.getCCIdx());
2421     VerifyStackMapConstant(SO.getFlagsIdx());
2422     VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2423     VerifyStackMapConstant(SO.getNumGCPtrIdx());
2424     VerifyStackMapConstant(SO.getNumAllocaIdx());
2425     VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2426 
2427     // Verify that all explicit statepoint defs are tied to gc operands as
2428     // they are expected to be a relocation of gc operands.
2429     unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2430     unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2431     for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2432       unsigned UseOpIdx;
2433       if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2434         report("STATEPOINT defs expected to be tied", MI);
2435         break;
2436       }
2437       if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2438         report("STATEPOINT def tied to non-gc operand", MI);
2439         break;
2440       }
2441     }
2442 
2443     // TODO: verify we have properly encoded deopt arguments
2444   } break;
2445   case TargetOpcode::INSERT_SUBREG: {
2446     unsigned InsertedSize;
2447     if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2448       InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2449     else
2450       InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2451     unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2452     if (SubRegSize < InsertedSize) {
2453       report("INSERT_SUBREG expected inserted value to have equal or lesser "
2454              "size than the subreg it was inserted into", MI);
2455       break;
2456     }
2457   } break;
2458   case TargetOpcode::REG_SEQUENCE: {
2459     unsigned NumOps = MI->getNumOperands();
2460     if (!(NumOps & 1)) {
2461       report("Invalid number of operands for REG_SEQUENCE", MI);
2462       break;
2463     }
2464 
2465     for (unsigned I = 1; I != NumOps; I += 2) {
2466       const MachineOperand &RegOp = MI->getOperand(I);
2467       const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2468 
2469       if (!RegOp.isReg())
2470         report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2471 
2472       if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2473           SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2474         report("Invalid subregister index operand for REG_SEQUENCE",
2475                &SubRegOp, I + 1);
2476       }
2477     }
2478 
2479     Register DstReg = MI->getOperand(0).getReg();
2480     if (DstReg.isPhysical())
2481       report("REG_SEQUENCE does not support physical register results", MI);
2482 
2483     if (MI->getOperand(0).getSubReg())
2484       report("Invalid subreg result for REG_SEQUENCE", MI);
2485 
2486     break;
2487   }
2488   }
2489 }
2490 
2491 void
2492 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2493   const MachineInstr *MI = MO->getParent();
2494   const MCInstrDesc &MCID = MI->getDesc();
2495   unsigned NumDefs = MCID.getNumDefs();
2496   if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2497     NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2498 
2499   // The first MCID.NumDefs operands must be explicit register defines
2500   if (MONum < NumDefs) {
2501     const MCOperandInfo &MCOI = MCID.operands()[MONum];
2502     if (!MO->isReg())
2503       report("Explicit definition must be a register", MO, MONum);
2504     else if (!MO->isDef() && !MCOI.isOptionalDef())
2505       report("Explicit definition marked as use", MO, MONum);
2506     else if (MO->isImplicit())
2507       report("Explicit definition marked as implicit", MO, MONum);
2508   } else if (MONum < MCID.getNumOperands()) {
2509     const MCOperandInfo &MCOI = MCID.operands()[MONum];
2510     // Don't check if it's the last operand in a variadic instruction. See,
2511     // e.g., LDM_RET in the ARM backend. Check non-variadic operands only.
2512     bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2513     if (!IsOptional) {
2514       if (MO->isReg()) {
2515         if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2516           report("Explicit operand marked as def", MO, MONum);
2517         if (MO->isImplicit())
2518           report("Explicit operand marked as implicit", MO, MONum);
2519       }
2520 
2521       // Check that an instruction has register operands only as expected.
2522       if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2523           !MO->isReg() && !MO->isFI())
2524         report("Expected a register operand.", MO, MONum);
2525       if (MO->isReg()) {
2526         if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2527             (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2528              !TII->isPCRelRegisterOperandLegal(*MO)))
2529           report("Expected a non-register operand.", MO, MONum);
2530       }
2531     }
2532 
2533     int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2534     if (TiedTo != -1) {
2535       if (!MO->isReg())
2536         report("Tied use must be a register", MO, MONum);
2537       else if (!MO->isTied())
2538         report("Operand should be tied", MO, MONum);
2539       else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2540         report("Tied def doesn't match MCInstrDesc", MO, MONum);
2541       else if (MO->getReg().isPhysical()) {
2542         const MachineOperand &MOTied = MI->getOperand(TiedTo);
2543         if (!MOTied.isReg())
2544           report("Tied counterpart must be a register", &MOTied, TiedTo);
2545         else if (MOTied.getReg().isPhysical() &&
2546                  MO->getReg() != MOTied.getReg())
2547           report("Tied physical registers must match.", &MOTied, TiedTo);
2548       }
2549     } else if (MO->isReg() && MO->isTied())
2550       report("Explicit operand should not be tied", MO, MONum);
2551   } else if (!MI->isVariadic()) {
2552     // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2553     if (!MO->isValidExcessOperand())
2554       report("Extra explicit operand on non-variadic instruction", MO, MONum);
2555   }
2556 
2557   switch (MO->getType()) {
2558   case MachineOperand::MO_Register: {
2559     // Verify debug flag on debug instructions. Check this first because reg0
2560     // indicates an undefined debug value.
2561     if (MI->isDebugInstr() && MO->isUse()) {
2562       if (!MO->isDebug())
2563         report("Register operand must be marked debug", MO, MONum);
2564     } else if (MO->isDebug()) {
2565       report("Register operand must not be marked debug", MO, MONum);
2566     }
2567 
2568     const Register Reg = MO->getReg();
2569     if (!Reg)
2570       return;
2571     if (MRI->tracksLiveness() && !MI->isDebugInstr())
2572       checkLiveness(MO, MONum);
2573 
2574     if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2575         MO->getReg().isVirtual()) // TODO: Apply to physregs too
2576       report("Undef virtual register def operands require a subregister", MO, MONum);
2577 
2578     // Verify the consistency of tied operands.
2579     if (MO->isTied()) {
2580       unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2581       const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2582       if (!OtherMO.isReg())
2583         report("Must be tied to a register", MO, MONum);
2584       if (!OtherMO.isTied())
2585         report("Missing tie flags on tied operand", MO, MONum);
2586       if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2587         report("Inconsistent tie links", MO, MONum);
2588       if (MONum < MCID.getNumDefs()) {
2589         if (OtherIdx < MCID.getNumOperands()) {
2590           if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2591             report("Explicit def tied to explicit use without tie constraint",
2592                    MO, MONum);
2593         } else {
2594           if (!OtherMO.isImplicit())
2595             report("Explicit def should be tied to implicit use", MO, MONum);
2596         }
2597       }
2598     }
2599 
2600     // Verify two-address constraints after the TwoAddressInstruction pass.
2601     // Both the TwoAddressInstruction and PHI-node-elimination passes call
2602     // MRI->leaveSSA() to mark MF as no longer IsSSA, but the verification
2603     // must run after TwoAddressInstruction, not after PHI elimination. So we
2604     // can't use IsSSA as the condition; instead, we verify the two-address
2605     // constraints based on the TiedOpsRewritten property, which is set by
2606     // the TwoAddressInstruction pass.
2607     unsigned DefIdx;
2608     if (MF->getProperties().hasProperty(
2609             MachineFunctionProperties::Property::TiedOpsRewritten) &&
2610         MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2611         Reg != MI->getOperand(DefIdx).getReg())
2612       report("Two-address instruction operands must be identical", MO, MONum);
2613 
2614     // Check register classes.
2615     unsigned SubIdx = MO->getSubReg();
2616 
2617     if (Reg.isPhysical()) {
2618       if (SubIdx) {
2619         report("Illegal subregister index for physical register", MO, MONum);
2620         return;
2621       }
2622       if (MONum < MCID.getNumOperands()) {
2623         if (const TargetRegisterClass *DRC =
2624               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2625           if (!DRC->contains(Reg)) {
2626             report("Illegal physical register for instruction", MO, MONum);
2627             OS << printReg(Reg, TRI) << " is not a "
2628                << TRI->getRegClassName(DRC) << " register.\n";
2629           }
2630         }
2631       }
2632       if (MO->isRenamable()) {
2633         if (MRI->isReserved(Reg)) {
2634           report("isRenamable set on reserved register", MO, MONum);
2635           return;
2636         }
2637       }
2638     } else {
2639       // Virtual register.
2640       const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2641       if (!RC) {
2642         // This is a generic virtual register.
2643 
2644         // Do not allow undef uses for generic virtual registers. This ensures
2645         // getVRegDef can never fail and return null on a generic register.
2646         //
2647         // FIXME: This restriction should probably be broadened to all SSA
2648         // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2649         // run on the SSA function just before phi elimination.
2650         if (MO->isUndef())
2651           report("Generic virtual register use cannot be undef", MO, MONum);
2652 
2653         // Debug value instruction is permitted to use undefined vregs.
2654         // This is a performance measure to skip the overhead of immediately
2655         // pruning unused debug operands. The final undef substitution occurs
2656         // when debug values are allocated in LDVImpl::handleDebugValue, so
2657         // these verifications always apply after this pass.
2658         if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2659             !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2660           // If we're post-Select, we can't have gvregs anymore.
2661           if (isFunctionSelected) {
2662             report("Generic virtual register invalid in a Selected function",
2663                    MO, MONum);
2664             return;
2665           }
2666 
2667           // The gvreg must have a type and it must not have a SubIdx.
2668           LLT Ty = MRI->getType(Reg);
2669           if (!Ty.isValid()) {
2670             report("Generic virtual register must have a valid type", MO,
2671                    MONum);
2672             return;
2673           }
2674 
2675           const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2676           const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2677 
2678           // If we're post-RegBankSelect, the gvreg must have a bank.
2679           if (!RegBank && isFunctionRegBankSelected) {
2680             report("Generic virtual register must have a bank in a "
2681                    "RegBankSelected function",
2682                    MO, MONum);
2683             return;
2684           }
2685 
2686           // Make sure the register fits into its register bank if any.
2687           if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2688               RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2689             report("Register bank is too small for virtual register", MO,
2690                    MONum);
2691             OS << "Register bank " << RegBank->getName() << " too small("
2692                << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2693                << Ty.getSizeInBits() << "-bits\n";
2694             return;
2695           }
2696         }
2697 
2698         if (SubIdx) {
2699           report("Generic virtual register does not allow subregister index", MO,
2700                  MONum);
2701           return;
2702         }
2703 
2704         // If this is a target specific instruction and this operand
2705         // has register class constraint, the virtual register must
2706         // comply to it.
2707         if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2708             MONum < MCID.getNumOperands() &&
2709             TII->getRegClass(MCID, MONum, TRI, *MF)) {
2710           report("Virtual register does not match instruction constraint", MO,
2711                  MONum);
2712           OS << "Expect register class "
2713              << TRI->getRegClassName(TII->getRegClass(MCID, MONum, TRI, *MF))
2714              << " but got nothing\n";
2715           return;
2716         }
2717 
2718         break;
2719       }
2720       if (SubIdx) {
2721         const TargetRegisterClass *SRC =
2722           TRI->getSubClassWithSubReg(RC, SubIdx);
2723         if (!SRC) {
2724           report("Invalid subregister index for virtual register", MO, MONum);
2725           OS << "Register class " << TRI->getRegClassName(RC)
2726              << " does not support subreg index " << SubIdx << '\n';
2727           return;
2728         }
2729         if (RC != SRC) {
2730           report("Invalid register class for subregister index", MO, MONum);
2731           OS << "Register class " << TRI->getRegClassName(RC)
2732              << " does not fully support subreg index " << SubIdx << '\n';
2733           return;
2734         }
2735       }
2736       if (MONum < MCID.getNumOperands()) {
2737         if (const TargetRegisterClass *DRC =
2738               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2739           if (SubIdx) {
2740             const TargetRegisterClass *SuperRC =
2741                 TRI->getLargestLegalSuperClass(RC, *MF);
2742             if (!SuperRC) {
2743               report("No largest legal super class exists.", MO, MONum);
2744               return;
2745             }
2746             DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2747             if (!DRC) {
2748               report("No matching super-reg register class.", MO, MONum);
2749               return;
2750             }
2751           }
2752           if (!RC->hasSuperClassEq(DRC)) {
2753             report("Illegal virtual register for instruction", MO, MONum);
2754             OS << "Expected a " << TRI->getRegClassName(DRC)
2755                << " register, but got a " << TRI->getRegClassName(RC)
2756                << " register\n";
2757           }
2758         }
2759       }
2760     }
2761     break;
2762   }
2763 
2764   case MachineOperand::MO_RegisterMask:
2765     regMasks.push_back(MO->getRegMask());
2766     break;
2767 
2768   case MachineOperand::MO_MachineBasicBlock:
2769     if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2770       report("PHI operand is not in the CFG", MO, MONum);
2771     break;
2772 
2773   case MachineOperand::MO_FrameIndex:
2774     if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2775         LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2776       int FI = MO->getIndex();
2777       LiveInterval &LI = LiveStks->getInterval(FI);
2778       SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2779 
2780       bool stores = MI->mayStore();
2781       bool loads = MI->mayLoad();
2782       // For a memory-to-memory move, we need to check if the frame
2783       // index is used for storing or loading, by inspecting the
2784       // memory operands.
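           // (A stack-to-stack move, for instance, both loads and stores; its
           // memoperands say which side refers to this frame index.)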
2785       if (stores && loads) {
2786         for (auto *MMO : MI->memoperands()) {
2787           const PseudoSourceValue *PSV = MMO->getPseudoValue();
2788           if (PSV == nullptr) continue;
2789           const FixedStackPseudoSourceValue *Value =
2790             dyn_cast<FixedStackPseudoSourceValue>(PSV);
2791           if (Value == nullptr) continue;
2792           if (Value->getFrameIndex() != FI) continue;
2793 
2794           if (MMO->isStore())
2795             loads = false;
2796           else
2797             stores = false;
2798           break;
2799         }
2800         if (loads == stores)
2801           report("Missing fixed stack memoperand.", MI);
2802       }
2803       if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2804         report("Instruction loads from dead spill slot", MO, MONum);
2805         OS << "Live stack: " << LI << '\n';
2806       }
2807       if (stores && !LI.liveAt(Idx.getRegSlot())) {
2808         report("Instruction stores to dead spill slot", MO, MONum);
2809         OS << "Live stack: " << LI << '\n';
2810       }
2811     }
2812     break;
2813 
2814   case MachineOperand::MO_CFIIndex:
2815     if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2816       report("CFI instruction has invalid index", MO, MONum);
2817     break;
2818 
2819   default:
2820     break;
2821   }
2822 }
2823 
2824 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2825                                          unsigned MONum, SlotIndex UseIdx,
2826                                          const LiveRange &LR,
2827                                          Register VRegOrUnit,
2828                                          LaneBitmask LaneMask) {
2829   const MachineInstr *MI = MO->getParent();
2830 
2831   if (!LR.verify()) {
2832     report("invalid live range", MO, MONum);
2833     report_context_liverange(LR);
2834     report_context_vreg_regunit(VRegOrUnit);
2835     report_context(UseIdx);
2836     return;
2837   }
2838 
2839   LiveQueryResult LRQ = LR.Query(UseIdx);
2840   bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2841   // Check if we have a segment at the use. Note, however, that we only need
2842   // one live subregister range; the others may be dead.
2843   if (!HasValue && LaneMask.none()) {
2844     report("No live segment at use", MO, MONum);
2845     report_context_liverange(LR);
2846     report_context_vreg_regunit(VRegOrUnit);
2847     report_context(UseIdx);
2848   }
2849   if (MO->isKill() && !LRQ.isKill()) {
2850     report("Live range continues after kill flag", MO, MONum);
2851     report_context_liverange(LR);
2852     report_context_vreg_regunit(VRegOrUnit);
2853     if (LaneMask.any())
2854       report_context_lanemask(LaneMask);
2855     report_context(UseIdx);
2856   }
2857 }
2858 
2859 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2860                                          unsigned MONum, SlotIndex DefIdx,
2861                                          const LiveRange &LR,
2862                                          Register VRegOrUnit,
2863                                          bool SubRangeCheck,
2864                                          LaneBitmask LaneMask) {
2865   if (!LR.verify()) {
2866     report("invalid live range", MO, MONum);
2867     report_context_liverange(LR);
2868     report_context_vreg_regunit(VRegOrUnit);
2869     if (LaneMask.any())
2870       report_context_lanemask(LaneMask);
2871     report_context(DefIdx);
2872   }
2873 
2874   if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2875     // The LR can correspond to the whole reg, and its def slot is not
2876     // obliged to be the same as the MO's def slot, e.g. when we check a
2877     // "normal" subreg MO here but another early-clobber subreg MO in the
2878     // same instruction gives the whole reg an early-clobber def slot that
2879     // differs from the def slot of the MO currently being checked. E.g.:
2880     // %0 [16e,32r:0) 0@16e  L..3 [16e,32r:0) 0@16e  L..C [16r,32r:0) 0@16r
2881     // The check that there is an early-clobber def of the same
2882     // superregister somewhere is performed in visitMachineFunctionAfter().
2883     if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2884         !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2885         (VNI->def != DefIdx &&
2886          (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2887       report("Inconsistent valno->def", MO, MONum);
2888       report_context_liverange(LR);
2889       report_context_vreg_regunit(VRegOrUnit);
2890       if (LaneMask.any())
2891         report_context_lanemask(LaneMask);
2892       report_context(*VNI);
2893       report_context(DefIdx);
2894     }
2895   } else {
2896     report("No live segment at def", MO, MONum);
2897     report_context_liverange(LR);
2898     report_context_vreg_regunit(VRegOrUnit);
2899     if (LaneMask.any())
2900       report_context_lanemask(LaneMask);
2901     report_context(DefIdx);
2902   }
2903   // Check that, if the dead def flag is present, LiveInts agrees.
2904   if (MO->isDead()) {
2905     LiveQueryResult LRQ = LR.Query(DefIdx);
2906     if (!LRQ.isDeadDef()) {
2907       assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2908       // A dead subreg def only tells us that the specific subreg is dead. There
2909       // could be other non-dead defs of other subregs, or we could have other
2910       // parts of the register being live through the instruction. So unless we
2911       // are checking liveness for a subrange it is ok for the live range to
2912       // continue, given that we have a dead def of a subregister.
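           // (E.g. "dead %0.sub0 = ..." while %0.sub1 remains live across the
           // instruction: the main live range legitimately continues.)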
2913       if (SubRangeCheck || MO->getSubReg() == 0) {
2914         report("Live range continues after dead def flag", MO, MONum);
2915         report_context_liverange(LR);
2916         report_context_vreg_regunit(VRegOrUnit);
2917         if (LaneMask.any())
2918           report_context_lanemask(LaneMask);
2919       }
2920     }
2921   }
2922 }
2923 
2924 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2925   const MachineInstr *MI = MO->getParent();
2926   const Register Reg = MO->getReg();
2927   const unsigned SubRegIdx = MO->getSubReg();
2928 
2929   const LiveInterval *LI = nullptr;
2930   if (LiveInts && Reg.isVirtual()) {
2931     if (LiveInts->hasInterval(Reg)) {
2932       LI = &LiveInts->getInterval(Reg);
2933       if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2934           !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2935         report("Live interval for subreg operand has no subranges", MO, MONum);
2936     } else {
2937       report("Virtual register has no live interval", MO, MONum);
2938     }
2939   }
2940 
2941   // Both use and def operands can read a register.
2942   if (MO->readsReg()) {
2943     if (MO->isKill())
2944       addRegWithSubRegs(regsKilled, Reg);
2945 
2946     // Check that LiveVars knows this kill (unless we are inside a bundle, in
2947     // which case we have already checked that LiveVars knows any kills on the
2948     // bundle header instead).
2949     if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2950         !MI->isBundledWithPred()) {
2951       LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2952       if (!is_contained(VI.Kills, MI))
2953         report("Kill missing from LiveVariables", MO, MONum);
2954     }
2955 
2956     // Check LiveInts liveness and kill.
2957     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2958       SlotIndex UseIdx;
2959       if (MI->isPHI()) {
2960         // PHI use occurs on the edge, so check for live out here instead.
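             // E.g. for "%2 = PHI %0, %bb.1, ...", the %0 operand must be live
             // out of %bb.1, so it is checked at the slot just before the end
             // of that predecessor block.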
2961         UseIdx = LiveInts->getMBBEndIdx(
2962           MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
2963       } else {
2964         UseIdx = LiveInts->getInstructionIndex(*MI);
2965       }
2966       // Check the cached regunit intervals.
2967       if (Reg.isPhysical() && !isReserved(Reg)) {
2968         for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
2969           if (MRI->isReservedRegUnit(Unit))
2970             continue;
2971           if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
2972             checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
2973         }
2974       }
2975 
2976       if (Reg.isVirtual()) {
2977         // This is a virtual register interval.
2978         checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2979 
2980         if (LI->hasSubRanges() && !MO->isDef()) {
2981           LaneBitmask MOMask = SubRegIdx != 0
2982                                    ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2983                                    : MRI->getMaxLaneMaskForVReg(Reg);
2984           LaneBitmask LiveInMask;
2985           for (const LiveInterval::SubRange &SR : LI->subranges()) {
2986             if ((MOMask & SR.LaneMask).none())
2987               continue;
2988             checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2989             LiveQueryResult LRQ = SR.Query(UseIdx);
2990             if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
2991               LiveInMask |= SR.LaneMask;
2992           }
2993           // At least part of the register has to be live at the use.
2994           if ((LiveInMask & MOMask).none()) {
2995             report("No live subrange at use", MO, MONum);
2996             report_context(*LI);
2997             report_context(UseIdx);
2998           }
2999           // For PHIs, all lanes should be live.
3000           if (MI->isPHI() && LiveInMask != MOMask) {
3001             report("Not all lanes of PHI source live at use", MO, MONum);
3002             report_context(*LI);
3003             report_context(UseIdx);
3004           }
3005         }
3006       }
3007     }
3008 
3009     // Use of a dead register.
3010     if (!regsLive.count(Reg)) {
3011       if (Reg.isPhysical()) {
3012         // Reserved registers may be used even when 'dead'.
3013         bool Bad = !isReserved(Reg);
3014         // We are fine if just any subregister has a defined value.
3015         if (Bad) {
3017           for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
3018             if (regsLive.count(SubReg)) {
3019               Bad = false;
3020               break;
3021             }
3022           }
3023         }
3024         // If there is an additional implicit use of a super register we stop
3025         // here. By definition we are fine if the super register is not
3026         // (completely) dead; if the complete super register is dead we will
3027         // get a report for its operand.
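             // (E.g. an otherwise-undefined use of $al is acceptable when the
             // instruction also has an implicit use of $eax, whose regunits
             // cover those of $al; the x86 names here are illustrative only.)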
3028         if (Bad) {
3029           for (const MachineOperand &MOP : MI->uses()) {
3030             if (!MOP.isReg() || !MOP.isImplicit())
3031               continue;
3032 
3033             if (!MOP.getReg().isPhysical())
3034               continue;
3035 
3036             if (MOP.getReg() != Reg &&
3037                 all_of(TRI->regunits(Reg), [&](const MCRegUnit RegUnit) {
3038                   return llvm::is_contained(TRI->regunits(MOP.getReg()),
3039                                             RegUnit);
3040                 }))
3041               Bad = false;
3042           }
3043         }
3044         if (Bad)
3045           report("Using an undefined physical register", MO, MONum);
3046       } else if (MRI->def_empty(Reg)) {
3047         report("Reading virtual register without a def", MO, MONum);
3048       } else {
3049         BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3050         // We don't know which virtual registers are live in, so only complain
3051         // if vreg was killed in this MBB. Otherwise keep track of vregs that
3052         // must be live in. PHI instructions are handled separately.
3053         if (MInfo.regsKilled.count(Reg))
3054           report("Using a killed virtual register", MO, MONum);
3055         else if (!MI->isPHI())
3056           MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
3057       }
3058     }
3059   }
3060 
3061   if (MO->isDef()) {
3062     // Register defined.
3063     // TODO: verify that earlyclobber ops are not used.
3064     if (MO->isDead())
3065       addRegWithSubRegs(regsDead, Reg);
3066     else
3067       addRegWithSubRegs(regsDefined, Reg);
3068 
3069     // Verify SSA form.
3070     if (MRI->isSSA() && Reg.isVirtual() &&
3071         std::next(MRI->def_begin(Reg)) != MRI->def_end())
3072       report("Multiple virtual register defs in SSA form", MO, MONum);
3073 
3074     // Check LiveInts for a live segment, but only for virtual registers.
3075     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
3076       SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
3077       DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
3078 
3079       if (Reg.isVirtual()) {
3080         checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
3081 
3082         if (LI->hasSubRanges()) {
3083           LaneBitmask MOMask = SubRegIdx != 0
3084                                    ? TRI->getSubRegIndexLaneMask(SubRegIdx)
3085                                    : MRI->getMaxLaneMaskForVReg(Reg);
3086           for (const LiveInterval::SubRange &SR : LI->subranges()) {
3087             if ((SR.LaneMask & MOMask).none())
3088               continue;
3089             checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
3090           }
3091         }
3092       }
3093     }
3094   }
3095 }
3096 
3097 // This function gets called after visiting all instructions in a bundle. The
3098 // argument points to the bundle header.
3099 // Normal stand-alone instructions are also considered 'bundles', and this
3100 // function is called for all of them.
3101 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
3102   BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3103   set_union(MInfo.regsKilled, regsKilled);
3104   set_subtract(regsLive, regsKilled); regsKilled.clear();
3105   // Kill any masked registers.
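       // A register mask operand (e.g. a call's clobber mask) kills every
       // physical register whose bit is clear in the mask.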
3106   while (!regMasks.empty()) {
3107     const uint32_t *Mask = regMasks.pop_back_val();
3108     for (Register Reg : regsLive)
3109       if (Reg.isPhysical() &&
3110           MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
3111         regsDead.push_back(Reg);
3112   }
3113   set_subtract(regsLive, regsDead);   regsDead.clear();
3114   set_union(regsLive, regsDefined);   regsDefined.clear();
3115 }
3116 
3117 void
3118 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
3119   MBBInfoMap[MBB].regsLiveOut = regsLive;
3120   regsLive.clear();
3121 
3122   if (Indexes) {
3123     SlotIndex stop = Indexes->getMBBEndIdx(MBB);
3124     if (!(stop > lastIndex)) {
3125       report("Block ends before last instruction index", MBB);
3126       OS << "Block ends at " << stop << ", last instruction was at "
3127          << lastIndex << '\n';
3128     }
3129     lastIndex = stop;
3130   }
3131 }
3132 
3133 namespace {
3134 // This implements a set of registers that serves as a filter: it can filter
3135 // other sets by passing through elements not in the filter and blocking those
3136 // that are. Any filter implicitly includes the full set of physical registers
3137 // upon creation, thus filtering them all out. The filter itself only grows as
3138 // a set, and needs to be as efficient as possible.
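     // A rough usage sketch (illustrative only; FilteringVRegSet below is the
     // real client):
     //   VRegFilter F;
     //   F.add(Seen);                        // seed the filter
     //   SmallVector<Register, 0> Fresh;
     //   F.filterAndAdd(Incoming, Fresh);    // Fresh gets unseen vregs, which
     //                                       // are also added to the filter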
3139 struct VRegFilter {
3140   // Add elements to the filter itself. \pre Input set \p FromRegSet must have
3141   // no duplicates. Both virtual and physical registers are fine.
3142   template <typename RegSetT> void add(const RegSetT &FromRegSet) {
3143     SmallVector<Register, 0> VRegsBuffer;
3144     filterAndAdd(FromRegSet, VRegsBuffer);
3145   }
3146   // Filter \p FromRegSet through the filter and append passed elements into \p
3147   // ToVRegs. All elements appended are then added to the filter itself.
3148   // \returns true if anything changed.
3149   template <typename RegSetT>
3150   bool filterAndAdd(const RegSetT &FromRegSet,
3151                     SmallVectorImpl<Register> &ToVRegs) {
3152     unsigned SparseUniverse = Sparse.size();
3153     unsigned NewSparseUniverse = SparseUniverse;
3154     unsigned NewDenseSize = Dense.size();
3155     size_t Begin = ToVRegs.size();
3156     for (Register Reg : FromRegSet) {
3157       if (!Reg.isVirtual())
3158         continue;
3159       unsigned Index = Register::virtReg2Index(Reg);
3160       if (Index < SparseUniverseMax) {
3161         if (Index < SparseUniverse && Sparse.test(Index))
3162           continue;
3163         NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
3164       } else {
3165         if (Dense.count(Reg))
3166           continue;
3167         ++NewDenseSize;
3168       }
3169       ToVRegs.push_back(Reg);
3170     }
3171     size_t End = ToVRegs.size();
3172     if (Begin == End)
3173       return false;
3174     // Reserving space in sets once performs better than doing so continuously
3175     // and pays easily for double look-ups (even in Dense with SparseUniverseMax
3176     // tuned all the way down) and double iteration (the second one is over a
3177     // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
3178     Sparse.resize(NewSparseUniverse);
3179     Dense.reserve(NewDenseSize);
3180     for (unsigned I = Begin; I < End; ++I) {
3181       Register Reg = ToVRegs[I];
3182       unsigned Index = Register::virtReg2Index(Reg);
3183       if (Index < SparseUniverseMax)
3184         Sparse.set(Index);
3185       else
3186         Dense.insert(Reg);
3187     }
3188     return true;
3189   }
3190 
3191 private:
3192   static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
3193   // VRegs indexed within SparseUniverseMax are tracked by Sparse; those beyond
3194   // are tracked by Dense. The only purpose of the threshold and the Dense set
3195   // is to have a reasonably growing memory usage in pathological cases (large
3196   // number of very sparse VRegFilter instances live at the same time). In
3197   // practice, even in the worst-by-execution-time cases, having all elements
3198   // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
3199   // space efficient than if tracked by Dense. The threshold is set to keep the
3200   // worst-case memory usage within 2x of figures determined empirically for
3201   // the "all Dense" scenario in such worst-by-execution-time cases.
3202   BitVector Sparse;
3203   DenseSet<unsigned> Dense;
3204 };
3205 
3206 // Implements both a transfer function and a (binary, in-place) join operator
3207 // for a dataflow over register sets with set union join and filtering transfer
3208 // (out_b = in_b \ filter_b). filter_b is expected to be set up ahead of time.
3209 // Maintains out_b as its state, allowing for O(n) iteration over it at any
3210 // time, where n is the size of the set (as opposed to O(U) where U is the
3211 // universe). filter_b implicitly contains all physical registers at all times.
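     // As an informal sketch, the dataflow this supports is:
     //   out_b = (union of incoming sets) \ filter_b
     // with the union realized incrementally by repeated add() calls.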
3212 class FilteringVRegSet {
3213   VRegFilter Filter;
3214   SmallVector<Register, 0> VRegs;
3215 
3216 public:
3217   // Set up the filter_b. \pre Input register set \p RS must have no duplicates.
3218   // Both virtual and physical registers are fine.
3219   template <typename RegSetT> void addToFilter(const RegSetT &RS) {
3220     Filter.add(RS);
3221   }
3222   // Passes \p RS through the filter_b (transfer function) and adds what's left
3223   // to itself (out_b).
3224   template <typename RegSetT> bool add(const RegSetT &RS) {
3225     // The Filter does double duty: to keep VRegs a set (and the join
3226     // operation a set union), just add everything added here to the Filter too.
3227     return Filter.filterAndAdd(RS, VRegs);
3228   }
3229   using const_iterator = decltype(VRegs)::const_iterator;
3230   const_iterator begin() const { return VRegs.begin(); }
3231   const_iterator end() const { return VRegs.end(); }
3232   size_t size() const { return VRegs.size(); }
3233 };
3234 } // namespace
3235 
3236 // Calculate the largest possible vregsPassed sets. These are the registers that
3237 // can pass through an MBB live, but may not be live every time. It is assumed
3238 // that all vregsPassed sets are empty before the call.
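     // Informally, the loop below computes (a sketch of the code, not a
     // separate definition):
     //   vregsPassed[MB] = (union over reachable preds P of
     //                      regsLiveOut[P] | vregsPassed[P])
     //                     \ (regsKilled[MB] | regsLiveOut[MB])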
3239 void MachineVerifier::calcRegsPassed() {
3240   if (MF->empty())
3241     // ReversePostOrderTraversal doesn't handle empty functions.
3242     return;
3243 
3244   for (const MachineBasicBlock *MB :
3245        ReversePostOrderTraversal<const MachineFunction *>(MF)) {
3246     FilteringVRegSet VRegs;
3247     BBInfo &Info = MBBInfoMap[MB];
3248     assert(Info.reachable);
3249 
3250     VRegs.addToFilter(Info.regsKilled);
3251     VRegs.addToFilter(Info.regsLiveOut);
3252     for (const MachineBasicBlock *Pred : MB->predecessors()) {
3253       const BBInfo &PredInfo = MBBInfoMap[Pred];
3254       if (!PredInfo.reachable)
3255         continue;
3256 
3257       VRegs.add(PredInfo.regsLiveOut);
3258       VRegs.add(PredInfo.vregsPassed);
3259     }
3260     Info.vregsPassed.reserve(VRegs.size());
3261     Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
3262   }
3263 }
3264 
3265 // Calculate the set of virtual registers that must be passed through each basic
3266 // block in order to satisfy the requirements of successor blocks. This is very
3267 // similar to calcRegsPassed, only backwards.
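     // Informally: vregsRequired[P] accumulates every vreg that some
     // (transitive) successor reads before defining it, propagated backwards
     // with a worklist until a fixed point is reached.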
3268 void MachineVerifier::calcRegsRequired() {
3269   // First push live-in regs to predecessors' vregsRequired.
3270   SmallPtrSet<const MachineBasicBlock*, 8> todo;
3271   for (const auto &MBB : *MF) {
3272     BBInfo &MInfo = MBBInfoMap[&MBB];
3273     for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3274       BBInfo &PInfo = MBBInfoMap[Pred];
3275       if (PInfo.addRequired(MInfo.vregsLiveIn))
3276         todo.insert(Pred);
3277     }
3278 
3279     // Handle the PHI node.
3280     for (const MachineInstr &MI : MBB.phis()) {
3281       for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3282         // Skip operands that are not registers or do not read the register.
3283         if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3284           continue;
3285 
3286         // Get register and predecessor for one PHI edge.
3287         Register Reg = MI.getOperand(i).getReg();
3288         const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3289 
3290         BBInfo &PInfo = MBBInfoMap[Pred];
3291         if (PInfo.addRequired(Reg))
3292           todo.insert(Pred);
3293       }
3294     }
3295   }
3296 
3297   // Iteratively push vregsRequired to predecessors. This will converge to the
3298   // same final state regardless of DenseSet iteration order.
3299   while (!todo.empty()) {
3300     const MachineBasicBlock *MBB = *todo.begin();
3301     todo.erase(MBB);
3302     BBInfo &MInfo = MBBInfoMap[MBB];
3303     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3304       if (Pred == MBB)
3305         continue;
3306       BBInfo &SInfo = MBBInfoMap[Pred];
3307       if (SInfo.addRequired(MInfo.vregsRequired))
3308         todo.insert(Pred);
3309     }
3310   }
3311 }
3312 
3313 // Check PHI instructions at the beginning of MBB. It is assumed that
3314 // calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3315 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3316   BBInfo &MInfo = MBBInfoMap[&MBB];
3317 
3318   SmallPtrSet<const MachineBasicBlock*, 8> seen;
3319   for (const MachineInstr &Phi : MBB) {
3320     if (!Phi.isPHI())
3321       break;
3322     seen.clear();
3323 
3324     const MachineOperand &MODef = Phi.getOperand(0);
3325     if (!MODef.isReg() || !MODef.isDef()) {
3326       report("Expected first PHI operand to be a register def", &MODef, 0);
3327       continue;
3328     }
3329     if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3330         MODef.isEarlyClobber() || MODef.isDebug())
3331       report("Unexpected flag on PHI operand", &MODef, 0);
3332     Register DefReg = MODef.getReg();
3333     if (!DefReg.isVirtual())
3334       report("Expected first PHI operand to be a virtual register", &MODef, 0);
3335 
3336     for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3337       const MachineOperand &MO0 = Phi.getOperand(I);
3338       if (!MO0.isReg()) {
3339         report("Expected PHI operand to be a register", &MO0, I);
3340         continue;
3341       }
3342       if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3343           MO0.isDebug() || MO0.isTied())
3344         report("Unexpected flag on PHI operand", &MO0, I);
3345 
3346       const MachineOperand &MO1 = Phi.getOperand(I + 1);
3347       if (!MO1.isMBB()) {
3348         report("Expected PHI operand to be a basic block", &MO1, I + 1);
3349         continue;
3350       }
3351 
3352       const MachineBasicBlock &Pre = *MO1.getMBB();
3353       if (!Pre.isSuccessor(&MBB)) {
3354         report("PHI input is not a predecessor block", &MO1, I + 1);
3355         continue;
3356       }
3357 
3358       if (MInfo.reachable) {
3359         seen.insert(&Pre);
3360         BBInfo &PrInfo = MBBInfoMap[&Pre];
3361         if (!MO0.isUndef() && PrInfo.reachable &&
3362             !PrInfo.isLiveOut(MO0.getReg()))
3363           report("PHI operand is not live-out from predecessor", &MO0, I);
3364       }
3365     }
3366 
3367     // Did we see all predecessors?
3368     if (MInfo.reachable) {
3369       for (MachineBasicBlock *Pred : MBB.predecessors()) {
3370         if (!seen.count(Pred)) {
3371           report("Missing PHI operand", &Phi);
3372           OS << printMBBReference(*Pred)
3373              << " is a predecessor according to the CFG.\n";
3374         }
3375       }
3376     }
3377   }
3378 }
3379 
3380 static void
3381 verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT,
3382                          std::function<void(const Twine &Message)> FailureCB,
3383                          raw_ostream &OS) {
3384   MachineConvergenceVerifier CV;
3385   CV.initialize(&OS, FailureCB, MF);
3386 
3387   for (const auto &MBB : MF) {
3388     CV.visit(MBB);
3389     for (const auto &MI : MBB.instrs())
3390       CV.visit(MI);
3391   }
3392 
3393   if (CV.sawTokens()) {
3394     DT.recalculate(const_cast<MachineFunction &>(MF));
3395     CV.verify(DT);
3396   }
3397 }
3398 
3399 void MachineVerifier::visitMachineFunctionAfter() {
3400   auto FailureCB = [this](const Twine &Message) {
3401     report(Message.str().c_str(), MF);
3402   };
3403   verifyConvergenceControl(*MF, DT, FailureCB, OS);
3404 
3405   calcRegsPassed();
3406 
3407   for (const MachineBasicBlock &MBB : *MF)
3408     checkPHIOps(MBB);
3409 
3410   // Now check liveness info if available
3411   calcRegsRequired();
3412 
3413   // Check for killed virtual registers that should be live out.
3414   for (const auto &MBB : *MF) {
3415     BBInfo &MInfo = MBBInfoMap[&MBB];
3416     for (Register VReg : MInfo.vregsRequired)
3417       if (MInfo.regsKilled.count(VReg)) {
3418         report("Virtual register killed in block, but needed live out.", &MBB);
3419         OS << "Virtual register " << printReg(VReg)
3420            << " is used after the block.\n";
3421       }
3422   }
3423 
3424   if (!MF->empty()) {
3425     BBInfo &MInfo = MBBInfoMap[&MF->front()];
3426     for (Register VReg : MInfo.vregsRequired) {
3427       report("Virtual register defs don't dominate all uses.", MF);
3428       report_context_vreg(VReg);
3429     }
3430   }
3431 
3432   if (LiveVars)
3433     verifyLiveVariables();
3434   if (LiveInts)
3435     verifyLiveIntervals();
3436 
3437   // Check live-in list of each MBB. If a register is live into MBB, check
3438   // that the register is in regsLiveOut of each predecessor block. Since
3439   // this must come from a definition in the predecessor or its live-in
3440   // list, this will catch a live-through case where the predecessor does not
3441   // have the register in its live-in list. This currently only checks
3442   // registers that have no aliases, are not allocatable and are not
3443   // reserved, which could mean a condition code register for instance.
3444   if (MRI->tracksLiveness())
3445     for (const auto &MBB : *MF)
3446       for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
3447         MCPhysReg LiveInReg = P.PhysReg;
3448         bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3449         if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3450           continue;
3451         for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3452           BBInfo &PInfo = MBBInfoMap[Pred];
3453           if (!PInfo.regsLiveOut.count(LiveInReg)) {
3454             report("Live in register not found to be live out from predecessor.",
3455                    &MBB);
3456             OS << TRI->getName(LiveInReg) << " not found to be live out from "
3457                << printMBBReference(*Pred) << '\n';
3458           }
3459         }
3460       }
3461 
3462   for (auto CSInfo : MF->getCallSitesInfo())
3463     if (!CSInfo.first->isCall())
3464       report("Call site info references an instruction that is not a call", MF);
3465 
3466   // If there's debug-info, check that we don't have any duplicate value
3467   // tracking numbers.
3468   if (MF->getFunction().getSubprogram()) {
3469     DenseSet<unsigned> SeenNumbers;
3470     for (const auto &MBB : *MF) {
3471       for (const auto &MI : MBB) {
3472         if (auto Num = MI.peekDebugInstrNum()) {
3473           auto Result = SeenNumbers.insert((unsigned)Num);
3474           if (!Result.second)
3475             report("Instruction has a duplicated value tracking number", &MI);
3476         }
3477       }
3478     }
3479   }
3480 }
3481 
3482 void MachineVerifier::verifyLiveVariables() {
3483   assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3484   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3485     Register Reg = Register::index2VirtReg(I);
3486     LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3487     for (const auto &MBB : *MF) {
3488       BBInfo &MInfo = MBBInfoMap[&MBB];
3489 
3490       // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3491       if (MInfo.vregsRequired.count(Reg)) {
3492         if (!VI.AliveBlocks.test(MBB.getNumber())) {
3493           report("LiveVariables: Block missing from AliveBlocks", &MBB);
3494           OS << "Virtual register " << printReg(Reg)
3495              << " must be live through the block.\n";
3496         }
3497       } else {
3498         if (VI.AliveBlocks.test(MBB.getNumber())) {
3499           report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3500           OS << "Virtual register " << printReg(Reg)
3501              << " is not needed live through the block.\n";
3502         }
3503       }
3504     }
3505   }
3506 }
3507 
3508 void MachineVerifier::verifyLiveIntervals() {
3509   assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3510   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3511     Register Reg = Register::index2VirtReg(I);
3512 
3513     // Spilling and splitting may leave unused registers around. Skip them.
3514     if (MRI->reg_nodbg_empty(Reg))
3515       continue;
3516 
3517     if (!LiveInts->hasInterval(Reg)) {
3518       report("Missing live interval for virtual register", MF);
3519       OS << printReg(Reg, TRI) << " still has defs or uses\n";
3520       continue;
3521     }
3522 
3523     const LiveInterval &LI = LiveInts->getInterval(Reg);
3524     assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3525     verifyLiveInterval(LI);
3526   }
3527 
3528   // Verify all the cached regunit intervals.
3529   for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
3530     if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
3531       verifyLiveRange(*LR, i);
3532 }
3533 
3534 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
3535                                            const VNInfo *VNI, Register Reg,
3536                                            LaneBitmask LaneMask) {
3537   if (VNI->isUnused())
3538     return;
3539 
3540   const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
3541 
3542   if (!DefVNI) {
3543     report("Value not live at VNInfo def and not marked unused", MF);
3544     report_context(LR, Reg, LaneMask);
3545     report_context(*VNI);
3546     return;
3547   }
3548 
3549   if (DefVNI != VNI) {
3550     report("Live segment at def has different VNInfo", MF);
3551     report_context(LR, Reg, LaneMask);
3552     report_context(*VNI);
3553     return;
3554   }
3555 
3556   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3557   if (!MBB) {
3558     report("Invalid VNInfo definition index", MF);
3559     report_context(LR, Reg, LaneMask);
3560     report_context(*VNI);
3561     return;
3562   }
3563 
3564   if (VNI->isPHIDef()) {
3565     if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3566       report("PHIDef VNInfo is not defined at MBB start", MBB);
3567       report_context(LR, Reg, LaneMask);
3568       report_context(*VNI);
3569     }
3570     return;
3571   }
3572 
3573   // Non-PHI def.
3574   const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3575   if (!MI) {
3576     report("No instruction at VNInfo def index", MBB);
3577     report_context(LR, Reg, LaneMask);
3578     report_context(*VNI);
3579     return;
3580   }
3581 
3582   if (Reg != 0) {
3583     bool hasDef = false;
3584     bool isEarlyClobber = false;
3585     for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3586       if (!MOI->isReg() || !MOI->isDef())
3587         continue;
3588       if (Reg.isVirtual()) {
3589         if (MOI->getReg() != Reg)
3590           continue;
3591       } else {
3592         if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
3593           continue;
3594       }
3595       if (LaneMask.any() &&
3596           (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3597         continue;
3598       hasDef = true;
3599       if (MOI->isEarlyClobber())
3600         isEarlyClobber = true;
3601     }
3602 
3603     if (!hasDef) {
3604       report("Defining instruction does not modify register", MI);
3605       report_context(LR, Reg, LaneMask);
3606       report_context(*VNI);
3607     }
3608 
3609     // Early clobber defs begin at early-clobber slots, but other defs must
3610     // begin at register slots.
3611     if (isEarlyClobber) {
3612       if (!VNI->def.isEarlyClobber()) {
3613         report("Early clobber def must be at an early-clobber slot", MBB);
3614         report_context(LR, Reg, LaneMask);
3615         report_context(*VNI);
3616       }
3617     } else if (!VNI->def.isRegister()) {
3618       report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3619       report_context(LR, Reg, LaneMask);
3620       report_context(*VNI);
3621     }
3622   }
3623 }
3624 
3625 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3626                                              const LiveRange::const_iterator I,
3627                                              Register Reg,
3628                                              LaneBitmask LaneMask) {
3629   const LiveRange::Segment &S = *I;
3630   const VNInfo *VNI = S.valno;
3631   assert(VNI && "Live segment has no valno");
3632 
3633   if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3634     report("Foreign valno in live segment", MF);
3635     report_context(LR, Reg, LaneMask);
3636     report_context(S);
3637     report_context(*VNI);
3638   }
3639 
3640   if (VNI->isUnused()) {
3641     report("Live segment valno is marked unused", MF);
3642     report_context(LR, Reg, LaneMask);
3643     report_context(S);
3644   }
3645 
3646   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3647   if (!MBB) {
3648     report("Bad start of live segment, no basic block", MF);
3649     report_context(LR, Reg, LaneMask);
3650     report_context(S);
3651     return;
3652   }
3653   SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3654   if (S.start != MBBStartIdx && S.start != VNI->def) {
3655     report("Live segment must begin at MBB entry or valno def", MBB);
3656     report_context(LR, Reg, LaneMask);
3657     report_context(S);
3658   }
3659 
3660   const MachineBasicBlock *EndMBB =
3661     LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3662   if (!EndMBB) {
3663     report("Bad end of live segment, no basic block", MF);
3664     report_context(LR, Reg, LaneMask);
3665     report_context(S);
3666     return;
3667   }
3668 
3669   // Checks for non-live-out segments.
3670   if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3671     // RegUnit intervals are allowed to have dead PHI defs.
3672     if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3673         S.end == VNI->def.getDeadSlot())
3674       return;
3675 
3676     // The live segment is ending inside EndMBB.
3677     const MachineInstr *MI =
3678         LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3679     if (!MI) {
3680       report("Live segment doesn't end at a valid instruction", EndMBB);
3681       report_context(LR, Reg, LaneMask);
3682       report_context(S);
3683       return;
3684     }
3685 
3686     // The block slot must refer to a basic block boundary.
3687     if (S.end.isBlock()) {
3688       report("Live segment ends at B slot of an instruction", EndMBB);
3689       report_context(LR, Reg, LaneMask);
3690       report_context(S);
3691     }
3692 
3693     if (S.end.isDead()) {
3694       // Segment ends on the dead slot.
3695       // That means there must be a dead def.
3696       if (!SlotIndex::isSameInstr(S.start, S.end)) {
3697         report("Live segment ending at dead slot spans instructions", EndMBB);
3698         report_context(LR, Reg, LaneMask);
3699         report_context(S);
3700       }
3701     }
3702 
3703     // After tied operands are rewritten, a live segment can only end at an
3704     // early-clobber slot if it is being redefined by an early-clobber def.
3705     // TODO: Before tied operands are rewritten, a live segment can only end at
3706     // an early-clobber slot if the last use is tied to an early-clobber def.
3707     if (MF->getProperties().hasProperty(
3708             MachineFunctionProperties::Property::TiedOpsRewritten) &&
3709         S.end.isEarlyClobber()) {
3710       if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3711         report("Live segment ending at early clobber slot must be "
3712                "redefined by an EC def in the same instruction",
3713                EndMBB);
3714         report_context(LR, Reg, LaneMask);
3715         report_context(S);
3716       }
3717     }
3718 
3719     // The following checks only apply to virtual registers. Physreg liveness
3720     // is too weird to check.
3721     if (Reg.isVirtual()) {
3722       // A live segment can end with either a redefinition, a kill flag on a
3723       // use, or a dead flag on a def.
3724       bool hasRead = false;
3725       bool hasSubRegDef = false;
3726       bool hasDeadDef = false;
3727       for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3728         if (!MOI->isReg() || MOI->getReg() != Reg)
3729           continue;
3730         unsigned Sub = MOI->getSubReg();
3731         LaneBitmask SLM =
3732             Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3733         if (MOI->isDef()) {
3734           if (Sub != 0) {
3735             hasSubRegDef = true;
3736             // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3737             // mask for subregister defs. Read-undef defs will be handled by
3738             // readsReg below.
3739             SLM = ~SLM;
3740           }
3741           if (MOI->isDead())
3742             hasDeadDef = true;
3743         }
3744         if (LaneMask.any() && (LaneMask & SLM).none())
3745           continue;
3746         if (MOI->readsReg())
3747           hasRead = true;
3748       }
3749       if (S.end.isDead()) {
3750         // Make sure that the corresponding machine operand for a "dead" live
3751         // range has the dead flag. We cannot perform this check for subregister
3752         // live ranges, as partially dead values are allowed.
3753         if (LaneMask.none() && !hasDeadDef) {
3754           report(
3755               "Instruction ending live segment on dead slot has no dead flag",
3756               MI);
3757           report_context(LR, Reg, LaneMask);
3758           report_context(S);
3759         }
3760       } else {
3761         if (!hasRead) {
3762           // When tracking subregister liveness, the main range must start new
3763           // values on partial register writes, even if there is no read.
3764           if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3765               !hasSubRegDef) {
3766             report("Instruction ending live segment doesn't read the register",
3767                    MI);
3768             report_context(LR, Reg, LaneMask);
3769             report_context(S);
3770           }
3771         }
3772       }
3773     }
3774   }
3775 
3776   // Now check all the basic blocks in this live segment.
3777   MachineFunction::const_iterator MFI = MBB->getIterator();
3778   // Is this live segment the beginning of a non-PHIDef VN?
3779   if (S.start == VNI->def && !VNI->isPHIDef()) {
3780     // Not live-in to any blocks.
3781     if (MBB == EndMBB)
3782       return;
3783     // Skip this block.
3784     ++MFI;
3785   }
3786 
3787   SmallVector<SlotIndex, 4> Undefs;
3788   if (LaneMask.any()) {
3789     LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3790     OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3791   }
3792 
3793   while (true) {
3794     assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3795     // We don't know how to track physregs into a landing pad.
3796     if (!Reg.isVirtual() && MFI->isEHPad()) {
3797       if (&*MFI == EndMBB)
3798         break;
3799       ++MFI;
3800       continue;
3801     }
3802 
3803     // Is VNI a PHI-def in the current block?
3804     bool IsPHI = VNI->isPHIDef() &&
3805       VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3806 
3807     // Check that VNI is live-out of all predecessors.
3808     for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3809       SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3810       // A predecessor of a landing pad is live-out at its last call.
3811       if (MFI->isEHPad()) {
3812         for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3813           if (MI.isCall()) {
3814             PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3815             break;
3816           }
3817         }
3818       }
3819       const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3820 
3821       // All predecessors must have a live-out value. However, for a PHI
3822       // instruction with subregister intervals, only one of the
3823       // subregisters (not necessarily the current one) needs to be
3824       // defined.
3825       if (!PVNI && (LaneMask.none() || !IsPHI)) {
3826         if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3827           continue;
3828         report("Register not marked live out of predecessor", Pred);
3829         report_context(LR, Reg, LaneMask);
3830         report_context(*VNI);
3831         OS << " live into " << printMBBReference(*MFI) << '@'
3832            << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd
3833            << '\n';
3834         continue;
3835       }
3836 
3837       // Only PHI-defs can take different predecessor values.
3838       if (!IsPHI && PVNI != VNI) {
3839         report("Different value live out of predecessor", Pred);
3840         report_context(LR, Reg, LaneMask);
3841         OS << "Valno #" << PVNI->id << " live out of "
3842            << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" << VNI->id
3843            << " live into " << printMBBReference(*MFI) << '@'
3844            << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3845       }
3846     }
3847     if (&*MFI == EndMBB)
3848       break;
3849     ++MFI;
3850   }
3851 }
3852 
3853 void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3854                                       LaneBitmask LaneMask) {
3855   for (const VNInfo *VNI : LR.valnos)
3856     verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3857 
3858   for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3859     verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3860 }
3861 
3862 void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3863   Register Reg = LI.reg();
3864   assert(Reg.isVirtual());
3865   verifyLiveRange(LI, Reg);
3866 
3867   if (LI.hasSubRanges()) {
3868     LaneBitmask Mask;
3869     LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3870     for (const LiveInterval::SubRange &SR : LI.subranges()) {
3871       if ((Mask & SR.LaneMask).any()) {
3872         report("Lane masks of sub ranges overlap in live interval", MF);
3873         report_context(LI);
3874       }
3875       if ((SR.LaneMask & ~MaxMask).any()) {
3876         report("Subrange lanemask is invalid", MF);
3877         report_context(LI);
3878       }
3879       if (SR.empty()) {
3880         report("Subrange must not be empty", MF);
3881         report_context(SR, LI.reg(), SR.LaneMask);
3882       }
3883       Mask |= SR.LaneMask;
3884       verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3885       if (!LI.covers(SR)) {
3886         report("A Subrange is not covered by the main range", MF);
3887         report_context(LI);
3888       }
3889     }
3890   }
3891 
3892   // Check the LI only has one connected component.
3893   ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3894   unsigned NumComp = ConEQ.Classify(LI);
3895   if (NumComp > 1) {
3896     report("Multiple connected components in live interval", MF);
3897     report_context(LI);
3898     for (unsigned comp = 0; comp != NumComp; ++comp) {
3899       OS << comp << ": valnos";
3900       for (const VNInfo *I : LI.valnos)
3901         if (comp == ConEQ.getEqClass(I))
3902           OS << ' ' << I->id;
3903       OS << '\n';
3904     }
3905   }
3906 }
3907 
3908 namespace {
3909 
3910 // FrameSetup and FrameDestroy can have zero adjustment, so with a single
3911 // integer alone we can't tell whether a value of zero means a FrameSetup or
3912 // a FrameDestroy state. We therefore use a bool plus an integer to capture
3913 // the stack state.
3914 struct StackStateOfBB {
3915   StackStateOfBB() = default;
3916   StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
3917       : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3918         ExitIsSetup(ExitSetup) {}
3919 
3920   // Can be negative, which means we are setting up a frame.
3921   int EntryValue = 0;
3922   int ExitValue = 0;
3923   bool EntryIsSetup = false;
3924   bool ExitIsSetup = false;
3925 };
3926 
3927 } // end anonymous namespace
3928 
3929 /// Make sure that on every path through the CFG, a FrameSetup <n> is always
3930 /// followed by a FrameDestroy <n>, stack adjustments match on all CFG edges
3931 /// to a merge point, and the frame is destroyed at the end of a return block.
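     /// A well-formed call sequence looks roughly like this (a sketch only;
     /// the exact pseudo opcodes and operands are target-specific, x86-64
     /// shown):
     ///   ADJCALLSTACKDOWN64 16, ...   ; FrameSetup <16>
     ///   CALL64pcrel32 @callee, ...
     ///   ADJCALLSTACKUP64 16, ...     ; FrameDestroy <16>
     /// so a return block must end with a net stack adjustment of zero.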
3932 void MachineVerifier::verifyStackFrame() {
3933   unsigned FrameSetupOpcode   = TII->getCallFrameSetupOpcode();
3934   unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3935   if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3936     return;
3937 
3938   SmallVector<StackStateOfBB, 8> SPState;
3939   SPState.resize(MF->getNumBlockIDs());
3940   df_iterator_default_set<const MachineBasicBlock*> Reachable;
3941 
3942   // Visit the MBBs in DFS order.
3943   for (df_ext_iterator<const MachineFunction *,
3944                        df_iterator_default_set<const MachineBasicBlock *>>
3945        DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3946        DFI != DFE; ++DFI) {
3947     const MachineBasicBlock *MBB = *DFI;
3948 
3949     StackStateOfBB BBState;
3950     // Check the exit state of the DFS stack predecessor.
3951     if (DFI.getPathLength() >= 2) {
3952       const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3953       assert(Reachable.count(StackPred) &&
3954              "DFS stack predecessor is already visited.\n");
3955       BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3956       BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3957       BBState.ExitValue = BBState.EntryValue;
3958       BBState.ExitIsSetup = BBState.EntryIsSetup;
3959     }
3960 
3961     if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3962       report("Call frame size on entry does not match value computed from "
3963              "predecessor",
3964              MBB);
3965       OS << "Call frame size on entry " << MBB->getCallFrameSize()
3966          << " does not match value computed from predecessor "
3967          << -BBState.EntryValue << '\n';
3968     }
3969 
3970     // Update stack state by checking contents of MBB.
3971     for (const auto &I : *MBB) {
3972       if (I.getOpcode() == FrameSetupOpcode) {
3973         if (BBState.ExitIsSetup)
3974           report("FrameSetup is after another FrameSetup", &I);
3975         if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3976           report("AdjustsStack not set in presence of a frame pseudo "
3977                  "instruction.", &I);
3978         BBState.ExitValue -= TII->getFrameTotalSize(I);
3979         BBState.ExitIsSetup = true;
3980       }
3981 
3982       if (I.getOpcode() == FrameDestroyOpcode) {
3983         int Size = TII->getFrameTotalSize(I);
3984         if (!BBState.ExitIsSetup)
3985           report("FrameDestroy is not after a FrameSetup", &I);
3986         int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3987                                                BBState.ExitValue;
3988         if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3989           report("FrameDestroy <n> is after FrameSetup <m>", &I);
3990           OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
3991              << AbsSPAdj << ">.\n";
3992         }
3993         if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3994           report("AdjustsStack not set in presence of a frame pseudo "
3995                  "instruction.", &I);
3996         BBState.ExitValue += Size;
3997         BBState.ExitIsSetup = false;
3998       }
3999     }
4000     SPState[MBB->getNumber()] = BBState;
4001 
4002     // Make sure the exit state of any predecessor is consistent with the entry
4003     // state.
4004     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
4005       if (Reachable.count(Pred) &&
4006           (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
4007            SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
4008         report("The exit stack state of a predecessor is inconsistent.", MBB);
4009         OS << "Predecessor " << printMBBReference(*Pred) << " has exit state ("
4010            << SPState[Pred->getNumber()].ExitValue << ", "
4011            << SPState[Pred->getNumber()].ExitIsSetup << "), while "
4012            << printMBBReference(*MBB) << " has entry state ("
4013            << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
4014       }
4015     }
4016 
4017     // Make sure the entry state of any successor is consistent with the exit
4018     // state.
4019     for (const MachineBasicBlock *Succ : MBB->successors()) {
4020       if (Reachable.count(Succ) &&
4021           (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
4022            SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
4023         report("The entry stack state of a successor is inconsistent.", MBB);
4024         OS << "Successor " << printMBBReference(*Succ) << " has entry state ("
4025            << SPState[Succ->getNumber()].EntryValue << ", "
4026            << SPState[Succ->getNumber()].EntryIsSetup << "), while "
4027            << printMBBReference(*MBB) << " has exit state ("
4028            << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
4029       }
4030     }
4031 
4032     // Make sure a basic block with return ends with zero stack adjustment.
4033     if (!MBB->empty() && MBB->back().isReturn()) {
4034       if (BBState.ExitIsSetup)
4035         report("A return block ends with a FrameSetup.", MBB);
4036       if (BBState.ExitValue)
4037         report("A return block ends with a nonzero stack adjustment.", MBB);
4038     }
4039   }
4040 }
4041