//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
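//
// For example, one illustrative way to enable it (the input file name is
// hypothetical):
//
//   llc -verify-machineinstrs input.ll
//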
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineVerifier.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConvergenceVerifier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

  struct MachineVerifier {
    MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b,
                    raw_ostream *OS)
        : MFAM(&MFAM), OS(OS ? *OS : nulls()), Banner(b) {}

    MachineVerifier(Pass *pass, const char *b, raw_ostream *OS)
        : PASS(pass), OS(OS ? *OS : nulls()), Banner(b) {}

    MachineVerifier(const char *b, LiveVariables *LiveVars,
                    LiveIntervals *LiveInts, LiveStacks *LiveStks,
                    SlotIndexes *Indexes, raw_ostream *OS)
        : OS(OS ? *OS : nulls()), Banner(b), LiveVars(LiveVars),
          LiveInts(LiveInts), LiveStks(LiveStks), Indexes(Indexes) {}

    unsigned verify(const MachineFunction &MF);

    MachineFunctionAnalysisManager *MFAM = nullptr;
    Pass *const PASS = nullptr;
    raw_ostream &OS;
    const char *Banner;
    const MachineFunction *MF = nullptr;
    const TargetMachine *TM = nullptr;
    const TargetInstrInfo *TII = nullptr;
    const TargetRegisterInfo *TRI = nullptr;
    const MachineRegisterInfo *MRI = nullptr;
    const RegisterBankInfo *RBI = nullptr;

    unsigned foundErrors = 0;

    // Avoid querying the MachineFunctionProperties for each operand.
    bool isFunctionRegBankSelected = false;
    bool isFunctionSelected = false;
    bool isFunctionTracksDebugUserValues = false;

    using RegVector = SmallVector<Register, 16>;
    using RegMaskVector = SmallVector<const uint32_t *, 4>;
    using RegSet = DenseSet<Register>;
    using RegMap = DenseMap<Register, const MachineInstr *>;
    using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

    const MachineInstr *FirstNonPHI = nullptr;
    const MachineInstr *FirstTerminator = nullptr;
    BlockSet FunctionBlocks;

    BitVector regsReserved;
    RegSet regsLive;
    RegVector regsDefined, regsDead, regsKilled;
    RegMaskVector regMasks;

    SlotIndex lastIndex;

    // Add Reg and any sub-registers to RV
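    // For illustration: adding X86's RAX would also append sub-registers
    // such as EAX, AX, and AL.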
    void addRegWithSubRegs(RegVector &RV, Register Reg) {
      RV.push_back(Reg);
      if (Reg.isPhysical())
        append_range(RV, TRI->subregs(Reg.asMCReg()));
    }

    struct BBInfo {
      // Is this MBB reachable from the MF entry point?
      bool reachable = false;

      // Vregs that must be live in because they are used without being
      // defined. Map value is the user. vregsLiveIn doesn't include regs
      // that are only used by PHI nodes.
      RegMap vregsLiveIn;

      // Regs killed in MBB. They may be defined again, and will then be in both
      // regsKilled and regsLiveOut.
      RegSet regsKilled;

      // Regs defined in MBB and live out. Note that vregs passing through may
      // be live out without being mentioned here.
      RegSet regsLiveOut;

      // Vregs that pass through MBB untouched. This set is disjoint from
      // regsKilled and regsLiveOut.
      RegSet vregsPassed;

      // Vregs that must pass through MBB because they are needed by a successor
      // block. This set is disjoint from regsLiveOut.
      RegSet vregsRequired;

      // Set versions of block's predecessor and successor lists.
      BlockSet Preds, Succs;

      BBInfo() = default;

      // Add register to vregsRequired if it belongs there. Return true if
      // anything changed.
      bool addRequired(Register Reg) {
        if (!Reg.isVirtual())
          return false;
        if (regsLiveOut.count(Reg))
          return false;
        return vregsRequired.insert(Reg).second;
      }

      // Same for a full set.
      bool addRequired(const RegSet &RS) {
        bool Changed = false;
        for (Register Reg : RS)
          Changed |= addRequired(Reg);
        return Changed;
      }

      // Same for a full map.
      bool addRequired(const RegMap &RM) {
        bool Changed = false;
        for (const auto &I : RM)
          Changed |= addRequired(I.first);
        return Changed;
      }

      // Live-out registers are either in regsLiveOut or vregsPassed.
      bool isLiveOut(Register Reg) const {
        return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
      }
    };

    // Extra register info per MBB.
    DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;

    bool isReserved(Register Reg) {
      return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
    }

    bool isAllocatable(Register Reg) const {
      return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
             !regsReserved.test(Reg.id());
    }

    // Analysis information if available
    LiveVariables *LiveVars = nullptr;
    LiveIntervals *LiveInts = nullptr;
    LiveStacks *LiveStks = nullptr;
    SlotIndexes *Indexes = nullptr;

    // This is calculated only when trying to verify convergence control tokens.
    // Similar to the LLVM IR verifier, we calculate this locally instead of
    // relying on the pass manager.
    MachineDominatorTree DT;

    void visitMachineFunctionBefore();
    void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
    void visitMachineBundleBefore(const MachineInstr *MI);

    /// Verify that all of \p MI's virtual register operands are scalars.
    /// \returns True if all virtual register operands are scalar. False
    /// otherwise.
    bool verifyAllRegOpsScalar(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI);
    bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

    bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
    bool verifyGIntrinsicConvergence(const MachineInstr *MI);
    void verifyPreISelGenericInstruction(const MachineInstr *MI);

    void visitMachineInstrBefore(const MachineInstr *MI);
    void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
    void visitMachineBundleAfter(const MachineInstr *MI);
    void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
    void visitMachineFunctionAfter();

    void report(const char *msg, const MachineFunction *MF);
    void report(const char *msg, const MachineBasicBlock *MBB);
    void report(const char *msg, const MachineInstr *MI);
    void report(const char *msg, const MachineOperand *MO, unsigned MONum,
                LLT MOVRegType = LLT{});
    void report(const Twine &Msg, const MachineInstr *MI);

    void report_context(const LiveInterval &LI) const;
    void report_context(const LiveRange &LR, Register VRegUnit,
                        LaneBitmask LaneMask) const;
    void report_context(const LiveRange::Segment &S) const;
    void report_context(const VNInfo &VNI) const;
    void report_context(SlotIndex Pos) const;
    void report_context(MCPhysReg PhysReg) const;
    void report_context_liverange(const LiveRange &LR) const;
    void report_context_lanemask(LaneBitmask LaneMask) const;
    void report_context_vreg(Register VReg) const;
    void report_context_vreg_regunit(Register VRegOrUnit) const;

    void verifyInlineAsm(const MachineInstr *MI);

    void checkLiveness(const MachineOperand *MO, unsigned MONum);
    void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                            SlotIndex UseIdx, const LiveRange &LR,
                            Register VRegOrUnit,
                            LaneBitmask LaneMask = LaneBitmask::getNone());
    void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                            SlotIndex DefIdx, const LiveRange &LR,
                            Register VRegOrUnit, bool SubRangeCheck = false,
                            LaneBitmask LaneMask = LaneBitmask::getNone());

    void markReachable(const MachineBasicBlock *MBB);
    void calcRegsPassed();
    void checkPHIOps(const MachineBasicBlock &MBB);

    void calcRegsRequired();
    void verifyLiveVariables();
    void verifyLiveIntervals();
    void verifyLiveInterval(const LiveInterval&);
    void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                              LaneBitmask);
    void verifyLiveRangeSegment(const LiveRange &,
                                const LiveRange::const_iterator I, Register,
                                LaneBitmask);
    void verifyLiveRange(const LiveRange &, Register,
                         LaneBitmask LaneMask = LaneBitmask::getNone());

    void verifyStackFrame();

    void verifySlotIndexes() const;
    void verifyProperties(const MachineFunction &MF);
  };

  struct MachineVerifierLegacyPass : public MachineFunctionPass {
    static char ID; // Pass ID, replacement for typeid

    const std::string Banner;

    MachineVerifierLegacyPass(std::string banner = std::string())
        : MachineFunctionPass(ID), Banner(std::move(banner)) {
      initializeMachineVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addUsedIfAvailable<LiveStacks>();
      AU.addUsedIfAvailable<LiveVariablesWrapperPass>();
      AU.addUsedIfAvailable<SlotIndexesWrapperPass>();
      AU.addUsedIfAvailable<LiveIntervalsWrapperPass>();
      AU.setPreservesAll();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Skip functions that have known verification problems.
      // FIXME: Remove this mechanism when all problematic passes have been
      // fixed.
      if (MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::FailsVerification))
        return false;

      unsigned FoundErrors =
          MachineVerifier(this, Banner.c_str(), &errs()).verify(MF);
      if (FoundErrors)
        report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
      return false;
    }
  };

} // end anonymous namespace

PreservedAnalyses
MachineVerifierPass::run(MachineFunction &MF,
                         MachineFunctionAnalysisManager &MFAM) {
  // Skip functions that have known verification problems.
  // FIXME: Remove this mechanism when all problematic passes have been
  // fixed.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailsVerification))
    return PreservedAnalyses::all();
  unsigned FoundErrors =
      MachineVerifier(MFAM, Banner.c_str(), &errs()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
  return PreservedAnalyses::all();
}

char MachineVerifierLegacyPass::ID = 0;

INITIALIZE_PASS(MachineVerifierLegacyPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierLegacyPass(Banner);
}

void llvm::verifyMachineFunction(const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors =
      MachineVerifier(nullptr, Banner.c_str(), &errs()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}

bool MachineFunction::verify(Pass *p, const char *Banner, raw_ostream *OS,
                             bool AbortOnErrors) const {
  MachineFunction &MF = const_cast<MachineFunction&>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner, OS).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
  return FoundErrors == 0;
}

bool MachineFunction::verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
                             const char *Banner, raw_ostream *OS,
                             bool AbortOnErrors) const {
  MachineFunction &MF = const_cast<MachineFunction &>(*this);
  unsigned FoundErrors =
      MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes, OS)
          .verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
  return FoundErrors == 0;
}

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  if (PASS) {
    auto *LISWrapper = PASS->getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
    LiveInts = LISWrapper ? &LISWrapper->getLIS() : nullptr;
    // We don't want to verify LiveVariables if LiveIntervals is available.
    auto *LVWrapper = PASS->getAnalysisIfAvailable<LiveVariablesWrapperPass>();
    if (!LiveInts)
      LiveVars = LVWrapper ? &LVWrapper->getLV() : nullptr;
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    auto *SIWrapper = PASS->getAnalysisIfAvailable<SlotIndexesWrapperPass>();
    Indexes = SIWrapper ? &SIWrapper->getSI() : nullptr;
  }
  if (MFAM) {
    MachineFunction &Func = const_cast<MachineFunction &>(MF);
    LiveInts = MFAM->getCachedResult<LiveIntervalsAnalysis>(Func);
    if (!LiveInts)
      LiveVars = MFAM->getCachedResult<LiveVariablesAnalysis>(Func);
    // TODO: LiveStks = MFAM->getCachedResult<LiveStacksAnalysis>(Func);
    Indexes = MFAM->getCachedResult<SlotIndexesAnalysis>(Func);
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        OS << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  OS << '\n';
  if (!foundErrors++) {
    if (Banner)
      OS << "# " << Banner << '\n';

    if (LiveInts != nullptr)
      LiveInts->print(OS);
    else
      MF->print(OS, Indexes);
  }

  OS << "*** Bad machine code: " << msg << " ***\n"
     << "- function:    " << MF->getName() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  OS << "- basic block: " << printMBBReference(*MBB) << ' ' << MBB->getName()
     << " (" << (const void *)MBB << ')';
  if (Indexes)
    OS << " [" << Indexes->getMBBStartIdx(MBB) << ';'
       << Indexes->getMBBEndIdx(MBB) << ')';
  OS << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  OS << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    OS << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(OS, /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  OS << "- operand " << MONum << ":   ";
  MO->print(OS, MOVRegType, TRI);
  OS << '\n';
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  OS << "- at:          " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  OS << "- interval:    " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  OS << "- segment:     " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  OS << "- ValNo:       " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  OS << "- liverange:   " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  OS << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  OS << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (VRegOrUnit.isVirtual()) {
    report_context_vreg(VRegOrUnit);
  } else {
    OS << "- regunit:     " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  OS << "- lanemask:    " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block, a landing pad, or an inlineasm_br indirect target.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          !MBB->isInlineAsmBrIndirectTarget()) {
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with a basic block not "
             "used by a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the predecessor list of the successor "
         << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the successor list of the predecessor "
         << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if the block has no
    // branch at all, or if it ends in a conditional branch whose false-side
    // branch is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
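    // (E.g. a block ending in a single conditional branch, i.e. TBB set, FBB
    // null, Cond non-empty, may fall through to its layout successor.)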

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
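// (A bundle header is the first instruction of a bundle; an unbundled
// instruction is trivially its own header.)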
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      OS << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      OS << "First terminator was:\t" << *FirstTerminator;
    }
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
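// A sketch of that template, as checked below (see InlineAsm.h for the
// authoritative encoding):
//   operand 0:  the asm string, as an external symbol
//   operand 1:  the extra-info immediate
//   operand 2+: operand groups, each headed by an InlineAsm::Flag immediate
//               encoding the kind and the number of register operands that
//               follow, then an optional metadata node and, finally, any
//               trailing implicit register operands.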
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16, and
  // Extra_IsConvergent = 32, so the extra-info value must fit in 6 bits.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    const InlineAsm::Flag F(MO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check that the successor & predecessor lists look ok; assume they
      // don't. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
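/// For example, <4 x s32> and <4 x s64> are consistent (same element count),
/// while <4 x s32> paired with s32 or with <2 x s32> is not.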
/// \return true if the types are valid.
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or of its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message,
    // let's avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}

bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}

bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
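    // E.g. in "%d:_(s32) = G_ADD %a, %b", all three operands share type
    // index 0 and must therefore all have the same LLT (s32 here).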
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
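    // An illustrative well-formed MIR load (register names are hypothetical):
    //   %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32) from %ir.p)
    // where the "::" clause is the MachineMemOperand.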
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (TypeSize::isKnownGE(MMO.getSizeInBits().getValue(),
                                ValTy.getSizeInBits()))
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (TypeSize::isKnownGT(MMO.getSize().getValue(),
                                ValTy.getSizeInBytes()))
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (TypeSize::isKnownLT(ValTy.getSizeInBytes(),
                                MMO.getSize().getValue()))
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.isPointerOrPointerVector())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.isPointerOrPointerVector())
      report("gep offset operand must not be a pointer", MI);

    if (PtrTy.isPointerOrPointerVector()) {
      const DataLayout &DL = MF->getDataLayout();
      unsigned AS = PtrTy.getAddressSpace();
      unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
      if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
        report("gep offset operand must match index size for address space",
               MI);
      }
    }

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.isPointerOrPointerVector())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // The number of operands and the presence of types were already checked
    // (and reported in case of any issues), so there is no need to report them
    // again. However, since we try to report as many issues as possible at
    // once, the instructions aren't guaranteed to have the right number of
    // operands or types attached to them at this point.
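    // Illustrative well-formed instances (widths arbitrary): an extend must
    // widen and a truncate must narrow the scalar type:
    //   %1:_(s64) = G_SEXT %0:_(s32)
    //   %1:_(s16) = G_TRUNC %0:_(s32)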
1389     assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1390     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1391     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1392     if (!DstTy.isValid() || !SrcTy.isValid())
1393       break;
1394 
1395     if (DstTy.isPointerOrPointerVector() || SrcTy.isPointerOrPointerVector())
1396       report("Generic extend/truncate can not operate on pointers", MI);
1397 
1398     verifyVectorElementMatch(DstTy, SrcTy, MI);
1399 
1400     unsigned DstSize = DstTy.getScalarSizeInBits();
1401     unsigned SrcSize = SrcTy.getScalarSizeInBits();
1402     switch (MI->getOpcode()) {
1403     default:
1404       if (DstSize <= SrcSize)
1405         report("Generic extend has destination type no larger than source", MI);
1406       break;
1407     case TargetOpcode::G_TRUNC:
1408     case TargetOpcode::G_FPTRUNC:
1409       if (DstSize >= SrcSize)
1410         report("Generic truncate has destination type no smaller than source",
1411                MI);
1412       break;
1413     }
1414     break;
1415   }
1416   case TargetOpcode::G_SELECT: {
1417     LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1418     LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1419     if (!SelTy.isValid() || !CondTy.isValid())
1420       break;
1421 
    // A scalar condition operand used with a vector select is valid, so only
    // check the element counts when the condition is itself a vector.
1423     if (CondTy.isVector())
1424       verifyVectorElementMatch(SelTy, CondTy, MI);
1425     break;
1426   }
1427   case TargetOpcode::G_MERGE_VALUES: {
1428     // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1429     // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed; use
    // G_BUILD_VECTOR for that instead.
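    // An illustrative well-formed instance:
    //   %2:_(s64) = G_MERGE_VALUES %0:_(s32), %1:_(s32)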
1432     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1433     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1434     if (DstTy.isVector() || SrcTy.isVector())
1435       report("G_MERGE_VALUES cannot operate on vectors", MI);
1436 
1437     const unsigned NumOps = MI->getNumOperands();
1438     if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1439       report("G_MERGE_VALUES result size is inconsistent", MI);
1440 
1441     for (unsigned I = 2; I != NumOps; ++I) {
1442       if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1443         report("G_MERGE_VALUES source types do not match", MI);
1444     }
1445 
1446     break;
1447   }
1448   case TargetOpcode::G_UNMERGE_VALUES: {
1449     unsigned NumDsts = MI->getNumOperands() - 1;
1450     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1451     for (unsigned i = 1; i < NumDsts; ++i) {
1452       if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1453         report("G_UNMERGE_VALUES destination types do not match", MI);
1454         break;
1455       }
1456     }
1457 
1458     LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1459     if (DstTy.isVector()) {
1460       // This case is the converse of G_CONCAT_VECTORS.
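      // An illustrative well-formed instance:
      //   %0:_(<2 x s32>), %1:_(<2 x s32>) = G_UNMERGE_VALUES %2:_(<4 x s32>)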
1461       if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
1462           SrcTy.isScalableVector() != DstTy.isScalableVector() ||
1463           SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1464         report("G_UNMERGE_VALUES source operand does not match vector "
1465                "destination operands",
1466                MI);
1467     } else if (SrcTy.isVector()) {
1468       // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1469       // mismatched types as long as the total size matches:
1470       //   %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1471       if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1472         report("G_UNMERGE_VALUES vector source operand does not match scalar "
1473                "destination operands",
1474                MI);
1475     } else {
1476       // This case is the converse of G_MERGE_VALUES.
1477       if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1478         report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1479                "destination operands",
1480                MI);
1481       }
1482     }
1483     break;
1484   }
1485   case TargetOpcode::G_BUILD_VECTOR: {
1486     // Source types must be scalars, dest type a vector. Total size of scalars
1487     // must match the dest vector size.
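    // An illustrative well-formed instance:
    //   %2:_(<2 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32)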
1488     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1489     LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1490     if (!DstTy.isVector() || SrcEltTy.isVector()) {
1491       report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1492       break;
1493     }
1494 
1495     if (DstTy.getElementType() != SrcEltTy)
1496       report("G_BUILD_VECTOR result element type must match source type", MI);
1497 
1498     if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);
1500 
1501     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1502       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1503         report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1504 
1505     break;
1506   }
1507   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1508     // Source types must be scalars, dest type a vector. Scalar types must be
1509     // larger than the dest vector elt type, as this is a truncating operation.
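    // An illustrative well-formed instance (the source scalars are wider than
    // the result element type):
    //   %2:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %0:_(s32), %1:_(s32)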
1510     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1511     LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1512     if (!DstTy.isVector() || SrcEltTy.isVector())
1513       report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1514              MI);
1515     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1516       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1517         report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1518                MI);
1519     if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1520       report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1521              "dest elt type",
1522              MI);
1523     break;
1524   }
1525   case TargetOpcode::G_CONCAT_VECTORS: {
1526     // Source types should be vectors, and total size should match the dest
1527     // vector size.
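    // An illustrative well-formed instance:
    //   %2:_(<4 x s32>) = G_CONCAT_VECTORS %0:_(<2 x s32>), %1:_(<2 x s32>)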
1528     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1529     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1530     if (!DstTy.isVector() || !SrcTy.isVector())
      report("G_CONCAT_VECTORS requires vector source and destination operands",
             MI);
1533 
1534     if (MI->getNumOperands() < 3)
      report("G_CONCAT_VECTORS requires at least 2 source operands", MI);
1536 
1537     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1538       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_CONCAT_VECTORS source operand types are not homogeneous", MI);
1540     if (DstTy.getElementCount() !=
1541         SrcTy.getElementCount() * (MI->getNumOperands() - 1))
      report("G_CONCAT_VECTORS num dest and source elements should match", MI);
1543     break;
1544   }
1545   case TargetOpcode::G_ICMP:
1546   case TargetOpcode::G_FCMP: {
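    // An illustrative well-formed vector instance, where the result has one
    // boolean lane per input lane:
    //   %2:_(<4 x s1>) = G_ICMP intpred(eq), %0:_(<4 x s32>), %1:_(<4 x s32>)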
1547     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1548     LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1549 
1550     if ((DstTy.isVector() != SrcTy.isVector()) ||
1551         (DstTy.isVector() &&
1552          DstTy.getElementCount() != SrcTy.getElementCount()))
1553       report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1554 
1555     break;
1556   }
1557   case TargetOpcode::G_SCMP:
1558   case TargetOpcode::G_UCMP: {
1559     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1560     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1561     LLT SrcTy2 = MRI->getType(MI->getOperand(2).getReg());
1562 
1563     if (SrcTy.isPointerOrPointerVector() || SrcTy2.isPointerOrPointerVector()) {
1564       report("Generic scmp/ucmp does not support pointers as operands", MI);
1565       break;
1566     }
1567 
1568     if (DstTy.isPointerOrPointerVector()) {
1569       report("Generic scmp/ucmp does not support pointers as a result", MI);
1570       break;
1571     }
1572 
1573     if ((DstTy.isVector() != SrcTy.isVector()) ||
1574         (DstTy.isVector() &&
1575          DstTy.getElementCount() != SrcTy.getElementCount())) {
1576       report("Generic vector scmp/ucmp must preserve number of lanes", MI);
1577       break;
1578     }
1579 
1580     if (SrcTy != SrcTy2) {
1581       report("Generic scmp/ucmp must have same input types", MI);
1582       break;
1583     }
1584 
1585     break;
1586   }
1587   case TargetOpcode::G_EXTRACT: {
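    // An illustrative well-formed instance, extracting the high half:
    //   %1:_(s32) = G_EXTRACT %0:_(s64), 32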
1588     const MachineOperand &SrcOp = MI->getOperand(1);
1589     if (!SrcOp.isReg()) {
1590       report("extract source must be a register", MI);
1591       break;
1592     }
1593 
1594     const MachineOperand &OffsetOp = MI->getOperand(2);
1595     if (!OffsetOp.isImm()) {
1596       report("extract offset must be a constant", MI);
1597       break;
1598     }
1599 
1600     unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1601     unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1602     if (SrcSize == DstSize)
1603       report("extract source must be larger than result", MI);
1604 
1605     if (DstSize + OffsetOp.getImm() > SrcSize)
1606       report("extract reads past end of register", MI);
1607     break;
1608   }
1609   case TargetOpcode::G_INSERT: {
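    // An illustrative well-formed instance, inserting into the low bits:
    //   %2:_(s64) = G_INSERT %0:_(s64), %1:_(s32), 0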
1610     const MachineOperand &SrcOp = MI->getOperand(2);
1611     if (!SrcOp.isReg()) {
1612       report("insert source must be a register", MI);
1613       break;
1614     }
1615 
1616     const MachineOperand &OffsetOp = MI->getOperand(3);
1617     if (!OffsetOp.isImm()) {
1618       report("insert offset must be a constant", MI);
1619       break;
1620     }
1621 
1622     unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1623     unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1624 
1625     if (DstSize <= SrcSize)
1626       report("inserted size must be smaller than total register", MI);
1627 
1628     if (SrcSize + OffsetOp.getImm() > DstSize)
1629       report("insert writes past end of register", MI);
1630 
1631     break;
1632   }
1633   case TargetOpcode::G_JUMP_TABLE: {
1634     if (!MI->getOperand(1).isJTI())
1635       report("G_JUMP_TABLE source operand must be a jump table index", MI);
1636     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1637     if (!DstTy.isPointer())
1638       report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1639     break;
1640   }
1641   case TargetOpcode::G_BRJT: {
1642     if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1643       report("G_BRJT src operand 0 must be a pointer type", MI);
1644 
1645     if (!MI->getOperand(1).isJTI())
1646       report("G_BRJT src operand 1 must be a jump table index", MI);
1647 
1648     const auto &IdxOp = MI->getOperand(2);
1649     if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1650       report("G_BRJT src operand 2 must be a scalar reg type", MI);
1651     break;
1652   }
1653   case TargetOpcode::G_INTRINSIC:
1654   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1655   case TargetOpcode::G_INTRINSIC_CONVERGENT:
1656   case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1657     // TODO: Should verify number of def and use operands, but the current
1658     // interface requires passing in IR types for mangling.
1659     const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1660     if (!IntrIDOp.isIntrinsicID()) {
1661       report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1662       break;
1663     }
1664 
1665     if (!verifyGIntrinsicSideEffects(MI))
1666       break;
1667     if (!verifyGIntrinsicConvergence(MI))
1668       break;
1669 
1670     break;
1671   }
1672   case TargetOpcode::G_SEXT_INREG: {
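    // An illustrative well-formed instance, sign-extending from bit 8:
    //   %1:_(s32) = G_SEXT_INREG %0:_(s32), 8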
1673     if (!MI->getOperand(2).isImm()) {
1674       report("G_SEXT_INREG expects an immediate operand #2", MI);
1675       break;
1676     }
1677 
1678     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1679     int64_t Imm = MI->getOperand(2).getImm();
1680     if (Imm <= 0)
1681       report("G_SEXT_INREG size must be >= 1", MI);
1682     if (Imm >= SrcTy.getScalarSizeInBits())
1683       report("G_SEXT_INREG size must be less than source bit width", MI);
1684     break;
1685   }
1686   case TargetOpcode::G_BSWAP: {
1687     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1688     if (DstTy.getScalarSizeInBits() % 16 != 0)
1689       report("G_BSWAP size must be a multiple of 16 bits", MI);
1690     break;
1691   }
1692   case TargetOpcode::G_VSCALE: {
1693     if (!MI->getOperand(1).isCImm()) {
1694       report("G_VSCALE operand must be cimm", MI);
1695       break;
1696     }
1697     if (MI->getOperand(1).getCImm()->isZero()) {
1698       report("G_VSCALE immediate cannot be zero", MI);
1699       break;
1700     }
1701     break;
1702   }
1703   case TargetOpcode::G_INSERT_SUBVECTOR: {
1704     const MachineOperand &Src0Op = MI->getOperand(1);
1705     if (!Src0Op.isReg()) {
1706       report("G_INSERT_SUBVECTOR first source must be a register", MI);
1707       break;
1708     }
1709 
1710     const MachineOperand &Src1Op = MI->getOperand(2);
1711     if (!Src1Op.isReg()) {
1712       report("G_INSERT_SUBVECTOR second source must be a register", MI);
1713       break;
1714     }
1715 
1716     const MachineOperand &IndexOp = MI->getOperand(3);
1717     if (!IndexOp.isImm()) {
1718       report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1719       break;
1720     }
1721 
1722     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1723     LLT Src1Ty = MRI->getType(Src1Op.getReg());
1724 
1725     if (!DstTy.isVector()) {
1726       report("Destination type must be a vector", MI);
1727       break;
1728     }
1729 
1730     if (!Src1Ty.isVector()) {
1731       report("Second source must be a vector", MI);
1732       break;
1733     }
1734 
1735     if (DstTy.getElementType() != Src1Ty.getElementType()) {
1736       report("Element type of vectors must be the same", MI);
1737       break;
1738     }
1739 
1740     if (Src1Ty.isScalable() != DstTy.isScalable()) {
1741       report("Vector types must both be fixed or both be scalable", MI);
1742       break;
1743     }
1744 
1745     if (ElementCount::isKnownGT(Src1Ty.getElementCount(),
1746                                 DstTy.getElementCount())) {
1747       report("Second source must be smaller than destination vector", MI);
1748       break;
1749     }
1750 
1751     uint64_t Idx = IndexOp.getImm();
1752     uint64_t Src1MinLen = Src1Ty.getElementCount().getKnownMinValue();
    if (Idx % Src1MinLen != 0) {
1754       report("Index must be a multiple of the second source vector's "
1755              "minimum vector length",
1756              MI);
1757       break;
1758     }
1759 
1760     uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1761     if (Idx >= DstMinLen || Idx + Src1MinLen > DstMinLen) {
1762       report("Subvector type and index must not cause insert to overrun the "
1763              "vector being inserted into",
1764              MI);
1765       break;
1766     }
1767 
1768     break;
1769   }
1770   case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1771     const MachineOperand &SrcOp = MI->getOperand(1);
1772     if (!SrcOp.isReg()) {
1773       report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1774       break;
1775     }
1776 
1777     const MachineOperand &IndexOp = MI->getOperand(2);
1778     if (!IndexOp.isImm()) {
1779       report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1780       break;
1781     }
1782 
1783     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1784     LLT SrcTy = MRI->getType(SrcOp.getReg());
1785 
1786     if (!DstTy.isVector()) {
1787       report("Destination type must be a vector", MI);
1788       break;
1789     }
1790 
1791     if (!SrcTy.isVector()) {
1792       report("Source must be a vector", MI);
1793       break;
1794     }
1795 
1796     if (DstTy.getElementType() != SrcTy.getElementType()) {
1797       report("Element type of vectors must be the same", MI);
1798       break;
1799     }
1800 
1801     if (SrcTy.isScalable() != DstTy.isScalable()) {
1802       report("Vector types must both be fixed or both be scalable", MI);
1803       break;
1804     }
1805 
1806     if (ElementCount::isKnownGT(DstTy.getElementCount(),
1807                                 SrcTy.getElementCount())) {
1808       report("Destination vector must be smaller than source vector", MI);
1809       break;
1810     }
1811 
1812     uint64_t Idx = IndexOp.getImm();
1813     uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1814     if (Idx % DstMinLen != 0) {
1815       report("Index must be a multiple of the destination vector's minimum "
1816              "vector length",
1817              MI);
1818       break;
1819     }
1820 
1821     uint64_t SrcMinLen = SrcTy.getElementCount().getKnownMinValue();
1822     if (Idx >= SrcMinLen || Idx + DstMinLen > SrcMinLen) {
1823       report("Destination type and index must not cause extract to overrun the "
1824              "source vector",
1825              MI);
1826       break;
1827     }
1828 
1829     break;
1830   }
1831   case TargetOpcode::G_SHUFFLE_VECTOR: {
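    // An illustrative well-formed instance; mask indices select from the
    // concatenation of both sources:
    //   %2:_(<4 x s32>) = G_SHUFFLE_VECTOR %0:_(<4 x s32>), %1:_(<4 x s32>),
    //                                      shufflemask(0, 1, 4, 5)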
1832     const MachineOperand &MaskOp = MI->getOperand(3);
1833     if (!MaskOp.isShuffleMask()) {
1834       report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1835       break;
1836     }
1837 
1838     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1839     LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1840     LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1841 
1842     if (Src0Ty != Src1Ty)
1843       report("Source operands must be the same type", MI);
1844 
1845     if (Src0Ty.getScalarType() != DstTy.getScalarType())
1846       report("G_SHUFFLE_VECTOR cannot change element type", MI);
1847 
    // Don't check that all operands are vectors, because scalars are used in
    // place of single-element vectors.
1850     int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1851     int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1852 
1853     ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1854 
1855     if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1856       report("Wrong result type for shufflemask", MI);
1857 
1858     for (int Idx : MaskIdxes) {
1859       if (Idx < 0)
1860         continue;
1861 
1862       if (Idx >= 2 * SrcNumElts)
1863         report("Out of bounds shuffle index", MI);
1864     }
1865 
1866     break;
1867   }
1868 
1869   case TargetOpcode::G_SPLAT_VECTOR: {
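    // An illustrative well-formed instance (the destination must be a
    // scalable vector):
    //   %1:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %0:_(s32)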
1870     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1871     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1872 
1873     if (!DstTy.isScalableVector()) {
1874       report("Destination type must be a scalable vector", MI);
1875       break;
1876     }
1877 
1878     if (!SrcTy.isScalar() && !SrcTy.isPointer()) {
1879       report("Source type must be a scalar or pointer", MI);
1880       break;
1881     }
1882 
1883     if (TypeSize::isKnownGT(DstTy.getElementType().getSizeInBits(),
1884                             SrcTy.getSizeInBits())) {
1885       report("Element type of the destination must be the same size or smaller "
1886              "than the source type",
1887              MI);
1888       break;
1889     }
1890 
1891     break;
1892   }
1893   case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
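    // An illustrative well-formed instance, assuming a target whose vector
    // index type (TLI->getVectorIdxTy()) is 64 bits wide:
    //   %2:_(s32) = G_EXTRACT_VECTOR_ELT %0:_(<4 x s32>), %1:_(s64)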
1894     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1895     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1896     LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1897 
1898     if (!DstTy.isScalar() && !DstTy.isPointer()) {
1899       report("Destination type must be a scalar or pointer", MI);
1900       break;
1901     }
1902 
1903     if (!SrcTy.isVector()) {
1904       report("First source must be a vector", MI);
1905       break;
1906     }
1907 
1908     auto TLI = MF->getSubtarget().getTargetLowering();
1909     if (IdxTy.getSizeInBits() !=
1910         TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1911       report("Index type must match VectorIdxTy", MI);
1912       break;
1913     }
1914 
1915     break;
1916   }
1917   case TargetOpcode::G_INSERT_VECTOR_ELT: {
1918     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1919     LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
1920     LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
1921     LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
1922 
1923     if (!DstTy.isVector()) {
1924       report("Destination type must be a vector", MI);
1925       break;
1926     }
1927 
1928     if (VecTy != DstTy) {
1929       report("Destination type and vector type must match", MI);
1930       break;
1931     }
1932 
1933     if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
1934       report("Inserted element must be a scalar or pointer", MI);
1935       break;
1936     }
1937 
1938     auto TLI = MF->getSubtarget().getTargetLowering();
1939     if (IdxTy.getSizeInBits() !=
1940         TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1941       report("Index type must match VectorIdxTy", MI);
1942       break;
1943     }
1944 
1945     break;
1946   }
1947   case TargetOpcode::G_DYN_STACKALLOC: {
1948     const MachineOperand &DstOp = MI->getOperand(0);
1949     const MachineOperand &AllocOp = MI->getOperand(1);
1950     const MachineOperand &AlignOp = MI->getOperand(2);
1951 
1952     if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1953       report("dst operand 0 must be a pointer type", MI);
1954       break;
1955     }
1956 
1957     if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1958       report("src operand 1 must be a scalar reg type", MI);
1959       break;
1960     }
1961 
1962     if (!AlignOp.isImm()) {
1963       report("src operand 2 must be an immediate type", MI);
1964       break;
1965     }
1966     break;
1967   }
1968   case TargetOpcode::G_MEMCPY_INLINE:
1969   case TargetOpcode::G_MEMCPY:
1970   case TargetOpcode::G_MEMMOVE: {
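    // An illustrative well-formed instance (the memory operands are sketched
    // under the assumption of address space 0 and a 64-bit size type):
    //   G_MEMCPY %0:_(p0), %1:_(p0), %2:_(s64), 0
    //       :: (store (s8) into %ir.dst), (load (s8) from %ir.src)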
1971     ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1972     if (MMOs.size() != 2) {
1973       report("memcpy/memmove must have 2 memory operands", MI);
1974       break;
1975     }
1976 
1977     if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1978         (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1979       report("wrong memory operand types", MI);
1980       break;
1981     }
1982 
1983     if (MMOs[0]->getSize() != MMOs[1]->getSize())
1984       report("inconsistent memory operand sizes", MI);
1985 
1986     LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1987     LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1988 
1989     if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1990       report("memory instruction operand must be a pointer", MI);
1991       break;
1992     }
1993 
1994     if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1995       report("inconsistent store address space", MI);
1996     if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1997       report("inconsistent load address space", MI);
1998 
1999     if (Opc != TargetOpcode::G_MEMCPY_INLINE)
2000       if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
2001         report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
2002 
2003     break;
2004   }
2005   case TargetOpcode::G_BZERO:
2006   case TargetOpcode::G_MEMSET: {
2007     ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2008     std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
2009     if (MMOs.size() != 1) {
2010       report(Twine(Name, " must have 1 memory operand"), MI);
2011       break;
2012     }
2013 
2014     if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
2015       report(Twine(Name, " memory operand must be a store"), MI);
2016       break;
2017     }
2018 
2019     LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2020     if (!DstPtrTy.isPointer()) {
2021       report(Twine(Name, " operand must be a pointer"), MI);
2022       break;
2023     }
2024 
2025     if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2026       report("inconsistent " + Twine(Name, " address space"), MI);
2027 
2028     if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
2029         (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
2030       report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
2031 
2032     break;
2033   }
2034   case TargetOpcode::G_UBSANTRAP: {
2035     const MachineOperand &KindOp = MI->getOperand(0);
    if (!KindOp.isImm()) {
2037       report("Crash kind must be an immediate", &KindOp, 0);
2038       break;
2039     }
    int64_t Kind = KindOp.getImm();
2041     if (!isInt<8>(Kind))
2042       report("Crash kind must be 8 bit wide", &KindOp, 0);
2043     break;
2044   }
2045   case TargetOpcode::G_VECREDUCE_SEQ_FADD:
2046   case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
2047     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2048     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2049     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2050     if (!DstTy.isScalar())
2051       report("Vector reduction requires a scalar destination type", MI);
2052     if (!Src1Ty.isScalar())
      report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand",
             MI);
2054     if (!Src2Ty.isVector())
      report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand",
             MI);
2056     break;
2057   }
2058   case TargetOpcode::G_VECREDUCE_FADD:
2059   case TargetOpcode::G_VECREDUCE_FMUL:
2060   case TargetOpcode::G_VECREDUCE_FMAX:
2061   case TargetOpcode::G_VECREDUCE_FMIN:
2062   case TargetOpcode::G_VECREDUCE_FMAXIMUM:
2063   case TargetOpcode::G_VECREDUCE_FMINIMUM:
2064   case TargetOpcode::G_VECREDUCE_ADD:
2065   case TargetOpcode::G_VECREDUCE_MUL:
2066   case TargetOpcode::G_VECREDUCE_AND:
2067   case TargetOpcode::G_VECREDUCE_OR:
2068   case TargetOpcode::G_VECREDUCE_XOR:
2069   case TargetOpcode::G_VECREDUCE_SMAX:
2070   case TargetOpcode::G_VECREDUCE_SMIN:
2071   case TargetOpcode::G_VECREDUCE_UMAX:
2072   case TargetOpcode::G_VECREDUCE_UMIN: {
2073     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2074     if (!DstTy.isScalar())
2075       report("Vector reduction requires a scalar destination type", MI);
2076     break;
2077   }
2078 
2079   case TargetOpcode::G_SBFX:
2080   case TargetOpcode::G_UBFX: {
2081     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2082     if (DstTy.isVector()) {
2083       report("Bitfield extraction is not supported on vectors", MI);
2084       break;
2085     }
2086     break;
2087   }
2088   case TargetOpcode::G_SHL:
2089   case TargetOpcode::G_LSHR:
2090   case TargetOpcode::G_ASHR:
2091   case TargetOpcode::G_ROTR:
2092   case TargetOpcode::G_ROTL: {
2093     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2094     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2095     if (Src1Ty.isVector() != Src2Ty.isVector()) {
2096       report("Shifts and rotates require operands to be either all scalars or "
2097              "all vectors",
2098              MI);
2099       break;
2100     }
2101     break;
2102   }
2103   case TargetOpcode::G_LLROUND:
2104   case TargetOpcode::G_LROUND: {
2105     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2106     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2107     if (!DstTy.isValid() || !SrcTy.isValid())
2108       break;
2109     if (SrcTy.isPointer() || DstTy.isPointer()) {
2110       StringRef Op = SrcTy.isPointer() ? "Source" : "Destination";
2111       report(Twine(Op, " operand must not be a pointer type"), MI);
2112     } else if (SrcTy.isScalar()) {
2113       verifyAllRegOpsScalar(*MI, *MRI);
2114       break;
2115     } else if (SrcTy.isVector()) {
2116       verifyVectorElementMatch(SrcTy, DstTy, MI);
2117       break;
2118     }
2119     break;
2120   }
2121   case TargetOpcode::G_IS_FPCLASS: {
2122     LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2123     LLT DestEltTy = DestTy.getScalarType();
2124     if (!DestEltTy.isScalar()) {
2125       report("Destination must be a scalar or vector of scalars", MI);
2126       break;
2127     }
2128     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2129     LLT SrcEltTy = SrcTy.getScalarType();
2130     if (!SrcEltTy.isScalar()) {
2131       report("Source must be a scalar or vector of scalars", MI);
2132       break;
2133     }
2134     if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2135       break;
2136     const MachineOperand &TestMO = MI->getOperand(2);
2137     if (!TestMO.isImm()) {
2138       report("floating-point class set (operand 2) must be an immediate", MI);
2139       break;
2140     }
2141     int64_t Test = TestMO.getImm();
2142     if (Test < 0 || Test > fcAllFlags) {
2143       report("Incorrect floating-point class set (operand 2)", MI);
2144       break;
2145     }
2146     break;
2147   }
2148   case TargetOpcode::G_PREFETCH: {
2149     const MachineOperand &AddrOp = MI->getOperand(0);
2150     if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2151       report("addr operand must be a pointer", &AddrOp, 0);
2152       break;
2153     }
2154     const MachineOperand &RWOp = MI->getOperand(1);
2155     if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2156       report("rw operand must be an immediate 0-1", &RWOp, 1);
2157       break;
2158     }
2159     const MachineOperand &LocalityOp = MI->getOperand(2);
2160     if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2161       report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2162       break;
2163     }
2164     const MachineOperand &CacheTypeOp = MI->getOperand(3);
2165     if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2166       report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2167       break;
2168     }
2169     break;
2170   }
2171   case TargetOpcode::G_ASSERT_ALIGN: {
2172     if (MI->getOperand(2).getImm() < 1)
2173       report("alignment immediate must be >= 1", MI);
2174     break;
2175   }
2176   case TargetOpcode::G_CONSTANT_POOL: {
2177     if (!MI->getOperand(1).isCPI())
2178       report("Src operand 1 must be a constant pool index", MI);
2179     if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2180       report("Dst operand 0 must be a pointer", MI);
2181     break;
2182   }
2183   case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
2184     const MachineOperand &AddrOp = MI->getOperand(1);
2185     if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
2186       report("addr operand must be a pointer", &AddrOp, 1);
2187     break;
2188   }
2189   default:
2190     break;
2191   }
2192 }
2193 
2194 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2195   const MCInstrDesc &MCID = MI->getDesc();
2196   if (MI->getNumOperands() < MCID.getNumOperands()) {
2197     report("Too few operands", MI);
2198     OS << MCID.getNumOperands() << " operands expected, but "
2199        << MI->getNumOperands() << " given.\n";
2200   }
2201 
2202   if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2203     report("NoConvergent flag expected only on convergent instructions.", MI);
2204 
2205   if (MI->isPHI()) {
2206     if (MF->getProperties().hasProperty(
2207             MachineFunctionProperties::Property::NoPHIs))
2208       report("Found PHI instruction with NoPHIs property set", MI);
2209 
2210     if (FirstNonPHI)
2211       report("Found PHI instruction after non-PHI", MI);
2212   } else if (FirstNonPHI == nullptr)
2213     FirstNonPHI = MI;
2214 
2215   // Check the tied operands.
2216   if (MI->isInlineAsm())
2217     verifyInlineAsm(MI);
2218 
2219   // Check that unspillable terminators define a reg and have at most one use.
2220   if (TII->isUnspillableTerminator(MI)) {
2221     if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2222       report("Unspillable Terminator does not define a reg", MI);
2223     Register Def = MI->getOperand(0).getReg();
2224     if (Def.isVirtual() &&
2225         !MF->getProperties().hasProperty(
2226             MachineFunctionProperties::Property::NoPHIs) &&
2227         std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2228       report("Unspillable Terminator expected to have at most one use!", MI);
2229   }
2230 
2231   // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2232   // DBG_VALUEs: these are convenient to use in tests, but should never get
2233   // generated.
2234   if (MI->isDebugValue() && MI->getNumOperands() == 4)
2235     if (!MI->getDebugLoc())
2236       report("Missing DebugLoc for debug instruction", MI);
2237 
  // Meta instructions should never be the subject of debug value tracking;
  // they don't create a value in the output program at all.
2240   if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2241     report("Metadata instruction should not have a value tracking number", MI);
2242 
2243   // Check the MachineMemOperands for basic consistency.
2244   for (MachineMemOperand *Op : MI->memoperands()) {
2245     if (Op->isLoad() && !MI->mayLoad())
2246       report("Missing mayLoad flag", MI);
2247     if (Op->isStore() && !MI->mayStore())
2248       report("Missing mayStore flag", MI);
2249   }
2250 
2251   // Debug values must not have a slot index.
2252   // Other instructions must have one, unless they are inside a bundle.
2253   if (LiveInts) {
2254     bool mapped = !LiveInts->isNotInMIMap(*MI);
2255     if (MI->isDebugOrPseudoInstr()) {
2256       if (mapped)
2257         report("Debug instruction has a slot index", MI);
2258     } else if (MI->isInsideBundle()) {
2259       if (mapped)
2260         report("Instruction inside bundle has a slot index", MI);
2261     } else {
2262       if (!mapped)
2263         report("Missing slot index", MI);
2264     }
2265   }
2266 
2267   unsigned Opc = MCID.getOpcode();
2268   if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
2269     verifyPreISelGenericInstruction(MI);
2270     return;
2271   }
2272 
2273   StringRef ErrorInfo;
2274   if (!TII->verifyInstruction(*MI, ErrorInfo))
2275     report(ErrorInfo.data(), MI);
2276 
2277   // Verify properties of various specific instruction types
2278   switch (MI->getOpcode()) {
2279   case TargetOpcode::COPY: {
2280     const MachineOperand &DstOp = MI->getOperand(0);
2281     const MachineOperand &SrcOp = MI->getOperand(1);
2282     const Register SrcReg = SrcOp.getReg();
2283     const Register DstReg = DstOp.getReg();
2284 
2285     LLT DstTy = MRI->getType(DstReg);
2286     LLT SrcTy = MRI->getType(SrcReg);
2287     if (SrcTy.isValid() && DstTy.isValid()) {
2288       // If both types are valid, check that the types are the same.
2289       if (SrcTy != DstTy) {
2290         report("Copy Instruction is illegal with mismatching types", MI);
2291         OS << "Def = " << DstTy << ", Src = " << SrcTy << '\n';
2292       }
2293 
2294       break;
2295     }
2296 
2297     if (!SrcTy.isValid() && !DstTy.isValid())
2298       break;
2299 
2300     // If we have only one valid type, this is likely a copy between a virtual
2301     // and physical register.
2302     TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2303     TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2304     if (SrcReg.isPhysical() && DstTy.isValid()) {
2305       const TargetRegisterClass *SrcRC =
2306           TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2307       if (SrcRC)
2308         SrcSize = TRI->getRegSizeInBits(*SrcRC);
2309     }
2310 
2311     if (DstReg.isPhysical() && SrcTy.isValid()) {
2312       const TargetRegisterClass *DstRC =
2313           TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2314       if (DstRC)
2315         DstSize = TRI->getRegSizeInBits(*DstRC);
2316     }
2317 
    // The next two checks allow COPY between physical and virtual registers
    // when the virtual register has a scalable size and the physical register
    // has a fixed size. These checks allow COPY between *potentially*
    // mismatched sizes. However, once RegisterBankSelection occurs,
    // MachineVerifier should be able to resolve a fixed size for the scalable
    // vector, and at that point this function will know for sure whether the
    // sizes are mismatched and correctly report a size mismatch.
2325     if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2326         !SrcSize.isScalable())
2327       break;
2328     if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2329         !DstSize.isScalable())
2330       break;
2331 
2332     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2333       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2334         report("Copy Instruction is illegal with mismatching sizes", MI);
2335         OS << "Def Size = " << DstSize << ", Src Size = " << SrcSize << '\n';
2336       }
2337     }
2338     break;
2339   }
2340   case TargetOpcode::STATEPOINT: {
2341     StatepointOpers SO(MI);
2342     if (!MI->getOperand(SO.getIDPos()).isImm() ||
2343         !MI->getOperand(SO.getNBytesPos()).isImm() ||
2344         !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2345       report("meta operands to STATEPOINT not constant!", MI);
2346       break;
2347     }
2348 
2349     auto VerifyStackMapConstant = [&](unsigned Offset) {
2350       if (Offset >= MI->getNumOperands()) {
2351         report("stack map constant to STATEPOINT is out of range!", MI);
2352         return;
2353       }
2354       if (!MI->getOperand(Offset - 1).isImm() ||
2355           MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2356           !MI->getOperand(Offset).isImm())
2357         report("stack map constant to STATEPOINT not well formed!", MI);
2358     };
2359     VerifyStackMapConstant(SO.getCCIdx());
2360     VerifyStackMapConstant(SO.getFlagsIdx());
2361     VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2362     VerifyStackMapConstant(SO.getNumGCPtrIdx());
2363     VerifyStackMapConstant(SO.getNumAllocaIdx());
2364     VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2365 
    // Verify that all explicit statepoint defs are tied to gc operands, as
    // they are expected to be relocations of gc operands.
2368     unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2369     unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2370     for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2371       unsigned UseOpIdx;
2372       if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2373         report("STATEPOINT defs expected to be tied", MI);
2374         break;
2375       }
2376       if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2377         report("STATEPOINT def tied to non-gc operand", MI);
2378         break;
2379       }
2380     }
2381 
2382     // TODO: verify we have properly encoded deopt arguments
2383   } break;
2384   case TargetOpcode::INSERT_SUBREG: {
2385     unsigned InsertedSize;
2386     if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2387       InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2388     else
2389       InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2390     unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2391     if (SubRegSize < InsertedSize) {
2392       report("INSERT_SUBREG expected inserted value to have equal or lesser "
2393              "size than the subreg it was inserted into", MI);
2394       break;
2395     }
2396   } break;
2397   case TargetOpcode::REG_SEQUENCE: {
2398     unsigned NumOps = MI->getNumOperands();
2399     if (!(NumOps & 1)) {
2400       report("Invalid number of operands for REG_SEQUENCE", MI);
2401       break;
2402     }
2403 
2404     for (unsigned I = 1; I != NumOps; I += 2) {
2405       const MachineOperand &RegOp = MI->getOperand(I);
2406       const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2407 
2408       if (!RegOp.isReg())
2409         report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2410 
2411       if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2412           SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2413         report("Invalid subregister index operand for REG_SEQUENCE",
2414                &SubRegOp, I + 1);
2415       }
2416     }
2417 
2418     Register DstReg = MI->getOperand(0).getReg();
2419     if (DstReg.isPhysical())
2420       report("REG_SEQUENCE does not support physical register results", MI);
2421 
2422     if (MI->getOperand(0).getSubReg())
2423       report("Invalid subreg result for REG_SEQUENCE", MI);
2424 
2425     break;
2426   }
2427   }
2428 }
2429 
2430 void
2431 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2432   const MachineInstr *MI = MO->getParent();
2433   const MCInstrDesc &MCID = MI->getDesc();
2434   unsigned NumDefs = MCID.getNumDefs();
2435   if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2436     NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2437 
2438   // The first MCID.NumDefs operands must be explicit register defines
2439   if (MONum < NumDefs) {
2440     const MCOperandInfo &MCOI = MCID.operands()[MONum];
2441     if (!MO->isReg())
2442       report("Explicit definition must be a register", MO, MONum);
2443     else if (!MO->isDef() && !MCOI.isOptionalDef())
2444       report("Explicit definition marked as use", MO, MONum);
2445     else if (MO->isImplicit())
2446       report("Explicit definition marked as implicit", MO, MONum);
2447   } else if (MONum < MCID.getNumOperands()) {
2448     const MCOperandInfo &MCOI = MCID.operands()[MONum];
    // Don't check if it's the last operand in a variadic instruction. See,
    // e.g., LDM_RET in the ARM backend. Check non-variadic operands only.
2451     bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2452     if (!IsOptional) {
2453       if (MO->isReg()) {
2454         if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2455           report("Explicit operand marked as def", MO, MONum);
2456         if (MO->isImplicit())
2457           report("Explicit operand marked as implicit", MO, MONum);
2458       }
2459 
2460       // Check that an instruction has register operands only as expected.
2461       if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2462           !MO->isReg() && !MO->isFI())
2463         report("Expected a register operand.", MO, MONum);
2464       if (MO->isReg()) {
2465         if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2466             (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2467              !TII->isPCRelRegisterOperandLegal(*MO)))
2468           report("Expected a non-register operand.", MO, MONum);
2469       }
2470     }
2471 
2472     int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2473     if (TiedTo != -1) {
2474       if (!MO->isReg())
2475         report("Tied use must be a register", MO, MONum);
2476       else if (!MO->isTied())
2477         report("Operand should be tied", MO, MONum);
2478       else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2479         report("Tied def doesn't match MCInstrDesc", MO, MONum);
2480       else if (MO->getReg().isPhysical()) {
2481         const MachineOperand &MOTied = MI->getOperand(TiedTo);
2482         if (!MOTied.isReg())
2483           report("Tied counterpart must be a register", &MOTied, TiedTo);
2484         else if (MOTied.getReg().isPhysical() &&
2485                  MO->getReg() != MOTied.getReg())
2486           report("Tied physical registers must match.", &MOTied, TiedTo);
2487       }
2488     } else if (MO->isReg() && MO->isTied())
2489       report("Explicit operand should not be tied", MO, MONum);
2490   } else if (!MI->isVariadic()) {
2491     // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2492     if (!MO->isValidExcessOperand())
2493       report("Extra explicit operand on non-variadic instruction", MO, MONum);
2494   }
2495 
2496   switch (MO->getType()) {
2497   case MachineOperand::MO_Register: {
2498     // Verify debug flag on debug instructions. Check this first because reg0
2499     // indicates an undefined debug value.
2500     if (MI->isDebugInstr() && MO->isUse()) {
2501       if (!MO->isDebug())
2502         report("Register operand must be marked debug", MO, MONum);
2503     } else if (MO->isDebug()) {
2504       report("Register operand must not be marked debug", MO, MONum);
2505     }
2506 
2507     const Register Reg = MO->getReg();
2508     if (!Reg)
2509       return;
2510     if (MRI->tracksLiveness() && !MI->isDebugInstr())
2511       checkLiveness(MO, MONum);
2512 
2513     if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2514         MO->getReg().isVirtual()) // TODO: Apply to physregs too
      report("Undef virtual register def operands require a subregister", MO,
             MONum);
2516 
2517     // Verify the consistency of tied operands.
2518     if (MO->isTied()) {
2519       unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2520       const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2521       if (!OtherMO.isReg())
2522         report("Must be tied to a register", MO, MONum);
2523       if (!OtherMO.isTied())
2524         report("Missing tie flags on tied operand", MO, MONum);
2525       if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2526         report("Inconsistent tie links", MO, MONum);
2527       if (MONum < MCID.getNumDefs()) {
2528         if (OtherIdx < MCID.getNumOperands()) {
2529           if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2530             report("Explicit def tied to explicit use without tie constraint",
2531                    MO, MONum);
2532         } else {
2533           if (!OtherMO.isImplicit())
2534             report("Explicit def should be tied to implicit use", MO, MONum);
2535         }
2536       }
2537     }
2538 
    // Verify two-address constraints after the TwoAddressInstruction pass.
    // Both the TwoAddressInstruction and PHIElimination passes call
    // MRI->leaveSSA() to clear IsSSA on the function, but this verification
    // should run after TwoAddressInstruction, not after PHIElimination. So
    // instead of using IsSSA as the condition, we check the TiedOpsRewritten
    // property, which is set by the TwoAddressInstruction pass.
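    // A hypothetical post-TwoAddressInstruction fragment (X86-flavored, for
    // illustration only) where the tied use must name the same register as
    // the tied def:
    //   %0:gr32 = ADD32rr %0:gr32(tied-def 0), %1:gr32, implicit-def $eflags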
2546     unsigned DefIdx;
2547     if (MF->getProperties().hasProperty(
2548             MachineFunctionProperties::Property::TiedOpsRewritten) &&
2549         MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2550         Reg != MI->getOperand(DefIdx).getReg())
2551       report("Two-address instruction operands must be identical", MO, MONum);
2552 
2553     // Check register classes.
2554     unsigned SubIdx = MO->getSubReg();
2555 
2556     if (Reg.isPhysical()) {
2557       if (SubIdx) {
2558         report("Illegal subregister index for physical register", MO, MONum);
2559         return;
2560       }
2561       if (MONum < MCID.getNumOperands()) {
2562         if (const TargetRegisterClass *DRC =
2563               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2564           if (!DRC->contains(Reg)) {
2565             report("Illegal physical register for instruction", MO, MONum);
2566             OS << printReg(Reg, TRI) << " is not a "
2567                << TRI->getRegClassName(DRC) << " register.\n";
2568           }
2569         }
2570       }
2571       if (MO->isRenamable()) {
2572         if (MRI->isReserved(Reg)) {
2573           report("isRenamable set on reserved register", MO, MONum);
2574           return;
2575         }
2576       }
2577     } else {
2578       // Virtual register.
2579       const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2580       if (!RC) {
2581         // This is a generic virtual register.
2582 
2583         // Do not allow undef uses for generic virtual registers. This ensures
2584         // getVRegDef can never fail and return null on a generic register.
2585         //
2586         // FIXME: This restriction should probably be broadened to all SSA
2587         // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2588         // run on the SSA function just before phi elimination.
2589         if (MO->isUndef())
2590           report("Generic virtual register use cannot be undef", MO, MONum);
2591 
        // Debug value instructions are permitted to use undefined vregs.
2593         // This is a performance measure to skip the overhead of immediately
2594         // pruning unused debug operands. The final undef substitution occurs
2595         // when debug values are allocated in LDVImpl::handleDebugValue, so
2596         // these verifications always apply after this pass.
2597         if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2598             !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2599           // If we're post-Select, we can't have gvregs anymore.
2600           if (isFunctionSelected) {
2601             report("Generic virtual register invalid in a Selected function",
2602                    MO, MONum);
2603             return;
2604           }
2605 
2606           // The gvreg must have a type and it must not have a SubIdx.
2607           LLT Ty = MRI->getType(Reg);
2608           if (!Ty.isValid()) {
2609             report("Generic virtual register must have a valid type", MO,
2610                    MONum);
2611             return;
2612           }
2613 
2614           const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2615           const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2616 
2617           // If we're post-RegBankSelect, the gvreg must have a bank.
2618           if (!RegBank && isFunctionRegBankSelected) {
2619             report("Generic virtual register must have a bank in a "
2620                    "RegBankSelected function",
2621                    MO, MONum);
2622             return;
2623           }
2624 
2625           // Make sure the register fits into its register bank if any.
2626           if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2627               RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2628             report("Register bank is too small for virtual register", MO,
2629                    MONum);
            OS << "Register bank " << RegBank->getName() << " too small ("
               << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
               << Ty.getSizeInBits() << " bits\n";
2633             return;
2634           }
2635         }
2636 
        if (SubIdx) {
2638           report("Generic virtual register does not allow subregister index", MO,
2639                  MONum);
2640           return;
2641         }
2642 
        // If this is a target-specific instruction and this operand has a
        // register class constraint, the virtual register must comply with it.
2646         if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2647             MONum < MCID.getNumOperands() &&
2648             TII->getRegClass(MCID, MONum, TRI, *MF)) {
2649           report("Virtual register does not match instruction constraint", MO,
2650                  MONum);
          OS << "Expected register class "
             << TRI->getRegClassName(TII->getRegClass(MCID, MONum, TRI, *MF))
             << " but got nothing\n";
2654           return;
2655         }
2656 
2657         break;
2658       }
2659       if (SubIdx) {
2660         const TargetRegisterClass *SRC =
2661           TRI->getSubClassWithSubReg(RC, SubIdx);
2662         if (!SRC) {
2663           report("Invalid subregister index for virtual register", MO, MONum);
2664           OS << "Register class " << TRI->getRegClassName(RC)
2665              << " does not support subreg index " << SubIdx << '\n';
2666           return;
2667         }
2668         if (RC != SRC) {
2669           report("Invalid register class for subregister index", MO, MONum);
2670           OS << "Register class " << TRI->getRegClassName(RC)
2671              << " does not fully support subreg index " << SubIdx << '\n';
2672           return;
2673         }
2674       }
2675       if (MONum < MCID.getNumOperands()) {
2676         if (const TargetRegisterClass *DRC =
2677               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2678           if (SubIdx) {
2679             const TargetRegisterClass *SuperRC =
2680                 TRI->getLargestLegalSuperClass(RC, *MF);
2681             if (!SuperRC) {
2682               report("No largest legal super class exists.", MO, MONum);
2683               return;
2684             }
2685             DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2686             if (!DRC) {
2687               report("No matching super-reg register class.", MO, MONum);
2688               return;
2689             }
2690           }
2691           if (!RC->hasSuperClassEq(DRC)) {
2692             report("Illegal virtual register for instruction", MO, MONum);
2693             OS << "Expected a " << TRI->getRegClassName(DRC)
2694                << " register, but got a " << TRI->getRegClassName(RC)
2695                << " register\n";
2696           }
2697         }
2698       }
2699     }
2700     break;
2701   }
2702 
2703   case MachineOperand::MO_RegisterMask:
2704     regMasks.push_back(MO->getRegMask());
2705     break;
2706 
2707   case MachineOperand::MO_MachineBasicBlock:
2708     if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2709       report("PHI operand is not in the CFG", MO, MONum);
2710     break;
2711 
2712   case MachineOperand::MO_FrameIndex:
2713     if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2714         LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2715       int FI = MO->getIndex();
2716       LiveInterval &LI = LiveStks->getInterval(FI);
2717       SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2718 
2719       bool stores = MI->mayStore();
2720       bool loads = MI->mayLoad();
2721       // For a memory-to-memory move, we need to check if the frame
2722       // index is used for storing or loading, by inspecting the
2723       // memory operands.
2724       if (stores && loads) {
2725         for (auto *MMO : MI->memoperands()) {
2726           const PseudoSourceValue *PSV = MMO->getPseudoValue();
2727           if (PSV == nullptr) continue;
2728           const FixedStackPseudoSourceValue *Value =
2729             dyn_cast<FixedStackPseudoSourceValue>(PSV);
2730           if (Value == nullptr) continue;
2731           if (Value->getFrameIndex() != FI) continue;
2732 
2733           if (MMO->isStore())
2734             loads = false;
2735           else
2736             stores = false;
2737           break;
2738         }
2739         if (loads == stores)
2740           report("Missing fixed stack memoperand.", MI);
2741       }
2742       if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2743         report("Instruction loads from dead spill slot", MO, MONum);
2744         OS << "Live stack: " << LI << '\n';
2745       }
2746       if (stores && !LI.liveAt(Idx.getRegSlot())) {
2747         report("Instruction stores to dead spill slot", MO, MONum);
2748         OS << "Live stack: " << LI << '\n';
2749       }
2750     }
2751     break;
2752 
2753   case MachineOperand::MO_CFIIndex:
2754     if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2755       report("CFI instruction has invalid index", MO, MONum);
2756     break;
2757 
2758   default:
2759     break;
2760   }
2761 }
2762 
2763 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2764                                          unsigned MONum, SlotIndex UseIdx,
2765                                          const LiveRange &LR,
2766                                          Register VRegOrUnit,
2767                                          LaneBitmask LaneMask) {
2768   const MachineInstr *MI = MO->getParent();
2769 
2770   if (!LR.verify()) {
2771     report("invalid live range", MO, MONum);
2772     report_context_liverange(LR);
2773     report_context_vreg_regunit(VRegOrUnit);
2774     report_context(UseIdx);
2775     return;
2776   }
2777 
2778   LiveQueryResult LRQ = LR.Query(UseIdx);
2779   bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
  // Check whether we have a segment at the use. Note, however, that we only
  // need one live subregister range; the others may be dead.
2782   if (!HasValue && LaneMask.none()) {
2783     report("No live segment at use", MO, MONum);
2784     report_context_liverange(LR);
2785     report_context_vreg_regunit(VRegOrUnit);
2786     report_context(UseIdx);
2787   }
2788   if (MO->isKill() && !LRQ.isKill()) {
2789     report("Live range continues after kill flag", MO, MONum);
2790     report_context_liverange(LR);
2791     report_context_vreg_regunit(VRegOrUnit);
2792     if (LaneMask.any())
2793       report_context_lanemask(LaneMask);
2794     report_context(UseIdx);
2795   }
2796 }
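
// Example of the kill-flag check above (illustrative slot indexes, not from
// a real compilation):
//   %1 = ...             ; %1 has live range [16r,32r:0)
//   ... = use killed %1  ; the use at 32r ends the range: LRQ.isKill() holds
// Had the range been [16r,48r:0), the kill flag at 32r would trigger
// "Live range continues after kill flag".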
2797 
2798 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2799                                          unsigned MONum, SlotIndex DefIdx,
2800                                          const LiveRange &LR,
2801                                          Register VRegOrUnit,
2802                                          bool SubRangeCheck,
2803                                          LaneBitmask LaneMask) {
2804   if (!LR.verify()) {
2805     report("invalid live range", MO, MONum);
2806     report_context_liverange(LR);
2807     report_context_vreg_regunit(VRegOrUnit);
2808     if (LaneMask.any())
2809       report_context_lanemask(LaneMask);
2810     report_context(DefIdx);
2811   }
2812 
2813   if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole register, in which case its def slot
    // need not be the same as the def slot of MO. E.g. we may be checking a
    // "normal" subreg MO here while another early-clobber subreg MO in the
    // same instruction gives the whole register an early-clobber def slot
    // that differs from the def slot of the MO currently being checked. For
    // example:
    // %0 [16e,32r:0) 0@16e  L..3 [16e,32r:0) 0@16e  L..C [16r,32r:0) 0@16r
    // The check that an early-clobber def of the same superregister exists
    // somewhere is performed in visitMachineFunctionAfter().
2822     if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2823         !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2824         (VNI->def != DefIdx &&
2825          (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2826       report("Inconsistent valno->def", MO, MONum);
2827       report_context_liverange(LR);
2828       report_context_vreg_regunit(VRegOrUnit);
2829       if (LaneMask.any())
2830         report_context_lanemask(LaneMask);
2831       report_context(*VNI);
2832       report_context(DefIdx);
2833     }
2834   } else {
2835     report("No live segment at def", MO, MONum);
2836     report_context_liverange(LR);
2837     report_context_vreg_regunit(VRegOrUnit);
2838     if (LaneMask.any())
2839       report_context_lanemask(LaneMask);
2840     report_context(DefIdx);
2841   }
2842   // Check that, if the dead def flag is present, LiveInts agree.
2843   if (MO->isDead()) {
2844     LiveQueryResult LRQ = LR.Query(DefIdx);
2845     if (!LRQ.isDeadDef()) {
2846       assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2847       // A dead subreg def only tells us that the specific subreg is dead. There
2848       // could be other non-dead defs of other subregs, or we could have other
2849       // parts of the register being live through the instruction. So unless we
2850       // are checking liveness for a subrange it is ok for the live range to
2851       // continue, given that we have a dead def of a subregister.
2852       if (SubRangeCheck || MO->getSubReg() == 0) {
2853         report("Live range continues after dead def flag", MO, MONum);
2854         report_context_liverange(LR);
2855         report_context_vreg_regunit(VRegOrUnit);
2856         if (LaneMask.any())
2857           report_context_lanemask(LaneMask);
2858       }
2859     }
2860   }
2861 }
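
// Example of the dead-def check above (illustrative): for
//   dead %2 = ...        ; def at slot 16r
// LiveIntervals must record a dead def, i.e. a segment such as [16r,16d:0).
// A segment [16r,32r:0) would trigger "Live range continues after dead def
// flag" -- unless only a subregister was marked dead, as explained in the
// comment above.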
2862 
2863 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2864   const MachineInstr *MI = MO->getParent();
2865   const Register Reg = MO->getReg();
2866   const unsigned SubRegIdx = MO->getSubReg();
2867 
2868   const LiveInterval *LI = nullptr;
2869   if (LiveInts && Reg.isVirtual()) {
2870     if (LiveInts->hasInterval(Reg)) {
2871       LI = &LiveInts->getInterval(Reg);
2872       if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2873           !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2874         report("Live interval for subreg operand has no subranges", MO, MONum);
2875     } else {
2876       report("Virtual register has no live interval", MO, MONum);
2877     }
2878   }
2879 
2880   // Both use and def operands can read a register.
2881   if (MO->readsReg()) {
2882     if (MO->isKill())
2883       addRegWithSubRegs(regsKilled, Reg);
2884 
2885     // Check that LiveVars knows this kill (unless we are inside a bundle, in
2886     // which case we have already checked that LiveVars knows any kills on the
2887     // bundle header instead).
2888     if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2889         !MI->isBundledWithPred()) {
2890       LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2891       if (!is_contained(VI.Kills, MI))
2892         report("Kill missing from LiveVariables", MO, MONum);
2893     }
2894 
2895     // Check LiveInts liveness and kill.
2896     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2897       SlotIndex UseIdx;
2898       if (MI->isPHI()) {
2899         // PHI use occurs on the edge, so check for live out here instead.
2900         UseIdx = LiveInts->getMBBEndIdx(
2901           MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
2902       } else {
2903         UseIdx = LiveInts->getInstructionIndex(*MI);
2904       }
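      // For example (illustrative): for a PHI
      //   bb.2: %3:gpr32 = PHI %1, %bb.0, %2, %bb.1
      // the use of %1 is checked on the %bb.0 edge, i.e. at
      // getMBBEndIdx(%bb.0).getPrevSlot(), not at the PHI's own slot.
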
2905       // Check the cached regunit intervals.
2906       if (Reg.isPhysical() && !isReserved(Reg)) {
2907         for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
2908           if (MRI->isReservedRegUnit(Unit))
2909             continue;
2910           if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
2911             checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
2912         }
2913       }
2914 
2915       if (Reg.isVirtual()) {
2916         // This is a virtual register interval.
2917         checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2918 
2919         if (LI->hasSubRanges() && !MO->isDef()) {
2920           LaneBitmask MOMask = SubRegIdx != 0
2921                                    ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2922                                    : MRI->getMaxLaneMaskForVReg(Reg);
2923           LaneBitmask LiveInMask;
2924           for (const LiveInterval::SubRange &SR : LI->subranges()) {
2925             if ((MOMask & SR.LaneMask).none())
2926               continue;
2927             checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2928             LiveQueryResult LRQ = SR.Query(UseIdx);
2929             if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
2930               LiveInMask |= SR.LaneMask;
2931           }
          // At least part of the register has to be live at the use.
2933           if ((LiveInMask & MOMask).none()) {
2934             report("No live subrange at use", MO, MONum);
2935             report_context(*LI);
2936             report_context(UseIdx);
2937           }
          // For PHIs, all lanes should be live.
2939           if (MI->isPHI() && LiveInMask != MOMask) {
2940             report("Not all lanes of PHI source live at use", MO, MONum);
2941             report_context(*LI);
2942             report_context(UseIdx);
2943           }
2944         }
2945       }
2946     }
2947 
2948     // Use of a dead register.
2949     if (!regsLive.count(Reg)) {
2950       if (Reg.isPhysical()) {
2951         // Reserved registers may be used even when 'dead'.
2952         bool Bad = !isReserved(Reg);
        // We are fine if any subregister has a defined value.
        if (Bad) {
          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2957             if (regsLive.count(SubReg)) {
2958               Bad = false;
2959               break;
2960             }
2961           }
2962         }
        // If there is an additional implicit use of a super-register we stop
        // here. By definition we are fine if the super-register is not
        // (completely) dead; if the complete super-register is dead we will
        // get a report for its operand.
2967         if (Bad) {
2968           for (const MachineOperand &MOP : MI->uses()) {
2969             if (!MOP.isReg() || !MOP.isImplicit())
2970               continue;
2971 
2972             if (!MOP.getReg().isPhysical())
2973               continue;
2974 
2975             if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2976               Bad = false;
2977           }
2978         }
2979         if (Bad)
2980           report("Using an undefined physical register", MO, MONum);
2981       } else if (MRI->def_empty(Reg)) {
2982         report("Reading virtual register without a def", MO, MONum);
2983       } else {
2984         BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2985         // We don't know which virtual registers are live in, so only complain
2986         // if vreg was killed in this MBB. Otherwise keep track of vregs that
2987         // must be live in. PHI instructions are handled separately.
2988         if (MInfo.regsKilled.count(Reg))
2989           report("Using a killed virtual register", MO, MONum);
2990         else if (!MI->isPHI())
2991           MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2992       }
2993     }
2994   }
2995 
2996   if (MO->isDef()) {
2997     // Register defined.
2998     // TODO: verify that earlyclobber ops are not used.
2999     if (MO->isDead())
3000       addRegWithSubRegs(regsDead, Reg);
3001     else
3002       addRegWithSubRegs(regsDefined, Reg);
3003 
3004     // Verify SSA form.
3005     if (MRI->isSSA() && Reg.isVirtual() &&
3006         std::next(MRI->def_begin(Reg)) != MRI->def_end())
3007       report("Multiple virtual register defs in SSA form", MO, MONum);
3008 
3009     // Check LiveInts for a live segment, but only for virtual registers.
3010     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
3011       SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
3012       DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
3013 
3014       if (Reg.isVirtual()) {
3015         checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
3016 
3017         if (LI->hasSubRanges()) {
3018           LaneBitmask MOMask = SubRegIdx != 0
3019                                    ? TRI->getSubRegIndexLaneMask(SubRegIdx)
3020                                    : MRI->getMaxLaneMaskForVReg(Reg);
3021           for (const LiveInterval::SubRange &SR : LI->subranges()) {
3022             if ((SR.LaneMask & MOMask).none())
3023               continue;
3024             checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
3025           }
3026         }
3027       }
3028     }
3029   }
3030 }
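
// Example of the SSA check above (illustrative; the opcodes are hypothetical
// placeholders): in SSA form,
//   %4 = ADD_I32 %1, %2
//   %4 = SUB_I32 %1, %2   ; second def of %4
// triggers "Multiple virtual register defs in SSA form".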
3031 
3032 // This function gets called after visiting all instructions in a bundle. The
3033 // argument points to the bundle header.
3034 // Normal stand-alone instructions are also considered 'bundles', and this
3035 // function is called for all of them.
3036 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
3037   BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3038   set_union(MInfo.regsKilled, regsKilled);
3039   set_subtract(regsLive, regsKilled); regsKilled.clear();
3040   // Kill any masked registers.
3041   while (!regMasks.empty()) {
3042     const uint32_t *Mask = regMasks.pop_back_val();
3043     for (Register Reg : regsLive)
3044       if (Reg.isPhysical() &&
3045           MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
3046         regsDead.push_back(Reg);
3047   }
3048   set_subtract(regsLive, regsDead);   regsDead.clear();
3049   set_union(regsLive, regsDefined);   regsDefined.clear();
3050 }
3051 
3052 void
3053 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
3054   MBBInfoMap[MBB].regsLiveOut = regsLive;
3055   regsLive.clear();
3056 
3057   if (Indexes) {
3058     SlotIndex stop = Indexes->getMBBEndIdx(MBB);
3059     if (!(stop > lastIndex)) {
3060       report("Block ends before last instruction index", MBB);
      OS << "Block ends at " << stop << ", last instruction was at " << lastIndex
3062          << '\n';
3063     }
3064     lastIndex = stop;
3065   }
3066 }
3067 
3068 namespace {
// This implements a set of registers that serves as a filter: it can filter
// other sets by passing through elements not in the filter and blocking those
// that are. Any filter implicitly includes the full set of physical registers
// upon creation, thus filtering them all out. The filter itself, as a set,
// only grows, and needs to be as efficient as possible.
3074 struct VRegFilter {
3075   // Add elements to the filter itself. \pre Input set \p FromRegSet must have
3076   // no duplicates. Both virtual and physical registers are fine.
3077   template <typename RegSetT> void add(const RegSetT &FromRegSet) {
3078     SmallVector<Register, 0> VRegsBuffer;
3079     filterAndAdd(FromRegSet, VRegsBuffer);
3080   }
3081   // Filter \p FromRegSet through the filter and append passed elements into \p
3082   // ToVRegs. All elements appended are then added to the filter itself.
3083   // \returns true if anything changed.
3084   template <typename RegSetT>
3085   bool filterAndAdd(const RegSetT &FromRegSet,
3086                     SmallVectorImpl<Register> &ToVRegs) {
3087     unsigned SparseUniverse = Sparse.size();
3088     unsigned NewSparseUniverse = SparseUniverse;
3089     unsigned NewDenseSize = Dense.size();
3090     size_t Begin = ToVRegs.size();
3091     for (Register Reg : FromRegSet) {
3092       if (!Reg.isVirtual())
3093         continue;
3094       unsigned Index = Register::virtReg2Index(Reg);
3095       if (Index < SparseUniverseMax) {
3096         if (Index < SparseUniverse && Sparse.test(Index))
3097           continue;
3098         NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
3099       } else {
3100         if (Dense.count(Reg))
3101           continue;
3102         ++NewDenseSize;
3103       }
3104       ToVRegs.push_back(Reg);
3105     }
3106     size_t End = ToVRegs.size();
3107     if (Begin == End)
3108       return false;
    // Reserving space in sets once performs better than doing so continuously
    // and easily pays for the double look-ups (even in Dense with
    // SparseUniverseMax tuned all the way down) and the double iteration (the
    // second one is over a SmallVector, which is a lot cheaper than a DenseSet
    // or a BitVector).
3113     Sparse.resize(NewSparseUniverse);
3114     Dense.reserve(NewDenseSize);
3115     for (unsigned I = Begin; I < End; ++I) {
3116       Register Reg = ToVRegs[I];
3117       unsigned Index = Register::virtReg2Index(Reg);
3118       if (Index < SparseUniverseMax)
3119         Sparse.set(Index);
3120       else
3121         Dense.insert(Reg);
3122     }
3123     return true;
3124   }
3125 
3126 private:
3127   static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
  // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
  // are tracked by Dense. The only purpose of the threshold and the Dense set
  // is to have a reasonably growing memory usage in pathological cases (large
  // number of very sparse VRegFilter instances live at the same time). In
  // practice even in the worst-by-execution-time cases having all elements
  // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
  // space efficient than if tracked by Dense. The threshold is set to keep the
  // worst-case memory usage within 2x of figures determined empirically for
  // an "all Dense" scenario in such worst-by-execution-time cases.
3137   BitVector Sparse;
3138   DenseSet<unsigned> Dense;
3139 };
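
// Minimal usage sketch of VRegFilter (hedged, for illustration only; the
// variable names are hypothetical):
//   VRegFilter Filter;
//   Filter.add(KilledRegs);              // seed the filter
//   SmallVector<Register, 0> NewVRegs;
//   Filter.filterAndAdd(LiveOutRegs, NewVRegs);
//   // NewVRegs now holds the virtual registers of LiveOutRegs that were not
//   // already in the filter; all of them have been added to the filter too.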
3140 
3141 // Implements both a transfer function and a (binary, in-place) join operator
3142 // for a dataflow over register sets with set union join and filtering transfer
3143 // (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
3144 // Maintains out_b as its state, allowing for O(n) iteration over it at any
3145 // time, where n is the size of the set (as opposed to O(U) where U is the
3146 // universe). filter_b implicitly contains all physical registers at all times.
3147 class FilteringVRegSet {
3148   VRegFilter Filter;
3149   SmallVector<Register, 0> VRegs;
3150 
3151 public:
  // Set up the filter_b. \pre Input register set \p RS must have no duplicates.
3153   // Both virtual and physical registers are fine.
3154   template <typename RegSetT> void addToFilter(const RegSetT &RS) {
3155     Filter.add(RS);
3156   }
3157   // Passes \p RS through the filter_b (transfer function) and adds what's left
3158   // to itself (out_b).
3159   template <typename RegSetT> bool add(const RegSetT &RS) {
    // Double-duty the Filter: to keep VRegs a set (and the join operation a
    // set union), just add everything being added here to the Filter as well.
3162     return Filter.filterAndAdd(RS, VRegs);
3163   }
3164   using const_iterator = decltype(VRegs)::const_iterator;
3165   const_iterator begin() const { return VRegs.begin(); }
3166   const_iterator end() const { return VRegs.end(); }
3167   size_t size() const { return VRegs.size(); }
3168 };
3169 } // namespace
3170 
3171 // Calculate the largest possible vregsPassed sets. These are the registers that
3172 // can pass through an MBB live, but may not be live every time. It is assumed
3173 // that all vregsPassed sets are empty before the call.
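// In dataflow terms, a sketch of the computation below (illustrative
// notation, not verbatim from the code):
//   vregsPassed(B) = (union over preds P of regsLiveOut(P) U vregsPassed(P))
//                    \ (regsKilled(B) U regsLiveOut(B) U physregs)
// evaluated in a single reverse-post-order pass over the CFG.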
3174 void MachineVerifier::calcRegsPassed() {
3175   if (MF->empty())
3176     // ReversePostOrderTraversal doesn't handle empty functions.
3177     return;
3178 
3179   for (const MachineBasicBlock *MB :
3180        ReversePostOrderTraversal<const MachineFunction *>(MF)) {
3181     FilteringVRegSet VRegs;
3182     BBInfo &Info = MBBInfoMap[MB];
3183     assert(Info.reachable);
3184 
3185     VRegs.addToFilter(Info.regsKilled);
3186     VRegs.addToFilter(Info.regsLiveOut);
3187     for (const MachineBasicBlock *Pred : MB->predecessors()) {
3188       const BBInfo &PredInfo = MBBInfoMap[Pred];
3189       if (!PredInfo.reachable)
3190         continue;
3191 
3192       VRegs.add(PredInfo.regsLiveOut);
3193       VRegs.add(PredInfo.vregsPassed);
3194     }
3195     Info.vregsPassed.reserve(VRegs.size());
3196     Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
3197   }
3198 }
3199 
3200 // Calculate the set of virtual registers that must be passed through each basic
3201 // block in order to satisfy the requirements of successor blocks. This is very
3202 // similar to calcRegsPassed, only backwards.
3203 void MachineVerifier::calcRegsRequired() {
3204   // First push live-in regs to predecessors' vregsRequired.
3205   SmallPtrSet<const MachineBasicBlock*, 8> todo;
3206   for (const auto &MBB : *MF) {
3207     BBInfo &MInfo = MBBInfoMap[&MBB];
3208     for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3209       BBInfo &PInfo = MBBInfoMap[Pred];
3210       if (PInfo.addRequired(MInfo.vregsLiveIn))
3211         todo.insert(Pred);
3212     }
3213 
3214     // Handle the PHI node.
3215     for (const MachineInstr &MI : MBB.phis()) {
3216       for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
        // Skip operands that are not registers, or that do not read the
        // register (e.g. undef uses).
3218         if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3219           continue;
3220 
3221         // Get register and predecessor for one PHI edge.
3222         Register Reg = MI.getOperand(i).getReg();
3223         const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3224 
3225         BBInfo &PInfo = MBBInfoMap[Pred];
3226         if (PInfo.addRequired(Reg))
3227           todo.insert(Pred);
3228       }
3229     }
3230   }
3231 
3232   // Iteratively push vregsRequired to predecessors. This will converge to the
3233   // same final state regardless of DenseSet iteration order.
3234   while (!todo.empty()) {
3235     const MachineBasicBlock *MBB = *todo.begin();
3236     todo.erase(MBB);
3237     BBInfo &MInfo = MBBInfoMap[MBB];
3238     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3239       if (Pred == MBB)
3240         continue;
3241       BBInfo &SInfo = MBBInfoMap[Pred];
3242       if (SInfo.addRequired(MInfo.vregsRequired))
3243         todo.insert(Pred);
3244     }
3245   }
3246 }
3247 
3248 // Check PHI instructions at the beginning of MBB. It is assumed that
3249 // calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3250 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3251   BBInfo &MInfo = MBBInfoMap[&MBB];
3252 
3253   SmallPtrSet<const MachineBasicBlock*, 8> seen;
3254   for (const MachineInstr &Phi : MBB) {
3255     if (!Phi.isPHI())
3256       break;
3257     seen.clear();
3258 
3259     const MachineOperand &MODef = Phi.getOperand(0);
3260     if (!MODef.isReg() || !MODef.isDef()) {
3261       report("Expected first PHI operand to be a register def", &MODef, 0);
3262       continue;
3263     }
3264     if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3265         MODef.isEarlyClobber() || MODef.isDebug())
3266       report("Unexpected flag on PHI operand", &MODef, 0);
3267     Register DefReg = MODef.getReg();
3268     if (!DefReg.isVirtual())
3269       report("Expected first PHI operand to be a virtual register", &MODef, 0);
3270 
3271     for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3272       const MachineOperand &MO0 = Phi.getOperand(I);
3273       if (!MO0.isReg()) {
3274         report("Expected PHI operand to be a register", &MO0, I);
3275         continue;
3276       }
3277       if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3278           MO0.isDebug() || MO0.isTied())
3279         report("Unexpected flag on PHI operand", &MO0, I);
3280 
3281       const MachineOperand &MO1 = Phi.getOperand(I + 1);
3282       if (!MO1.isMBB()) {
3283         report("Expected PHI operand to be a basic block", &MO1, I + 1);
3284         continue;
3285       }
3286 
3287       const MachineBasicBlock &Pre = *MO1.getMBB();
3288       if (!Pre.isSuccessor(&MBB)) {
3289         report("PHI input is not a predecessor block", &MO1, I + 1);
3290         continue;
3291       }
3292 
3293       if (MInfo.reachable) {
3294         seen.insert(&Pre);
3295         BBInfo &PrInfo = MBBInfoMap[&Pre];
3296         if (!MO0.isUndef() && PrInfo.reachable &&
3297             !PrInfo.isLiveOut(MO0.getReg()))
3298           report("PHI operand is not live-out from predecessor", &MO0, I);
3299       }
3300     }
3301 
3302     // Did we see all predecessors?
3303     if (MInfo.reachable) {
3304       for (MachineBasicBlock *Pred : MBB.predecessors()) {
3305         if (!seen.count(Pred)) {
3306           report("Missing PHI operand", &Phi);
3307           OS << printMBBReference(*Pred)
3308              << " is a predecessor according to the CFG.\n";
3309         }
3310       }
3311     }
3312   }
3313 }
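
// A well-formed PHI checked above looks like (illustrative):
//   %5:gpr = PHI %1, %bb.0, %2, %bb.1
// i.e. a virtual register def followed by (value, predecessor-block) pairs,
// with exactly one pair for each CFG predecessor, and with each incoming
// value live-out of its predecessor.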
3314 
3315 static void
3316 verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT,
3317                          std::function<void(const Twine &Message)> FailureCB,
3318                          raw_ostream &OS) {
3319   MachineConvergenceVerifier CV;
3320   CV.initialize(&OS, FailureCB, MF);
3321 
3322   for (const auto &MBB : MF) {
3323     CV.visit(MBB);
3324     for (const auto &MI : MBB.instrs())
3325       CV.visit(MI);
3326   }
3327 
3328   if (CV.sawTokens()) {
3329     DT.recalculate(const_cast<MachineFunction &>(MF));
3330     CV.verify(DT);
3331   }
3332 }
3333 
3334 void MachineVerifier::visitMachineFunctionAfter() {
3335   auto FailureCB = [this](const Twine &Message) {
3336     report(Message.str().c_str(), MF);
3337   };
3338   verifyConvergenceControl(*MF, DT, FailureCB, OS);
3339 
3340   calcRegsPassed();
3341 
3342   for (const MachineBasicBlock &MBB : *MF)
3343     checkPHIOps(MBB);
3344 
3345   // Now check liveness info if available
3346   calcRegsRequired();
3347 
3348   // Check for killed virtual registers that should be live out.
3349   for (const auto &MBB : *MF) {
3350     BBInfo &MInfo = MBBInfoMap[&MBB];
3351     for (Register VReg : MInfo.vregsRequired)
3352       if (MInfo.regsKilled.count(VReg)) {
3353         report("Virtual register killed in block, but needed live out.", &MBB);
3354         OS << "Virtual register " << printReg(VReg)
3355            << " is used after the block.\n";
3356       }
3357   }
3358 
3359   if (!MF->empty()) {
3360     BBInfo &MInfo = MBBInfoMap[&MF->front()];
3361     for (Register VReg : MInfo.vregsRequired) {
3362       report("Virtual register defs don't dominate all uses.", MF);
3363       report_context_vreg(VReg);
3364     }
3365   }
3366 
3367   if (LiveVars)
3368     verifyLiveVariables();
3369   if (LiveInts)
3370     verifyLiveIntervals();
3371 
  // Check the live-in list of each MBB. If a register is live into MBB, check
  // that the register is in regsLiveOut of each predecessor block. Since
  // this must come from a definition in the predecessor or its live-in
  // list, this will catch a live-through case where the predecessor does not
  // have the register in its live-in list. This currently only checks
  // registers that have no aliases, are not allocatable, and are not
  // reserved, which could mean a condition code register for instance.
3379   if (MRI->tracksLiveness())
3380     for (const auto &MBB : *MF)
3381       for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
3382         MCPhysReg LiveInReg = P.PhysReg;
3383         bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3384         if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3385           continue;
3386         for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3387           BBInfo &PInfo = MBBInfoMap[Pred];
3388           if (!PInfo.regsLiveOut.count(LiveInReg)) {
3389             report("Live in register not found to be live out from predecessor.",
3390                    &MBB);
3391             OS << TRI->getName(LiveInReg) << " not found to be live out from "
3392                << printMBBReference(*Pred) << '\n';
3393           }
3394         }
3395       }
3396 
3397   for (auto CSInfo : MF->getCallSitesInfo())
3398     if (!CSInfo.first->isCall())
3399       report("Call site info referencing instruction that is not call", MF);
3400 
3401   // If there's debug-info, check that we don't have any duplicate value
3402   // tracking numbers.
3403   if (MF->getFunction().getSubprogram()) {
3404     DenseSet<unsigned> SeenNumbers;
3405     for (const auto &MBB : *MF) {
3406       for (const auto &MI : MBB) {
3407         if (auto Num = MI.peekDebugInstrNum()) {
3408           auto Result = SeenNumbers.insert((unsigned)Num);
3409           if (!Result.second)
3410             report("Instruction has a duplicated value tracking number", &MI);
3411         }
3412       }
3413     }
3414   }
3415 }
3416 
3417 void MachineVerifier::verifyLiveVariables() {
3418   assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3419   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3420     Register Reg = Register::index2VirtReg(I);
3421     LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3422     for (const auto &MBB : *MF) {
3423       BBInfo &MInfo = MBBInfoMap[&MBB];
3424 
3425       // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3426       if (MInfo.vregsRequired.count(Reg)) {
3427         if (!VI.AliveBlocks.test(MBB.getNumber())) {
3428           report("LiveVariables: Block missing from AliveBlocks", &MBB);
3429           OS << "Virtual register " << printReg(Reg)
3430              << " must be live through the block.\n";
3431         }
3432       } else {
3433         if (VI.AliveBlocks.test(MBB.getNumber())) {
3434           report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3435           OS << "Virtual register " << printReg(Reg)
3436              << " is not needed live through the block.\n";
3437         }
3438       }
3439     }
3440   }
3441 }
3442 
3443 void MachineVerifier::verifyLiveIntervals() {
3444   assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3445   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3446     Register Reg = Register::index2VirtReg(I);
3447 
3448     // Spilling and splitting may leave unused registers around. Skip them.
3449     if (MRI->reg_nodbg_empty(Reg))
3450       continue;
3451 
3452     if (!LiveInts->hasInterval(Reg)) {
3453       report("Missing live interval for virtual register", MF);
3454       OS << printReg(Reg, TRI) << " still has defs or uses\n";
3455       continue;
3456     }
3457 
3458     const LiveInterval &LI = LiveInts->getInterval(Reg);
3459     assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3460     verifyLiveInterval(LI);
3461   }
3462 
3463   // Verify all the cached regunit intervals.
3464   for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
3465     if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
3466       verifyLiveRange(*LR, i);
3467 }
3468 
3469 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
3470                                            const VNInfo *VNI, Register Reg,
3471                                            LaneBitmask LaneMask) {
3472   if (VNI->isUnused())
3473     return;
3474 
3475   const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
3476 
3477   if (!DefVNI) {
3478     report("Value not live at VNInfo def and not marked unused", MF);
3479     report_context(LR, Reg, LaneMask);
3480     report_context(*VNI);
3481     return;
3482   }
3483 
3484   if (DefVNI != VNI) {
3485     report("Live segment at def has different VNInfo", MF);
3486     report_context(LR, Reg, LaneMask);
3487     report_context(*VNI);
3488     return;
3489   }
3490 
3491   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3492   if (!MBB) {
3493     report("Invalid VNInfo definition index", MF);
3494     report_context(LR, Reg, LaneMask);
3495     report_context(*VNI);
3496     return;
3497   }
3498 
3499   if (VNI->isPHIDef()) {
3500     if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3501       report("PHIDef VNInfo is not defined at MBB start", MBB);
3502       report_context(LR, Reg, LaneMask);
3503       report_context(*VNI);
3504     }
3505     return;
3506   }
3507 
3508   // Non-PHI def.
3509   const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3510   if (!MI) {
3511     report("No instruction at VNInfo def index", MBB);
3512     report_context(LR, Reg, LaneMask);
3513     report_context(*VNI);
3514     return;
3515   }
3516 
3517   if (Reg != 0) {
3518     bool hasDef = false;
3519     bool isEarlyClobber = false;
3520     for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3521       if (!MOI->isReg() || !MOI->isDef())
3522         continue;
3523       if (Reg.isVirtual()) {
3524         if (MOI->getReg() != Reg)
3525           continue;
3526       } else {
3527         if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
3528           continue;
3529       }
3530       if (LaneMask.any() &&
3531           (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3532         continue;
3533       hasDef = true;
3534       if (MOI->isEarlyClobber())
3535         isEarlyClobber = true;
3536     }
3537 
3538     if (!hasDef) {
3539       report("Defining instruction does not modify register", MI);
3540       report_context(LR, Reg, LaneMask);
3541       report_context(*VNI);
3542     }
3543 
3544     // Early clobber defs begin at USE slots, but other defs must begin at
3545     // DEF slots.
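    // E.g. (illustrative slot indexes): at instruction index 16, an
    // early-clobber def uses slot 16e while a normal def uses slot 16r; a
    // mismatch between the def kind and the slot kind is reported below.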
3546     if (isEarlyClobber) {
3547       if (!VNI->def.isEarlyClobber()) {
3548         report("Early clobber def must be at an early-clobber slot", MBB);
3549         report_context(LR, Reg, LaneMask);
3550         report_context(*VNI);
3551       }
3552     } else if (!VNI->def.isRegister()) {
3553       report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3554       report_context(LR, Reg, LaneMask);
3555       report_context(*VNI);
3556     }
3557   }
3558 }
3559 
3560 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3561                                              const LiveRange::const_iterator I,
3562                                              Register Reg,
3563                                              LaneBitmask LaneMask) {
3564   const LiveRange::Segment &S = *I;
3565   const VNInfo *VNI = S.valno;
3566   assert(VNI && "Live segment has no valno");
3567 
3568   if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3569     report("Foreign valno in live segment", MF);
3570     report_context(LR, Reg, LaneMask);
3571     report_context(S);
3572     report_context(*VNI);
3573   }
3574 
3575   if (VNI->isUnused()) {
3576     report("Live segment valno is marked unused", MF);
3577     report_context(LR, Reg, LaneMask);
3578     report_context(S);
3579   }
3580 
3581   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3582   if (!MBB) {
3583     report("Bad start of live segment, no basic block", MF);
3584     report_context(LR, Reg, LaneMask);
3585     report_context(S);
3586     return;
3587   }
3588   SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3589   if (S.start != MBBStartIdx && S.start != VNI->def) {
3590     report("Live segment must begin at MBB entry or valno def", MBB);
3591     report_context(LR, Reg, LaneMask);
3592     report_context(S);
3593   }
3594 
3595   const MachineBasicBlock *EndMBB =
3596     LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3597   if (!EndMBB) {
3598     report("Bad end of live segment, no basic block", MF);
3599     report_context(LR, Reg, LaneMask);
3600     report_context(S);
3601     return;
3602   }
3603 
3604   // Checks for non-live-out segments.
3605   if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
    // RegUnit intervals are allowed to have dead PHI-defs.
3607     if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3608         S.end == VNI->def.getDeadSlot())
3609       return;
3610 
    // The live segment ends inside EndMBB.
3612     const MachineInstr *MI =
3613         LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3614     if (!MI) {
3615       report("Live segment doesn't end at a valid instruction", EndMBB);
3616       report_context(LR, Reg, LaneMask);
3617       report_context(S);
3618       return;
3619     }
3620 
3621     // The block slot must refer to a basic block boundary.
3622     if (S.end.isBlock()) {
3623       report("Live segment ends at B slot of an instruction", EndMBB);
3624       report_context(LR, Reg, LaneMask);
3625       report_context(S);
3626     }
3627 
3628     if (S.end.isDead()) {
3629       // Segment ends on the dead slot.
3630       // That means there must be a dead def.
3631       if (!SlotIndex::isSameInstr(S.start, S.end)) {
3632         report("Live segment ending at dead slot spans instructions", EndMBB);
3633         report_context(LR, Reg, LaneMask);
3634         report_context(S);
3635       }
3636     }
3637 
3638     // After tied operands are rewritten, a live segment can only end at an
3639     // early-clobber slot if it is being redefined by an early-clobber def.
3640     // TODO: Before tied operands are rewritten, a live segment can only end at
3641     // an early-clobber slot if the last use is tied to an early-clobber def.
3642     if (MF->getProperties().hasProperty(
3643             MachineFunctionProperties::Property::TiedOpsRewritten) &&
3644         S.end.isEarlyClobber()) {
3645       if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3646         report("Live segment ending at early clobber slot must be "
3647                "redefined by an EC def in the same instruction",
3648                EndMBB);
3649         report_context(LR, Reg, LaneMask);
3650         report_context(S);
3651       }
3652     }
3653 
3654     // The following checks only apply to virtual registers. Physreg liveness
3655     // is too weird to check.
3656     if (Reg.isVirtual()) {
3657       // A live segment can end with either a redefinition, a kill flag on a
3658       // use, or a dead flag on a def.
3659       bool hasRead = false;
3660       bool hasSubRegDef = false;
3661       bool hasDeadDef = false;
3662       for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3663         if (!MOI->isReg() || MOI->getReg() != Reg)
3664           continue;
3665         unsigned Sub = MOI->getSubReg();
3666         LaneBitmask SLM =
3667             Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3668         if (MOI->isDef()) {
3669           if (Sub != 0) {
3670             hasSubRegDef = true;
3671             // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3672             // mask for subregister defs. Read-undef defs will be handled by
3673             // readsReg below.
3674             SLM = ~SLM;
3675           }
3676           if (MOI->isDead())
3677             hasDeadDef = true;
3678         }
3679         if (LaneMask.any() && (LaneMask & SLM).none())
3680           continue;
3681         if (MOI->readsReg())
3682           hasRead = true;
3683       }
3684       if (S.end.isDead()) {
        // Make sure that the corresponding machine operand for a "dead" live
        // range has the dead flag. We cannot perform this check for
        // subregister live ranges, as partially dead values are allowed.
3688         if (LaneMask.none() && !hasDeadDef) {
3689           report(
3690               "Instruction ending live segment on dead slot has no dead flag",
3691               MI);
3692           report_context(LR, Reg, LaneMask);
3693           report_context(S);
3694         }
3695       } else {
3696         if (!hasRead) {
3697           // When tracking subregister liveness, the main range must start new
3698           // values on partial register writes, even if there is no read.
3699           if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3700               !hasSubRegDef) {
3701             report("Instruction ending live segment doesn't read the register",
3702                    MI);
3703             report_context(LR, Reg, LaneMask);
3704             report_context(S);
3705           }
3706         }
3707       }
3708     }
3709   }
3710 
3711   // Now check all the basic blocks in this live segment.
3712   MachineFunction::const_iterator MFI = MBB->getIterator();
3713   // Is this live segment the beginning of a non-PHIDef VN?
3714   if (S.start == VNI->def && !VNI->isPHIDef()) {
3715     // Not live-in to any blocks.
3716     if (MBB == EndMBB)
3717       return;
3718     // Skip this block.
3719     ++MFI;
3720   }
3721 
3722   SmallVector<SlotIndex, 4> Undefs;
3723   if (LaneMask.any()) {
3724     LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3725     OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3726   }
3727 
3728   while (true) {
3729     assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3730     // We don't know how to track physregs into a landing pad.
3731     if (!Reg.isVirtual() && MFI->isEHPad()) {
3732       if (&*MFI == EndMBB)
3733         break;
3734       ++MFI;
3735       continue;
3736     }
3737 
3738     // Is VNI a PHI-def in the current block?
3739     bool IsPHI = VNI->isPHIDef() &&
3740       VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3741 
3742     // Check that VNI is live-out of all predecessors.
3743     for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3744       SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
      // For a landing-pad successor, the value must be live-out of the
      // predecessor at the last call, not at the block end.
3746       if (MFI->isEHPad()) {
3747         for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3748           if (MI.isCall()) {
3749             PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3750             break;
3751           }
3752         }
3753       }
3754       const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3755 
      // All predecessors must have a live-out value. However, for a PHI
      // instruction with subregister intervals, only one of the subregisters
      // (not necessarily the current one) needs to be defined.
3760       if (!PVNI && (LaneMask.none() || !IsPHI)) {
3761         if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3762           continue;
3763         report("Register not marked live out of predecessor", Pred);
3764         report_context(LR, Reg, LaneMask);
3765         report_context(*VNI);
3766         OS << " live into " << printMBBReference(*MFI) << '@'
3767            << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd
3768            << '\n';
3769         continue;
3770       }
3771 
3772       // Only PHI-defs can take different predecessor values.
3773       if (!IsPHI && PVNI != VNI) {
3774         report("Different value live out of predecessor", Pred);
3775         report_context(LR, Reg, LaneMask);
3776         OS << "Valno #" << PVNI->id << " live out of "
3777            << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" << VNI->id
3778            << " live into " << printMBBReference(*MFI) << '@'
3779            << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3780       }
3781     }
3782     if (&*MFI == EndMBB)
3783       break;
3784     ++MFI;
3785   }
3786 }
3787 
3788 void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3789                                       LaneBitmask LaneMask) {
3790   for (const VNInfo *VNI : LR.valnos)
3791     verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3792 
3793   for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3794     verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3795 }
3796 
3797 void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3798   Register Reg = LI.reg();
3799   assert(Reg.isVirtual());
3800   verifyLiveRange(LI, Reg);
3801 
3802   if (LI.hasSubRanges()) {
3803     LaneBitmask Mask;
3804     LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3805     for (const LiveInterval::SubRange &SR : LI.subranges()) {
3806       if ((Mask & SR.LaneMask).any()) {
3807         report("Lane masks of sub ranges overlap in live interval", MF);
3808         report_context(LI);
3809       }
3810       if ((SR.LaneMask & ~MaxMask).any()) {
3811         report("Subrange lanemask is invalid", MF);
3812         report_context(LI);
3813       }
3814       if (SR.empty()) {
3815         report("Subrange must not be empty", MF);
3816         report_context(SR, LI.reg(), SR.LaneMask);
3817       }
3818       Mask |= SR.LaneMask;
3819       verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3820       if (!LI.covers(SR)) {
3821         report("A Subrange is not covered by the main range", MF);
3822         report_context(LI);
3823       }
3824     }
3825   }
3826 
3827   // Check the LI only has one connected component.
3828   ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3829   unsigned NumComp = ConEQ.Classify(LI);
3830   if (NumComp > 1) {
3831     report("Multiple connected components in live interval", MF);
3832     report_context(LI);
3833     for (unsigned comp = 0; comp != NumComp; ++comp) {
3834       OS << comp << ": valnos";
3835       for (const VNInfo *I : LI.valnos)
3836         if (comp == ConEQ.getEqClass(I))
3837           OS << ' ' << I->id;
3838       OS << '\n';
3839     }
3840   }
3841 }
3842 
3843 namespace {
3844 
  // FrameSetup and FrameDestroy can have zero adjustment, so with a single
  // integer we cannot tell whether the state came from a FrameSetup or a
  // FrameDestroy when the value is zero.
  // We use a bool plus an integer to capture the stack state.
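  // For example (values illustrative): immediately after a "FrameSetup 0"
  // the adjustment is still 0, but ExitIsSetup == true distinguishes that
  // state from "no frame in progress".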
3849   struct StackStateOfBB {
3850     StackStateOfBB() = default;
3851     StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
3852       EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3853       ExitIsSetup(ExitSetup) {}
3854 
3855     // Can be negative, which means we are setting up a frame.
3856     int EntryValue = 0;
3857     int ExitValue = 0;
3858     bool EntryIsSetup = false;
3859     bool ExitIsSetup = false;
3860   };
3861 
3862 } // end anonymous namespace
3863 
/// Make sure that, on every path through the CFG, a FrameSetup <n> is always
/// followed by a FrameDestroy <n>, that stack adjustments are identical on all
/// CFG edges to a merge point, and that the frame is destroyed at the end of
/// a return block.
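///
/// For example (illustrative pseudo-instructions; the real opcodes are
/// target-specific):
///   FRAME_SETUP 16    ; ExitValue -= 16, ExitIsSetup = true
///   CALL @f
///   FRAME_DESTROY 16  ; ExitValue += 16, ExitIsSetup = false
/// leaves ExitValue == 0 and ExitIsSetup == false, as required at a return.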
3867 void MachineVerifier::verifyStackFrame() {
3868   unsigned FrameSetupOpcode   = TII->getCallFrameSetupOpcode();
3869   unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3870   if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3871     return;
3872 
3873   SmallVector<StackStateOfBB, 8> SPState;
3874   SPState.resize(MF->getNumBlockIDs());
3875   df_iterator_default_set<const MachineBasicBlock*> Reachable;
3876 
3877   // Visit the MBBs in DFS order.
3878   for (df_ext_iterator<const MachineFunction *,
3879                        df_iterator_default_set<const MachineBasicBlock *>>
3880        DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3881        DFI != DFE; ++DFI) {
3882     const MachineBasicBlock *MBB = *DFI;
3883 
3884     StackStateOfBB BBState;
3885     // Check the exit state of the DFS stack predecessor.
3886     if (DFI.getPathLength() >= 2) {
3887       const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3888       assert(Reachable.count(StackPred) &&
3889              "DFS stack predecessor is already visited.\n");
3890       BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3891       BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3892       BBState.ExitValue = BBState.EntryValue;
3893       BBState.ExitIsSetup = BBState.EntryIsSetup;
3894     }
3895 
3896     if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3897       report("Call frame size on entry does not match value computed from "
3898              "predecessor",
3899              MBB);
3900       OS << "Call frame size on entry " << MBB->getCallFrameSize()
3901          << " does not match value computed from predecessor "
3902          << -BBState.EntryValue << '\n';
3903     }
3904 
3905     // Update stack state by checking contents of MBB.
3906     for (const auto &I : *MBB) {
3907       if (I.getOpcode() == FrameSetupOpcode) {
3908         if (BBState.ExitIsSetup)
3909           report("FrameSetup is after another FrameSetup", &I);
3910         if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3911           report("AdjustsStack not set in presence of a frame pseudo "
3912                  "instruction.", &I);
3913         BBState.ExitValue -= TII->getFrameTotalSize(I);
3914         BBState.ExitIsSetup = true;
3915       }
3916 
3917       if (I.getOpcode() == FrameDestroyOpcode) {
3918         int Size = TII->getFrameTotalSize(I);
3919         if (!BBState.ExitIsSetup)
3920           report("FrameDestroy is not after a FrameSetup", &I);
3921         int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3922                                                BBState.ExitValue;
3923         if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3924           report("FrameDestroy <n> is after FrameSetup <m>", &I);
3925           OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
3926              << AbsSPAdj << ">.\n";
3927         }
3928         if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3929           report("AdjustsStack not set in presence of a frame pseudo "
3930                  "instruction.", &I);
3931         BBState.ExitValue += Size;
3932         BBState.ExitIsSetup = false;
3933       }
3934     }
3935     SPState[MBB->getNumber()] = BBState;
3936 
3937     // Make sure the exit state of any predecessor is consistent with the entry
3938     // state.
3939     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3940       if (Reachable.count(Pred) &&
3941           (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3942            SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3943         report("The exit stack state of a predecessor is inconsistent.", MBB);
3944         OS << "Predecessor " << printMBBReference(*Pred) << " has exit state ("
3945            << SPState[Pred->getNumber()].ExitValue << ", "
3946            << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3947            << printMBBReference(*MBB) << " has entry state ("
3948            << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3949       }
3950     }
3951 
3952     // Make sure the entry state of any successor is consistent with the exit
3953     // state.
3954     for (const MachineBasicBlock *Succ : MBB->successors()) {
3955       if (Reachable.count(Succ) &&
3956           (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3957            SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3958         report("The entry stack state of a successor is inconsistent.", MBB);
3959         OS << "Successor " << printMBBReference(*Succ) << " has entry state ("
3960            << SPState[Succ->getNumber()].EntryValue << ", "
3961            << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3962            << printMBBReference(*MBB) << " has exit state ("
3963            << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3964       }
3965     }
3966 
3967     // Make sure a basic block with return ends with zero stack adjustment.
3968     if (!MBB->empty() && MBB->back().isReturn()) {
3969       if (BBState.ExitIsSetup)
3970         report("A return block ends with a FrameSetup.", MBB);
3971       if (BBState.ExitValue)
3972         report("A return block ends with a nonzero stack adjustment.", MBB);
3973     }
3974   }
3975 }
3976