xref: /llvm-project/llvm/lib/CodeGen/MachineVerifier.cpp (revision 46f9c6cf0b919ce660f49e5fd9995eb893fa9aa4)
1 //===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Pass to verify generated machine code. The following is checked:
10 //
11 // Operand counts: All explicit operands must be present.
12 //
13 // Register classes: All physical and virtual register operands must be
14 // compatible with the register class required by the instruction descriptor.
15 //
16 // Register live intervals: Registers must be defined only once, and must be
17 // defined before use.
18 //
19 // The machine code verifier is enabled from LLVMTargetMachine.cpp with the
20 // command-line option -verify-machineinstrs, or by defining the environment
21 // variable LLVM_VERIFY_MACHINEINSTRS to the name of a file that will receive
22 // the verifier errors.
23 //===----------------------------------------------------------------------===//
24 
25 #include "LiveRangeCalc.h"
26 #include "llvm/ADT/BitVector.h"
27 #include "llvm/ADT/DenseMap.h"
28 #include "llvm/ADT/DenseSet.h"
29 #include "llvm/ADT/DepthFirstIterator.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SetOperations.h"
32 #include "llvm/ADT/SmallPtrSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/StringRef.h"
35 #include "llvm/ADT/Twine.h"
36 #include "llvm/Analysis/EHPersonalities.h"
37 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
38 #include "llvm/CodeGen/LiveInterval.h"
39 #include "llvm/CodeGen/LiveIntervals.h"
40 #include "llvm/CodeGen/LiveStacks.h"
41 #include "llvm/CodeGen/LiveVariables.h"
42 #include "llvm/CodeGen/MachineBasicBlock.h"
43 #include "llvm/CodeGen/MachineFrameInfo.h"
44 #include "llvm/CodeGen/MachineFunction.h"
45 #include "llvm/CodeGen/MachineFunctionPass.h"
46 #include "llvm/CodeGen/MachineInstr.h"
47 #include "llvm/CodeGen/MachineInstrBundle.h"
48 #include "llvm/CodeGen/MachineMemOperand.h"
49 #include "llvm/CodeGen/MachineOperand.h"
50 #include "llvm/CodeGen/MachineRegisterInfo.h"
51 #include "llvm/CodeGen/PseudoSourceValue.h"
52 #include "llvm/CodeGen/SlotIndexes.h"
53 #include "llvm/CodeGen/StackMaps.h"
54 #include "llvm/CodeGen/TargetInstrInfo.h"
55 #include "llvm/CodeGen/TargetOpcodes.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/TargetSubtargetInfo.h"
58 #include "llvm/IR/BasicBlock.h"
59 #include "llvm/IR/Function.h"
60 #include "llvm/IR/InlineAsm.h"
61 #include "llvm/IR/Instructions.h"
62 #include "llvm/MC/LaneBitmask.h"
63 #include "llvm/MC/MCAsmInfo.h"
64 #include "llvm/MC/MCInstrDesc.h"
65 #include "llvm/MC/MCRegisterInfo.h"
66 #include "llvm/MC/MCTargetOptions.h"
67 #include "llvm/Pass.h"
68 #include "llvm/Support/Casting.h"
69 #include "llvm/Support/ErrorHandling.h"
70 #include "llvm/Support/LowLevelTypeImpl.h"
71 #include "llvm/Support/MathExtras.h"
72 #include "llvm/Support/raw_ostream.h"
73 #include "llvm/Target/TargetMachine.h"
74 #include <algorithm>
75 #include <cassert>
76 #include <cstddef>
77 #include <cstdint>
78 #include <iterator>
79 #include <string>
80 #include <utility>
81 
82 using namespace llvm;
83 
84 namespace {
85 
86   struct MachineVerifier {
87     MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
88 
89     unsigned verify(MachineFunction &MF);
90 
91     Pass *const PASS;
92     const char *Banner;
93     const MachineFunction *MF;
94     const TargetMachine *TM;
95     const TargetInstrInfo *TII;
96     const TargetRegisterInfo *TRI;
97     const MachineRegisterInfo *MRI;
98 
99     unsigned foundErrors;
100 
101     // Avoid querying the MachineFunctionProperties for each operand.
102     bool isFunctionRegBankSelected;
103     bool isFunctionSelected;
104 
105     using RegVector = SmallVector<unsigned, 16>;
106     using RegMaskVector = SmallVector<const uint32_t *, 4>;
107     using RegSet = DenseSet<unsigned>;
108     using RegMap = DenseMap<unsigned, const MachineInstr *>;
109     using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;
110 
111     const MachineInstr *FirstNonPHI;
112     const MachineInstr *FirstTerminator;
113     BlockSet FunctionBlocks;
114 
115     BitVector regsReserved;
116     RegSet regsLive;
117     RegVector regsDefined, regsDead, regsKilled;
118     RegMaskVector regMasks;
119 
120     SlotIndex lastIndex;
121 
122     // Add Reg and any sub-registers to RV
123     void addRegWithSubRegs(RegVector &RV, unsigned Reg) {
124       RV.push_back(Reg);
125       if (TargetRegisterInfo::isPhysicalRegister(Reg))
126         for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
127           RV.push_back(*SubRegs);
128     }
129 
130     struct BBInfo {
131       // Is this MBB reachable from the MF entry point?
132       bool reachable = false;
133 
134       // Vregs that must be live in because they are used without being
135       // defined. Map value is the user.
136       RegMap vregsLiveIn;
137 
138       // Regs killed in MBB. They may be defined again, and will then be in both
139       // regsKilled and regsLiveOut.
140       RegSet regsKilled;
141 
142       // Regs defined in MBB and live out. Note that vregs passing through may
143       // be live out without being mentioned here.
144       RegSet regsLiveOut;
145 
146       // Vregs that pass through MBB untouched. This set is disjoint from
147       // regsKilled and regsLiveOut.
148       RegSet vregsPassed;
149 
150       // Vregs that must pass through MBB because they are needed by a successor
151       // block. This set is disjoint from regsLiveOut.
152       RegSet vregsRequired;
153 
154       // Set versions of block's predecessor and successor lists.
155       BlockSet Preds, Succs;
156 
157       BBInfo() = default;
158 
159       // Add register to vregsPassed if it belongs there. Return true if
160       // anything changed.
161       bool addPassed(unsigned Reg) {
162         if (!TargetRegisterInfo::isVirtualRegister(Reg))
163           return false;
164         if (regsKilled.count(Reg) || regsLiveOut.count(Reg))
165           return false;
166         return vregsPassed.insert(Reg).second;
167       }
168 
169       // Same for a full set.
170       bool addPassed(const RegSet &RS) {
171         bool changed = false;
172         for (RegSet::const_iterator I = RS.begin(), E = RS.end(); I != E; ++I)
173           if (addPassed(*I))
174             changed = true;
175         return changed;
176       }
177 
178       // Add register to vregsRequired if it belongs there. Return true if
179       // anything changed.
180       bool addRequired(unsigned Reg) {
181         if (!TargetRegisterInfo::isVirtualRegister(Reg))
182           return false;
183         if (regsLiveOut.count(Reg))
184           return false;
185         return vregsRequired.insert(Reg).second;
186       }
187 
188       // Same for a full set.
189       bool addRequired(const RegSet &RS) {
190         bool changed = false;
191         for (RegSet::const_iterator I = RS.begin(), E = RS.end(); I != E; ++I)
192           if (addRequired(*I))
193             changed = true;
194         return changed;
195       }
196 
197       // Same for a full map.
198       bool addRequired(const RegMap &RM) {
199         bool changed = false;
200         for (RegMap::const_iterator I = RM.begin(), E = RM.end(); I != E; ++I)
201           if (addRequired(I->first))
202             changed = true;
203         return changed;
204       }
205 
206       // Live-out registers are either in regsLiveOut or vregsPassed.
207       bool isLiveOut(unsigned Reg) const {
208         return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
209       }
210     };
211 
212     // Extra register info per MBB.
213     DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;
214 
215     bool isReserved(unsigned Reg) {
216       return Reg < regsReserved.size() && regsReserved.test(Reg);
217     }
218 
219     bool isAllocatable(unsigned Reg) const {
220       return Reg < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
221         !regsReserved.test(Reg);
222     }
223 
224     // Analysis information if available
225     LiveVariables *LiveVars;
226     LiveIntervals *LiveInts;
227     LiveStacks *LiveStks;
228     SlotIndexes *Indexes;
229 
230     void visitMachineFunctionBefore();
231     void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
232     void visitMachineBundleBefore(const MachineInstr *MI);
233 
234     void verifyPreISelGenericInstruction(const MachineInstr *MI);
235     void visitMachineInstrBefore(const MachineInstr *MI);
236     void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
237     void visitMachineInstrAfter(const MachineInstr *MI);
238     void visitMachineBundleAfter(const MachineInstr *MI);
239     void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
240     void visitMachineFunctionAfter();
241 
242     void report(const char *msg, const MachineFunction *MF);
243     void report(const char *msg, const MachineBasicBlock *MBB);
244     void report(const char *msg, const MachineInstr *MI);
245     void report(const char *msg, const MachineOperand *MO, unsigned MONum,
246                 LLT MOVRegType = LLT{});
247 
248     void report_context(const LiveInterval &LI) const;
249     void report_context(const LiveRange &LR, unsigned VRegUnit,
250                         LaneBitmask LaneMask) const;
251     void report_context(const LiveRange::Segment &S) const;
252     void report_context(const VNInfo &VNI) const;
253     void report_context(SlotIndex Pos) const;
254     void report_context(MCPhysReg PhysReg) const;
255     void report_context_liverange(const LiveRange &LR) const;
256     void report_context_lanemask(LaneBitmask LaneMask) const;
257     void report_context_vreg(unsigned VReg) const;
258     void report_context_vreg_regunit(unsigned VRegOrUnit) const;
259 
260     void verifyInlineAsm(const MachineInstr *MI);
261 
262     void checkLiveness(const MachineOperand *MO, unsigned MONum);
263     void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
264                             SlotIndex UseIdx, const LiveRange &LR, unsigned VRegOrUnit,
265                             LaneBitmask LaneMask = LaneBitmask::getNone());
266     void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
267                             SlotIndex DefIdx, const LiveRange &LR, unsigned VRegOrUnit,
268                             bool SubRangeCheck = false,
269                             LaneBitmask LaneMask = LaneBitmask::getNone());
270 
271     void markReachable(const MachineBasicBlock *MBB);
272     void calcRegsPassed();
273     void checkPHIOps(const MachineBasicBlock &MBB);
274 
275     void calcRegsRequired();
276     void verifyLiveVariables();
277     void verifyLiveIntervals();
278     void verifyLiveInterval(const LiveInterval&);
279     void verifyLiveRangeValue(const LiveRange&, const VNInfo*, unsigned,
280                               LaneBitmask);
281     void verifyLiveRangeSegment(const LiveRange&,
282                                 const LiveRange::const_iterator I, unsigned,
283                                 LaneBitmask);
284     void verifyLiveRange(const LiveRange&, unsigned,
285                          LaneBitmask LaneMask = LaneBitmask::getNone());
286 
287     void verifyStackFrame();
288 
289     void verifySlotIndexes() const;
290     void verifyProperties(const MachineFunction &MF);
291   };
292 
293   struct MachineVerifierPass : public MachineFunctionPass {
294     static char ID; // Pass ID, replacement for typeid
295 
296     const std::string Banner;
297 
298     MachineVerifierPass(std::string banner = std::string())
299       : MachineFunctionPass(ID), Banner(std::move(banner)) {
300         initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
301       }
302 
303     void getAnalysisUsage(AnalysisUsage &AU) const override {
304       AU.setPreservesAll();
305       MachineFunctionPass::getAnalysisUsage(AU);
306     }
307 
308     bool runOnMachineFunction(MachineFunction &MF) override {
309       unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
310       if (FoundErrors)
311         report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
312       return false;
313     }
314   };
315 
316 } // end anonymous namespace
317 
char MachineVerifierPass::ID = 0;

// Register the pass under the "machineverifier" command-line name.
INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)
322 
323 FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
324   return new MachineVerifierPass(Banner);
325 }
326 
327 bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
328     const {
329   MachineFunction &MF = const_cast<MachineFunction&>(*this);
330   unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
331   if (AbortOnErrors && FoundErrors)
332     report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
333   return FoundErrors == 0;
334 }
335 
336 void MachineVerifier::verifySlotIndexes() const {
337   if (Indexes == nullptr)
338     return;
339 
340   // Ensure the IdxMBB list is sorted by slot indexes.
341   SlotIndex Last;
342   for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
343        E = Indexes->MBBIndexEnd(); I != E; ++I) {
344     assert(!Last.isValid() || I->first > Last);
345     Last = I->first;
346   }
347 }
348 
349 void MachineVerifier::verifyProperties(const MachineFunction &MF) {
350   // If a pass has introduced virtual registers without clearing the
351   // NoVRegs property (or set it without allocating the vregs)
352   // then report an error.
353   if (MF.getProperties().hasProperty(
354           MachineFunctionProperties::Property::NoVRegs) &&
355       MRI->getNumVirtRegs())
356     report("Function has NoVRegs property but there are VReg operands", &MF);
357 }
358 
/// Main entry point: walk every block, bundle, instruction, and operand of
/// MF, calling the visit* hooks, and return the number of errors reported.
unsigned MachineVerifier::verify(MachineFunction &MF) {
  foundErrors = 0;

  // Cache the function and its target hooks for use by all checks.
  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected =
      !isFunctionFailedISel &&
      MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = !isFunctionFailedISel &&
                       MF.getProperties().hasProperty(
                           MachineFunctionProperties::Property::Selected);
  // Pick up whatever liveness analyses the pass manager has kept alive; all
  // of them are optional.
  LiveVars = nullptr;
  LiveInts = nullptr;
  LiveStks = nullptr;
  Indexes = nullptr;
  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (MachineFunction::const_iterator MFI = MF.begin(), MFE = MF.end();
       MFI!=MFE; ++MFI) {
    visitMachineBasicBlockBefore(&*MFI);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (MachineBasicBlock::const_instr_iterator MBBI = MFI->instr_begin(),
           MBBE = MFI->instr_end(); MBBI != MBBE; ++MBBI) {
      if (MBBI->getParent() != &*MFI) {
        report("Bad instruction parent pointer", &*MFI);
        errs() << "Instruction: " << *MBBI;
        continue;
      }

      // Check for consistent bundle flags: BundledSucc on one instruction
      // must be mirrored by BundledPred on the next.
      if (InBundle && !MBBI->isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &*MBBI);
      if (!InBundle && MBBI->isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &*MBBI);

      // Is this a bundle header?
      if (!MBBI->isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &*MBBI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &*MBBI);
      visitMachineInstrBefore(&*MBBI);
      for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I) {
        const MachineInstr &MI = *MBBI;
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / RemoveOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      visitMachineInstrAfter(&*MBBI);

      // Was this the last bundled instruction?
      InBundle = MBBI->isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MFI->back());
    visitMachineBasicBlockAfter(&*MFI);
  }
  visitMachineFunctionAfter();

  // Clean up so the verifier object can be reused on another function.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}
472 
473 void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
474   assert(MF);
475   errs() << '\n';
476   if (!foundErrors++) {
477     if (Banner)
478       errs() << "# " << Banner << '\n';
479     if (LiveInts != nullptr)
480       LiveInts->print(errs());
481     else
482       MF->print(errs(), Indexes);
483   }
484   errs() << "*** Bad machine code: " << msg << " ***\n"
485       << "- function:    " << MF->getName() << "\n";
486 }
487 
488 void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
489   assert(MBB);
490   report(msg, MBB->getParent());
491   errs() << "- basic block: " << printMBBReference(*MBB) << ' '
492          << MBB->getName() << " (" << (const void *)MBB << ')';
493   if (Indexes)
494     errs() << " [" << Indexes->getMBBStartIdx(MBB)
495         << ';' <<  Indexes->getMBBEndIdx(MBB) << ')';
496   errs() << '\n';
497 }
498 
499 void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
500   assert(MI);
501   report(msg, MI->getParent());
502   errs() << "- instruction: ";
503   if (Indexes && Indexes->hasIndex(*MI))
504     errs() << Indexes->getInstructionIndex(*MI) << '\t';
505   MI->print(errs(), /*SkipOpers=*/true);
506 }
507 
508 void MachineVerifier::report(const char *msg, const MachineOperand *MO,
509                              unsigned MONum, LLT MOVRegType) {
510   assert(MO);
511   report(msg, MO->getParent());
512   errs() << "- operand " << MONum << ":   ";
513   MO->print(errs(), MOVRegType, TRI);
514   errs() << "\n";
515 }
516 
517 void MachineVerifier::report_context(SlotIndex Pos) const {
518   errs() << "- at:          " << Pos << '\n';
519 }
520 
521 void MachineVerifier::report_context(const LiveInterval &LI) const {
522   errs() << "- interval:    " << LI << '\n';
523 }
524 
525 void MachineVerifier::report_context(const LiveRange &LR, unsigned VRegUnit,
526                                      LaneBitmask LaneMask) const {
527   report_context_liverange(LR);
528   report_context_vreg_regunit(VRegUnit);
529   if (LaneMask.any())
530     report_context_lanemask(LaneMask);
531 }
532 
533 void MachineVerifier::report_context(const LiveRange::Segment &S) const {
534   errs() << "- segment:     " << S << '\n';
535 }
536 
537 void MachineVerifier::report_context(const VNInfo &VNI) const {
538   errs() << "- ValNo:       " << VNI.id << " (def " << VNI.def << ")\n";
539 }
540 
541 void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
542   errs() << "- liverange:   " << LR << '\n';
543 }
544 
545 void MachineVerifier::report_context(MCPhysReg PReg) const {
546   errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
547 }
548 
549 void MachineVerifier::report_context_vreg(unsigned VReg) const {
550   errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
551 }
552 
553 void MachineVerifier::report_context_vreg_regunit(unsigned VRegOrUnit) const {
554   if (TargetRegisterInfo::isVirtualRegister(VRegOrUnit)) {
555     report_context_vreg(VRegOrUnit);
556   } else {
557     errs() << "- regunit:     " << printRegUnit(VRegOrUnit, TRI) << '\n';
558   }
559 }
560 
561 void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
562   errs() << "- lanemask:    " << PrintLaneMask(LaneMask) << '\n';
563 }
564 
565 void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
566   BBInfo &MInfo = MBBInfoMap[MBB];
567   if (!MInfo.reachable) {
568     MInfo.reachable = true;
569     for (MachineBasicBlock::const_succ_iterator SuI = MBB->succ_begin(),
570            SuE = MBB->succ_end(); SuI != SuE; ++SuI)
571       markReachable(*SuI);
572   }
573 }
574 
/// Per-function setup: compute reserved registers, reachability, and the
/// de-duplicated pred/succ sets used by the per-block CFG checks.
void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  // Use the frozen reserved set from MRI when it exists; otherwise ask TRI
  // to compute it for this function.
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    // Inserting into a set de-duplicates; a size mismatch against the raw
    // list length therefore means the CFG list has a duplicate entry.
    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}
604 
605 // Does iterator point to a and b as the first two elements?
606 static bool matchPair(MachineBasicBlock::const_succ_iterator i,
607                       const MachineBasicBlock *a, const MachineBasicBlock *b) {
608   if (*i == a)
609     return *++i == b;
610   if (*i == b)
611     return *++i == a;
612   return false;
613 }
614 
/// Per-block checks run before any instruction in the block is visited:
/// live-in sanity, CFG list consistency, landing-pad counts, and a full
/// cross-check of analyzeBranch's answers against the actual CFG. Also
/// seeds regsLive from the block's live-ins and the pristine registers.
void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin()) {
        report("MBB has allocatable live-in, but isn't entry or landing-pad.", MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  // Count the number of landing pad successors. While we're at it, verify
  // that every successor is part of the function and lists MBB as a
  // predecessor.
  SmallPtrSet<MachineBasicBlock*, 4> LandingPadSuccs;
  for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
       E = MBB->succ_end(); I != E; ++I) {
    if ((*I)->isEHPad())
      LandingPadSuccs.insert(*I);
    if (!FunctionBlocks.count(*I))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[*I].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*(*I)) << ".\n";
    }
  }

  // Check the predecessor list the same way.
  for (MachineBasicBlock::const_pred_iterator I = MBB->pred_begin(),
       E = MBB->pred_end(); I != E; ++I) {
    if (!FunctionBlocks.count(*I))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[*I].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*(*I)) << ".\n";
    }
  }

  // Multiple landing-pad successors are tolerated only for SjLj switch
  // dispatch blocks and for scoped (Windows-style) EH personalities.
  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call AnalyzeBranch. If it succeeds, there several more conditions to check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, AnalyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      MachineFunction::const_iterator MBBI = MBB->getIterator();
      ++MBBI;
      if (MBBI == MF->end()) {
        // It's possible that the block legitimately ends with a noreturn
        // call or an unreachable, in which case it won't actually fall
        // out the bottom of the function.
      } else if (MBB->succ_size() == LandingPadSuccs.size()) {
        // It's possible that the block legitimately ends with a noreturn
        // call or an unreachable, in which case it won't actually fall
        // out of the block.
      } else if (MBB->succ_size() != 1+LandingPadSuccs.size()) {
        report("MBB exits via unconditional fall-through but doesn't have "
               "exactly one CFG successor!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI)) {
        report("MBB exits via unconditional fall-through but its successor "
               "differs from its CFG successor!", MBB);
      }
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      // If the block has exactly one successor, that happens to be a
      // landingpad, accept it as valid control flow.
      if (MBB->succ_size() != 1+LandingPadSuccs.size() &&
          (MBB->succ_size() != 1 || LandingPadSuccs.size() != 1 ||
           *MBB->succ_begin() != *LandingPadSuccs.begin())) {
        report("MBB exits via unconditional branch but doesn't have "
               "exactly one CFG successor!", MBB);
      } else if (!MBB->isSuccessor(TBB)) {
        report("MBB exits via unconditional branch but the CFG "
               "successor doesn't match the actual successor!", MBB);
      }
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      MachineFunction::const_iterator MBBI = MBB->getIterator();
      ++MBBI;
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (MBB->succ_size() == 1) {
        // A conditional branch with only one successor is weird, but allowed.
        if (&*MBBI != TBB)
          report("MBB exits via conditional branch/fall-through but only has "
                 "one CFG successor!", MBB);
        else if (TBB != *MBB->succ_begin())
          report("MBB exits via conditional branch/fall-through but the CFG "
                 "successor don't match the actual successor!", MBB);
      } else if (MBB->succ_size() != 2) {
        report("MBB exits via conditional branch/fall-through but doesn't have "
               "exactly two CFG successors!", MBB);
      } else if (!matchPair(MBB->succ_begin(), TBB, &*MBBI)) {
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!", MBB);
      }
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->succ_size() == 1) {
        // A conditional branch with only one successor is weird, but allowed.
        if (FBB != TBB)
          report("MBB exits via conditional branch/branch through but only has "
                 "one CFG successor!", MBB);
        else if (TBB != *MBB->succ_begin())
          report("MBB exits via conditional branch/branch through but the CFG "
                 "successor don't match the actual successor!", MBB);
      } else if (MBB->succ_size() != 2) {
        report("MBB exits via conditional branch/branch but doesn't have "
               "exactly two CFG successors!", MBB);
      } else if (!matchPair(MBB->succ_begin(), TBB, FBB)) {
        report("MBB exits via conditional branch/branch but the CFG "
               "successors don't match the actual successors!", MBB);
      }
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("AnalyzeBranch returned invalid data!", MBB);
    }
  }

  // Seed regsLive with the block's live-in registers (and their
  // sub-registers) when liveness is being tracked.
  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!TargetRegisterInfo::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (MCSubRegIterator SubRegs(LI.PhysReg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        regsLive.insert(*SubRegs);
    }
  }

  // Pristine (callee-saved, not yet spilled) registers are live everywhere.
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (MCSubRegIterator SubRegs(I, TRI, /*IncludeSelf=*/true);
         SubRegs.isValid(); ++SubRegs)
      regsLive.insert(*SubRegs);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}
823 
824 // This function gets called for all bundle headers, including normal
825 // stand-alone unbundled instructions.
826 void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
827   if (Indexes && Indexes->hasIndex(*MI)) {
828     SlotIndex idx = Indexes->getInstructionIndex(*MI);
829     if (!(idx > lastIndex)) {
830       report("Instruction index out of order", MI);
831       errs() << "Last instruction was at " << lastIndex << '\n';
832     }
833     lastIndex = idx;
834   }
835 
836   // Ensure non-terminators don't follow terminators.
837   // Ignore predicated terminators formed by if conversion.
838   // FIXME: If conversion shouldn't need to violate this rule.
839   if (MI->isTerminator() && !TII->isPredicated(*MI)) {
840     if (!FirstTerminator)
841       FirstTerminator = MI;
842   } else if (FirstTerminator) {
843     report("Non-terminator instruction after the first terminator", MI);
844     errs() << "First terminator was:\t" << *FirstTerminator;
845   }
846 }
847 
848 // The operands on an INLINEASM instruction must follow a template.
849 // Verify that the flag operands make sense.
850 void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
851   // The first two operands on INLINEASM are the asm string and global flags.
852   if (MI->getNumOperands() < 2) {
853     report("Too few operands on inline asm", MI);
854     return;
855   }
856   if (!MI->getOperand(0).isSymbol())
857     report("Asm string must be an external symbol", MI);
858   if (!MI->getOperand(1).isImm())
859     report("Asm flags must be an immediate", MI);
860   // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
861   // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
862   // and Extra_IsConvergent = 32.
863   if (!isUInt<6>(MI->getOperand(1).getImm()))
864     report("Unknown asm flags", &MI->getOperand(1), 1);
865 
866   static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
867 
868   unsigned OpNo = InlineAsm::MIOp_FirstOperand;
869   unsigned NumOps;
870   for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
871     const MachineOperand &MO = MI->getOperand(OpNo);
872     // There may be implicit ops after the fixed operands.
873     if (!MO.isImm())
874       break;
875     NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
876   }
877 
878   if (OpNo > MI->getNumOperands())
879     report("Missing operands in last group", MI);
880 
881   // An optional MDNode follows the groups.
882   if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
883     ++OpNo;
884 
885   // All trailing operands must be implicit registers.
886   for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
887     const MachineOperand &MO = MI->getOperand(OpNo);
888     if (!MO.isReg() || !MO.isImplicit())
889       report("Expected implicit register after groups", &MO, OpNo);
890   }
891 }
892 
// Verify target-independent properties of a generic (pre-instruction-select)
// instruction: LLT type consistency across operands sharing a type index,
// absence of physical register operands, and per-opcode structural rules.
void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.OpInfo[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && TargetRegisterInfo::isPhysicalRegister(MO->getReg()))
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  // Give the target a chance to report problems via its verification hook.
  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  switch (MI->getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    // Constant results may not be vectors.
    if (MI->getNumOperands() < MCID.getNumOperands())
      break;

    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);
    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      // Check that the memory access size is compatible with the value type.
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        // Extending loads must read strictly fewer bits than the result width.
        if (MMO.getSize() * 8 >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > (ValTy.getSizeInBits() + 7) / 8)
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if ((ValTy.getSizeInBits() + 7) / 8 < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    // Every register operand of a G_PHI must carry the same valid type as
    // the destination.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() ||
        !std::all_of(MI->operands_begin() + 1, MI->operands_end(),
                     [this, &DstTy](const MachineOperand &MO) {
                       if (!MO.isReg())
                         return true;
                       LLT Ty = MRI->getType(MO.getReg());
                       if (!Ty.isValid() || (Ty != DstTy))
                         return false;
                       return true;
                     }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);
    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (DstTy.isVector() != SrcTy.isVector())
      report("pointer casts must be all-vector or all-scalar", MI);
    else {
      if (DstTy.isVector() ) {
        if (DstTy.getNumElements() != SrcTy.getNumElements()) {
          report("pointer casts must preserve number of elements", MI);
          break;
        }
      }
    }

    // The remaining checks apply element-wise for vector casts.
    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presense of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    LLT DstElTy = DstTy.isVector() ? DstTy.getElementType() : DstTy;
    LLT SrcElTy = SrcTy.isVector() ? SrcTy.getElementType() : SrcTy;
    if (DstElTy.isPointer() || SrcElTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    if (DstTy.isVector() != SrcTy.isVector()) {
      report("Generic extend/truncate must be all-vector or all-scalar", MI);
      // Generally we try to report as many issues as possible at once, but in
      // this case it's not clear what should we be comparing the size of the
      // scalar with: the size of the whole vector or its lane. Instead of
      // making an arbitrary choice and emitting not so helpful message, let's
      // avoid the extra noise and stop here.
      break;
    }
    if (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements())
      report("Generic vector extend/truncate must preserve number of lanes",
             MI);
    unsigned DstSize = DstElTy.getSizeInBits();
    unsigned SrcSize = SrcElTy.getSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
    // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed, should use
    // G_BUILD_VECTOR for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands()-1).getReg());
    // For now G_UNMERGE can split vectors.
    for (unsigned i = 0; i < MI->getNumOperands()-1; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy)
        report("G_UNMERGE_VALUES destination types do not match", MI);
    }
    // The destination pieces together must exactly cover the source.
    if (SrcTy.getSizeInBits() !=
        (DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) {
      report("G_UNMERGE_VALUES source operand does not cover dest operands",
             MI);
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector())
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
    for (unsigned i = 2; i < MI->getNumOperands(); ++i) {
      if (MRI->getType(MI->getOperand(1).getReg()) !=
          MRI->getType(MI->getOperand(i).getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
    }
    if (DstTy.getSizeInBits() !=
        SrcEltTy.getSizeInBits() * (MI->getNumOperands() - 1))
      report("G_BUILD_VECTOR src operands total size don't match dest "
             "size.",
             MI);
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector())
      report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
             MI);
    for (unsigned i = 2; i < MI->getNumOperands(); ++i) {
      if (MRI->getType(MI->getOperand(1).getReg()) !=
          MRI->getType(MI->getOperand(i).getReg()))
        report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
               MI);
    }
    if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
      report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
             "dest elt type",
             MI);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    // Source types should be vectors, and total size should match the dest
    // vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || !SrcTy.isVector())
      report("G_CONCAT_VECTOR requires vector source and destination operands",
             MI);
    for (unsigned i = 2; i < MI->getNumOperands(); ++i) {
      if (MRI->getType(MI->getOperand(1).getReg()) !=
          MRI->getType(MI->getOperand(i).getReg()))
        report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
    }
    if (DstTy.getNumElements() !=
        SrcTy.getNumElements() * (MI->getNumOperands() - 1))
      report("G_CONCAT_VECTOR num dest and source elements should match", MI);
    break;
  }
  case TargetOpcode::G_ICMP:
  case TargetOpcode::G_FCMP: {
    // The result must have the same vector-ness and lane count as the
    // compared operands (operand 1 is the predicate, operand 2 the first
    // compared value).
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());

    if ((DstTy.isVector() != SrcTy.isVector()) ||
        (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
      report("Generic vector icmp/fcmp must preserve number of lanes", MI);

    break;
  }
  default:
    break;
  }
}
1208 
1209 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1210   const MCInstrDesc &MCID = MI->getDesc();
1211   if (MI->getNumOperands() < MCID.getNumOperands()) {
1212     report("Too few operands", MI);
1213     errs() << MCID.getNumOperands() << " operands expected, but "
1214            << MI->getNumOperands() << " given.\n";
1215   }
1216 
1217   if (MI->isPHI()) {
1218     if (MF->getProperties().hasProperty(
1219             MachineFunctionProperties::Property::NoPHIs))
1220       report("Found PHI instruction with NoPHIs property set", MI);
1221 
1222     if (FirstNonPHI)
1223       report("Found PHI instruction after non-PHI", MI);
1224   } else if (FirstNonPHI == nullptr)
1225     FirstNonPHI = MI;
1226 
1227   // Check the tied operands.
1228   if (MI->isInlineAsm())
1229     verifyInlineAsm(MI);
1230 
1231   // Check the MachineMemOperands for basic consistency.
1232   for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
1233                                   E = MI->memoperands_end();
1234        I != E; ++I) {
1235     if ((*I)->isLoad() && !MI->mayLoad())
1236       report("Missing mayLoad flag", MI);
1237     if ((*I)->isStore() && !MI->mayStore())
1238       report("Missing mayStore flag", MI);
1239   }
1240 
1241   // Debug values must not have a slot index.
1242   // Other instructions must have one, unless they are inside a bundle.
1243   if (LiveInts) {
1244     bool mapped = !LiveInts->isNotInMIMap(*MI);
1245     if (MI->isDebugInstr()) {
1246       if (mapped)
1247         report("Debug instruction has a slot index", MI);
1248     } else if (MI->isInsideBundle()) {
1249       if (mapped)
1250         report("Instruction inside bundle has a slot index", MI);
1251     } else {
1252       if (!mapped)
1253         report("Missing slot index", MI);
1254     }
1255   }
1256 
1257   if (isPreISelGenericOpcode(MCID.getOpcode())) {
1258     verifyPreISelGenericInstruction(MI);
1259     return;
1260   }
1261 
1262   StringRef ErrorInfo;
1263   if (!TII->verifyInstruction(*MI, ErrorInfo))
1264     report(ErrorInfo.data(), MI);
1265 
1266   // Verify properties of various specific instruction types
1267   switch (MI->getOpcode()) {
1268   case TargetOpcode::COPY: {
1269     if (foundErrors)
1270       break;
1271     const MachineOperand &DstOp = MI->getOperand(0);
1272     const MachineOperand &SrcOp = MI->getOperand(1);
1273     LLT DstTy = MRI->getType(DstOp.getReg());
1274     LLT SrcTy = MRI->getType(SrcOp.getReg());
1275     if (SrcTy.isValid() && DstTy.isValid()) {
1276       // If both types are valid, check that the types are the same.
1277       if (SrcTy != DstTy) {
1278         report("Copy Instruction is illegal with mismatching types", MI);
1279         errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1280       }
1281     }
1282     if (SrcTy.isValid() || DstTy.isValid()) {
1283       // If one of them have valid types, let's just check they have the same
1284       // size.
1285       unsigned SrcSize = TRI->getRegSizeInBits(SrcOp.getReg(), *MRI);
1286       unsigned DstSize = TRI->getRegSizeInBits(DstOp.getReg(), *MRI);
1287       assert(SrcSize && "Expecting size here");
1288       assert(DstSize && "Expecting size here");
1289       if (SrcSize != DstSize)
1290         if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1291           report("Copy Instruction is illegal with mismatching sizes", MI);
1292           errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1293                  << "\n";
1294         }
1295     }
1296     break;
1297   }
1298   case TargetOpcode::STATEPOINT:
1299     if (!MI->getOperand(StatepointOpers::IDPos).isImm() ||
1300         !MI->getOperand(StatepointOpers::NBytesPos).isImm() ||
1301         !MI->getOperand(StatepointOpers::NCallArgsPos).isImm())
1302       report("meta operands to STATEPOINT not constant!", MI);
1303     break;
1304 
1305     auto VerifyStackMapConstant = [&](unsigned Offset) {
1306       if (!MI->getOperand(Offset).isImm() ||
1307           MI->getOperand(Offset).getImm() != StackMaps::ConstantOp ||
1308           !MI->getOperand(Offset + 1).isImm())
1309         report("stack map constant to STATEPOINT not well formed!", MI);
1310     };
1311     const unsigned VarStart = StatepointOpers(MI).getVarIdx();
1312     VerifyStackMapConstant(VarStart + StatepointOpers::CCOffset);
1313     VerifyStackMapConstant(VarStart + StatepointOpers::FlagsOffset);
1314     VerifyStackMapConstant(VarStart + StatepointOpers::NumDeoptOperandsOffset);
1315 
1316     // TODO: verify we have properly encoded deopt arguments
1317     break;
1318   }
1319 }
1320 
1321 void
1322 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
1323   const MachineInstr *MI = MO->getParent();
1324   const MCInstrDesc &MCID = MI->getDesc();
1325   unsigned NumDefs = MCID.getNumDefs();
1326   if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
1327     NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
1328 
1329   // The first MCID.NumDefs operands must be explicit register defines
1330   if (MONum < NumDefs) {
1331     const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
1332     if (!MO->isReg())
1333       report("Explicit definition must be a register", MO, MONum);
1334     else if (!MO->isDef() && !MCOI.isOptionalDef())
1335       report("Explicit definition marked as use", MO, MONum);
1336     else if (MO->isImplicit())
1337       report("Explicit definition marked as implicit", MO, MONum);
1338   } else if (MONum < MCID.getNumOperands()) {
1339     const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
1340     // Don't check if it's the last operand in a variadic instruction. See,
1341     // e.g., LDM_RET in the arm back end.
1342     if (MO->isReg() &&
1343         !(MI->isVariadic() && MONum == MCID.getNumOperands()-1)) {
1344       if (MO->isDef() && !MCOI.isOptionalDef())
1345         report("Explicit operand marked as def", MO, MONum);
1346       if (MO->isImplicit())
1347         report("Explicit operand marked as implicit", MO, MONum);
1348     }
1349 
1350     int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
1351     if (TiedTo != -1) {
1352       if (!MO->isReg())
1353         report("Tied use must be a register", MO, MONum);
1354       else if (!MO->isTied())
1355         report("Operand should be tied", MO, MONum);
1356       else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
1357         report("Tied def doesn't match MCInstrDesc", MO, MONum);
1358       else if (TargetRegisterInfo::isPhysicalRegister(MO->getReg())) {
1359         const MachineOperand &MOTied = MI->getOperand(TiedTo);
1360         if (!MOTied.isReg())
1361           report("Tied counterpart must be a register", &MOTied, TiedTo);
1362         else if (TargetRegisterInfo::isPhysicalRegister(MOTied.getReg()) &&
1363                  MO->getReg() != MOTied.getReg())
1364           report("Tied physical registers must match.", &MOTied, TiedTo);
1365       }
1366     } else if (MO->isReg() && MO->isTied())
1367       report("Explicit operand should not be tied", MO, MONum);
1368   } else {
1369     // ARM adds %reg0 operands to indicate predicates. We'll allow that.
1370     if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
1371       report("Extra explicit operand on non-variadic instruction", MO, MONum);
1372   }
1373 
1374   switch (MO->getType()) {
1375   case MachineOperand::MO_Register: {
1376     const unsigned Reg = MO->getReg();
1377     if (!Reg)
1378       return;
1379     if (MRI->tracksLiveness() && !MI->isDebugValue())
1380       checkLiveness(MO, MONum);
1381 
1382     // Verify the consistency of tied operands.
1383     if (MO->isTied()) {
1384       unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
1385       const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
1386       if (!OtherMO.isReg())
1387         report("Must be tied to a register", MO, MONum);
1388       if (!OtherMO.isTied())
1389         report("Missing tie flags on tied operand", MO, MONum);
1390       if (MI->findTiedOperandIdx(OtherIdx) != MONum)
1391         report("Inconsistent tie links", MO, MONum);
1392       if (MONum < MCID.getNumDefs()) {
1393         if (OtherIdx < MCID.getNumOperands()) {
1394           if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
1395             report("Explicit def tied to explicit use without tie constraint",
1396                    MO, MONum);
1397         } else {
1398           if (!OtherMO.isImplicit())
1399             report("Explicit def should be tied to implicit use", MO, MONum);
1400         }
1401       }
1402     }
1403 
1404     // Verify two-address constraints after leaving SSA form.
1405     unsigned DefIdx;
1406     if (!MRI->isSSA() && MO->isUse() &&
1407         MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
1408         Reg != MI->getOperand(DefIdx).getReg())
1409       report("Two-address instruction operands must be identical", MO, MONum);
1410 
1411     // Check register classes.
1412     unsigned SubIdx = MO->getSubReg();
1413 
1414     if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1415       if (SubIdx) {
1416         report("Illegal subregister index for physical register", MO, MONum);
1417         return;
1418       }
1419       if (MONum < MCID.getNumOperands()) {
1420         if (const TargetRegisterClass *DRC =
1421               TII->getRegClass(MCID, MONum, TRI, *MF)) {
1422           if (!DRC->contains(Reg)) {
1423             report("Illegal physical register for instruction", MO, MONum);
1424             errs() << printReg(Reg, TRI) << " is not a "
1425                    << TRI->getRegClassName(DRC) << " register.\n";
1426           }
1427         }
1428       }
1429       if (MO->isRenamable()) {
1430         if (MRI->isReserved(Reg)) {
1431           report("isRenamable set on reserved register", MO, MONum);
1432           return;
1433         }
1434       }
1435       if (MI->isDebugValue() && MO->isUse() && !MO->isDebug()) {
1436         report("Use-reg is not IsDebug in a DBG_VALUE", MO, MONum);
1437         return;
1438       }
1439     } else {
1440       // Virtual register.
1441       const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
1442       if (!RC) {
1443         // This is a generic virtual register.
1444 
1445         // If we're post-Select, we can't have gvregs anymore.
1446         if (isFunctionSelected) {
1447           report("Generic virtual register invalid in a Selected function",
1448                  MO, MONum);
1449           return;
1450         }
1451 
1452         // The gvreg must have a type and it must not have a SubIdx.
1453         LLT Ty = MRI->getType(Reg);
1454         if (!Ty.isValid()) {
1455           report("Generic virtual register must have a valid type", MO,
1456                  MONum);
1457           return;
1458         }
1459 
1460         const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
1461 
1462         // If we're post-RegBankSelect, the gvreg must have a bank.
1463         if (!RegBank && isFunctionRegBankSelected) {
1464           report("Generic virtual register must have a bank in a "
1465                  "RegBankSelected function",
1466                  MO, MONum);
1467           return;
1468         }
1469 
1470         // Make sure the register fits into its register bank if any.
1471         if (RegBank && Ty.isValid() &&
1472             RegBank->getSize() < Ty.getSizeInBits()) {
1473           report("Register bank is too small for virtual register", MO,
1474                  MONum);
1475           errs() << "Register bank " << RegBank->getName() << " too small("
1476                  << RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
1477                  << "-bits\n";
1478           return;
1479         }
1480         if (SubIdx)  {
1481           report("Generic virtual register does not subregister index", MO,
1482                  MONum);
1483           return;
1484         }
1485 
1486         // If this is a target specific instruction and this operand
1487         // has register class constraint, the virtual register must
1488         // comply to it.
1489         if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
1490             MONum < MCID.getNumOperands() &&
1491             TII->getRegClass(MCID, MONum, TRI, *MF)) {
1492           report("Virtual register does not match instruction constraint", MO,
1493                  MONum);
1494           errs() << "Expect register class "
1495                  << TRI->getRegClassName(
1496                         TII->getRegClass(MCID, MONum, TRI, *MF))
1497                  << " but got nothing\n";
1498           return;
1499         }
1500 
1501         break;
1502       }
1503       if (SubIdx) {
1504         const TargetRegisterClass *SRC =
1505           TRI->getSubClassWithSubReg(RC, SubIdx);
1506         if (!SRC) {
1507           report("Invalid subregister index for virtual register", MO, MONum);
1508           errs() << "Register class " << TRI->getRegClassName(RC)
1509               << " does not support subreg index " << SubIdx << "\n";
1510           return;
1511         }
1512         if (RC != SRC) {
1513           report("Invalid register class for subregister index", MO, MONum);
1514           errs() << "Register class " << TRI->getRegClassName(RC)
1515               << " does not fully support subreg index " << SubIdx << "\n";
1516           return;
1517         }
1518       }
1519       if (MONum < MCID.getNumOperands()) {
1520         if (const TargetRegisterClass *DRC =
1521               TII->getRegClass(MCID, MONum, TRI, *MF)) {
1522           if (SubIdx) {
1523             const TargetRegisterClass *SuperRC =
1524                 TRI->getLargestLegalSuperClass(RC, *MF);
1525             if (!SuperRC) {
1526               report("No largest legal super class exists.", MO, MONum);
1527               return;
1528             }
1529             DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
1530             if (!DRC) {
1531               report("No matching super-reg register class.", MO, MONum);
1532               return;
1533             }
1534           }
1535           if (!RC->hasSuperClassEq(DRC)) {
1536             report("Illegal virtual register for instruction", MO, MONum);
1537             errs() << "Expected a " << TRI->getRegClassName(DRC)
1538                 << " register, but got a " << TRI->getRegClassName(RC)
1539                 << " register\n";
1540           }
1541         }
1542       }
1543     }
1544     break;
1545   }
1546 
1547   case MachineOperand::MO_RegisterMask:
1548     regMasks.push_back(MO->getRegMask());
1549     break;
1550 
1551   case MachineOperand::MO_MachineBasicBlock:
1552     if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
1553       report("PHI operand is not in the CFG", MO, MONum);
1554     break;
1555 
1556   case MachineOperand::MO_FrameIndex:
1557     if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
1558         LiveInts && !LiveInts->isNotInMIMap(*MI)) {
1559       int FI = MO->getIndex();
1560       LiveInterval &LI = LiveStks->getInterval(FI);
1561       SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
1562 
1563       bool stores = MI->mayStore();
1564       bool loads = MI->mayLoad();
1565       // For a memory-to-memory move, we need to check if the frame
1566       // index is used for storing or loading, by inspecting the
1567       // memory operands.
1568       if (stores && loads) {
1569         for (auto *MMO : MI->memoperands()) {
1570           const PseudoSourceValue *PSV = MMO->getPseudoValue();
1571           if (PSV == nullptr) continue;
1572           const FixedStackPseudoSourceValue *Value =
1573             dyn_cast<FixedStackPseudoSourceValue>(PSV);
1574           if (Value == nullptr) continue;
1575           if (Value->getFrameIndex() != FI) continue;
1576 
1577           if (MMO->isStore())
1578             loads = false;
1579           else
1580             stores = false;
1581           break;
1582         }
1583         if (loads == stores)
1584           report("Missing fixed stack memoperand.", MI);
1585       }
1586       if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
1587         report("Instruction loads from dead spill slot", MO, MONum);
1588         errs() << "Live stack: " << LI << '\n';
1589       }
1590       if (stores && !LI.liveAt(Idx.getRegSlot())) {
1591         report("Instruction stores to dead spill slot", MO, MONum);
1592         errs() << "Live stack: " << LI << '\n';
1593       }
1594     }
1595     break;
1596 
1597   default:
1598     break;
1599   }
1600 }
1601 
1602 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
1603     unsigned MONum, SlotIndex UseIdx, const LiveRange &LR, unsigned VRegOrUnit,
1604     LaneBitmask LaneMask) {
1605   LiveQueryResult LRQ = LR.Query(UseIdx);
1606   // Check if we have a segment at the use, note however that we only need one
1607   // live subregister range, the others may be dead.
1608   if (!LRQ.valueIn() && LaneMask.none()) {
1609     report("No live segment at use", MO, MONum);
1610     report_context_liverange(LR);
1611     report_context_vreg_regunit(VRegOrUnit);
1612     report_context(UseIdx);
1613   }
1614   if (MO->isKill() && !LRQ.isKill()) {
1615     report("Live range continues after kill flag", MO, MONum);
1616     report_context_liverange(LR);
1617     report_context_vreg_regunit(VRegOrUnit);
1618     if (LaneMask.any())
1619       report_context_lanemask(LaneMask);
1620     report_context(UseIdx);
1621   }
1622 }
1623 
1624 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
1625     unsigned MONum, SlotIndex DefIdx, const LiveRange &LR, unsigned VRegOrUnit,
1626     bool SubRangeCheck, LaneBitmask LaneMask) {
1627   if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
1628     assert(VNI && "NULL valno is not allowed");
1629     if (VNI->def != DefIdx) {
1630       report("Inconsistent valno->def", MO, MONum);
1631       report_context_liverange(LR);
1632       report_context_vreg_regunit(VRegOrUnit);
1633       if (LaneMask.any())
1634         report_context_lanemask(LaneMask);
1635       report_context(*VNI);
1636       report_context(DefIdx);
1637     }
1638   } else {
1639     report("No live segment at def", MO, MONum);
1640     report_context_liverange(LR);
1641     report_context_vreg_regunit(VRegOrUnit);
1642     if (LaneMask.any())
1643       report_context_lanemask(LaneMask);
1644     report_context(DefIdx);
1645   }
1646   // Check that, if the dead def flag is present, LiveInts agree.
1647   if (MO->isDead()) {
1648     LiveQueryResult LRQ = LR.Query(DefIdx);
1649     if (!LRQ.isDeadDef()) {
1650       assert(TargetRegisterInfo::isVirtualRegister(VRegOrUnit) &&
1651              "Expecting a virtual register.");
1652       // A dead subreg def only tells us that the specific subreg is dead. There
1653       // could be other non-dead defs of other subregs, or we could have other
1654       // parts of the register being live through the instruction. So unless we
1655       // are checking liveness for a subrange it is ok for the live range to
1656       // continue, given that we have a dead def of a subregister.
1657       if (SubRangeCheck || MO->getSubReg() == 0) {
1658         report("Live range continues after dead def flag", MO, MONum);
1659         report_context_liverange(LR);
1660         report_context_vreg_regunit(VRegOrUnit);
1661         if (LaneMask.any())
1662           report_context_lanemask(LaneMask);
1663       }
1664     }
1665   }
1666 }
1667 
// Check the liveness of register operand MO (operand number MONum of its
// parent instruction) against every liveness source the verifier tracks:
// the local regsLive/regsKilled sets, LiveVariables (if available), and
// LiveIntervals including subregister ranges (if available).
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const unsigned Reg = MO->getReg();

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill.
    if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg) &&
        MO->isKill()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
      // Check the cached regunit intervals. Reserved registers and reserved
      // register units are exempt from these checks.
      if (TargetRegisterInfo::isPhysicalRegister(Reg) && !isReserved(Reg)) {
        for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
          if (MRI->isReservedRegUnit(*Units))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
        }
      }

      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        if (LiveInts->hasInterval(Reg)) {
          // This is a virtual register interval.
          const LiveInterval &LI = LiveInts->getInterval(Reg);
          checkLivenessAtUse(MO, MONum, UseIdx, LI, Reg);

          if (LI.hasSubRanges() && !MO->isDef()) {
            // Compute the lanes this operand reads and check every subrange
            // that overlaps them; collect the lanes actually live-in here.
            unsigned SubRegIdx = MO->getSubReg();
            LaneBitmask MOMask = SubRegIdx != 0
                               ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                               : MRI->getMaxLaneMaskForVReg(Reg);
            LaneBitmask LiveInMask;
            for (const LiveInterval::SubRange &SR : LI.subranges()) {
              if ((MOMask & SR.LaneMask).none())
                continue;
              checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
              LiveQueryResult LRQ = SR.Query(UseIdx);
              if (LRQ.valueIn())
                LiveInMask |= SR.LaneMask;
            }
            // At least parts of the register has to be live at the use.
            if ((LiveInMask & MOMask).none()) {
              report("No live subrange at use", MO, MONum);
              report_context(LI);
              report_context(UseIdx);
            }
          }
        } else {
          report("Virtual register has no live interval", MO, MONum);
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {
          for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid();
               ++SubRegs) {
            if (regsLive.count(*SubRegs)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead, if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!TargetRegisterInfo::isPhysicalRegister(MOP.getReg()))
              continue;

            // Reg is a subregister of an implicitly-used super register.
            for (MCSubRegIterator SubRegs(MOP.getReg(), TRI); SubRegs.isValid();
                 ++SubRegs) {
              if (*SubRegs == Reg) {
                Bad = false;
                break;
              }
            }
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && TargetRegisterInfo::isVirtualRegister(Reg) &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        if (LiveInts->hasInterval(Reg)) {
          const LiveInterval &LI = LiveInts->getInterval(Reg);
          checkLivenessAtDef(MO, MONum, DefIdx, LI, Reg);

          if (LI.hasSubRanges()) {
            // Also check every subrange covering lanes written by this def.
            unsigned SubRegIdx = MO->getSubReg();
            LaneBitmask MOMask = SubRegIdx != 0
              ? TRI->getSubRegIndexLaneMask(SubRegIdx)
              : MRI->getMaxLaneMaskForVReg(Reg);
            for (const LiveInterval::SubRange &SR : LI.subranges()) {
              if ((SR.LaneMask & MOMask).none())
                continue;
              checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
            }
          }
        } else {
          report("Virtual register has no Live interval", MO, MONum);
        }
      }
    }
  }
}
1825 
// Post-visit hook for a single instruction; currently a no-op.
void MachineVerifier::visitMachineInstrAfter(const MachineInstr *MI) {}
1827 
1828 // This function gets called after visiting all instructions in a bundle. The
1829 // argument points to the bundle header.
1830 // Normal stand-alone instructions are also considered 'bundles', and this
1831 // function is called for all of them.
1832 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
1833   BBInfo &MInfo = MBBInfoMap[MI->getParent()];
1834   set_union(MInfo.regsKilled, regsKilled);
1835   set_subtract(regsLive, regsKilled); regsKilled.clear();
1836   // Kill any masked registers.
1837   while (!regMasks.empty()) {
1838     const uint32_t *Mask = regMasks.pop_back_val();
1839     for (RegSet::iterator I = regsLive.begin(), E = regsLive.end(); I != E; ++I)
1840       if (TargetRegisterInfo::isPhysicalRegister(*I) &&
1841           MachineOperand::clobbersPhysReg(Mask, *I))
1842         regsDead.push_back(*I);
1843   }
1844   set_subtract(regsLive, regsDead);   regsDead.clear();
1845   set_union(regsLive, regsDefined);   regsDefined.clear();
1846 }
1847 
1848 void
1849 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
1850   MBBInfoMap[MBB].regsLiveOut = regsLive;
1851   regsLive.clear();
1852 
1853   if (Indexes) {
1854     SlotIndex stop = Indexes->getMBBEndIdx(MBB);
1855     if (!(stop > lastIndex)) {
1856       report("Block ends before last instruction index", MBB);
1857       errs() << "Block ends at " << stop
1858           << " last instruction was at " << lastIndex << '\n';
1859     }
1860     lastIndex = stop;
1861   }
1862 }
1863 
1864 // Calculate the largest possible vregsPassed sets. These are the registers that
1865 // can pass through an MBB live, but may not be live every time. It is assumed
1866 // that all vregsPassed sets are empty before the call.
1867 void MachineVerifier::calcRegsPassed() {
1868   // First push live-out regs to successors' vregsPassed. Remember the MBBs that
1869   // have any vregsPassed.
1870   SmallPtrSet<const MachineBasicBlock*, 8> todo;
1871   for (const auto &MBB : *MF) {
1872     BBInfo &MInfo = MBBInfoMap[&MBB];
1873     if (!MInfo.reachable)
1874       continue;
1875     for (MachineBasicBlock::const_succ_iterator SuI = MBB.succ_begin(),
1876            SuE = MBB.succ_end(); SuI != SuE; ++SuI) {
1877       BBInfo &SInfo = MBBInfoMap[*SuI];
1878       if (SInfo.addPassed(MInfo.regsLiveOut))
1879         todo.insert(*SuI);
1880     }
1881   }
1882 
1883   // Iteratively push vregsPassed to successors. This will converge to the same
1884   // final state regardless of DenseSet iteration order.
1885   while (!todo.empty()) {
1886     const MachineBasicBlock *MBB = *todo.begin();
1887     todo.erase(MBB);
1888     BBInfo &MInfo = MBBInfoMap[MBB];
1889     for (MachineBasicBlock::const_succ_iterator SuI = MBB->succ_begin(),
1890            SuE = MBB->succ_end(); SuI != SuE; ++SuI) {
1891       if (*SuI == MBB)
1892         continue;
1893       BBInfo &SInfo = MBBInfoMap[*SuI];
1894       if (SInfo.addPassed(MInfo.vregsPassed))
1895         todo.insert(*SuI);
1896     }
1897   }
1898 }
1899 
1900 // Calculate the set of virtual registers that must be passed through each basic
1901 // block in order to satisfy the requirements of successor blocks. This is very
1902 // similar to calcRegsPassed, only backwards.
1903 void MachineVerifier::calcRegsRequired() {
1904   // First push live-in regs to predecessors' vregsRequired.
1905   SmallPtrSet<const MachineBasicBlock*, 8> todo;
1906   for (const auto &MBB : *MF) {
1907     BBInfo &MInfo = MBBInfoMap[&MBB];
1908     for (MachineBasicBlock::const_pred_iterator PrI = MBB.pred_begin(),
1909            PrE = MBB.pred_end(); PrI != PrE; ++PrI) {
1910       BBInfo &PInfo = MBBInfoMap[*PrI];
1911       if (PInfo.addRequired(MInfo.vregsLiveIn))
1912         todo.insert(*PrI);
1913     }
1914   }
1915 
1916   // Iteratively push vregsRequired to predecessors. This will converge to the
1917   // same final state regardless of DenseSet iteration order.
1918   while (!todo.empty()) {
1919     const MachineBasicBlock *MBB = *todo.begin();
1920     todo.erase(MBB);
1921     BBInfo &MInfo = MBBInfoMap[MBB];
1922     for (MachineBasicBlock::const_pred_iterator PrI = MBB->pred_begin(),
1923            PrE = MBB->pred_end(); PrI != PrE; ++PrI) {
1924       if (*PrI == MBB)
1925         continue;
1926       BBInfo &SInfo = MBBInfoMap[*PrI];
1927       if (SInfo.addRequired(MInfo.vregsRequired))
1928         todo.insert(*PrI);
1929     }
1930   }
1931 }
1932 
1933 // Check PHI instructions at the beginning of MBB. It is assumed that
1934 // calcRegsPassed has been run so BBInfo::isLiveOut is valid.
1935 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
1936   BBInfo &MInfo = MBBInfoMap[&MBB];
1937 
1938   SmallPtrSet<const MachineBasicBlock*, 8> seen;
1939   for (const MachineInstr &Phi : MBB) {
1940     if (!Phi.isPHI())
1941       break;
1942     seen.clear();
1943 
1944     const MachineOperand &MODef = Phi.getOperand(0);
1945     if (!MODef.isReg() || !MODef.isDef()) {
1946       report("Expected first PHI operand to be a register def", &MODef, 0);
1947       continue;
1948     }
1949     if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
1950         MODef.isEarlyClobber() || MODef.isDebug())
1951       report("Unexpected flag on PHI operand", &MODef, 0);
1952     unsigned DefReg = MODef.getReg();
1953     if (!TargetRegisterInfo::isVirtualRegister(DefReg))
1954       report("Expected first PHI operand to be a virtual register", &MODef, 0);
1955 
1956     for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
1957       const MachineOperand &MO0 = Phi.getOperand(I);
1958       if (!MO0.isReg()) {
1959         report("Expected PHI operand to be a register", &MO0, I);
1960         continue;
1961       }
1962       if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
1963           MO0.isDebug() || MO0.isTied())
1964         report("Unexpected flag on PHI operand", &MO0, I);
1965 
1966       const MachineOperand &MO1 = Phi.getOperand(I + 1);
1967       if (!MO1.isMBB()) {
1968         report("Expected PHI operand to be a basic block", &MO1, I + 1);
1969         continue;
1970       }
1971 
1972       const MachineBasicBlock &Pre = *MO1.getMBB();
1973       if (!Pre.isSuccessor(&MBB)) {
1974         report("PHI input is not a predecessor block", &MO1, I + 1);
1975         continue;
1976       }
1977 
1978       if (MInfo.reachable) {
1979         seen.insert(&Pre);
1980         BBInfo &PrInfo = MBBInfoMap[&Pre];
1981         if (!MO0.isUndef() && PrInfo.reachable &&
1982             !PrInfo.isLiveOut(MO0.getReg()))
1983           report("PHI operand is not live-out from predecessor", &MO0, I);
1984       }
1985     }
1986 
1987     // Did we see all predecessors?
1988     if (MInfo.reachable) {
1989       for (MachineBasicBlock *Pred : MBB.predecessors()) {
1990         if (!seen.count(Pred)) {
1991           report("Missing PHI operand", &Phi);
1992           errs() << printMBBReference(*Pred)
1993                  << " is a predecessor according to the CFG.\n";
1994         }
1995       }
1996     }
1997   }
1998 }
1999 
2000 void MachineVerifier::visitMachineFunctionAfter() {
2001   calcRegsPassed();
2002 
2003   for (const MachineBasicBlock &MBB : *MF)
2004     checkPHIOps(MBB);
2005 
2006   // Now check liveness info if available
2007   calcRegsRequired();
2008 
2009   // Check for killed virtual registers that should be live out.
2010   for (const auto &MBB : *MF) {
2011     BBInfo &MInfo = MBBInfoMap[&MBB];
2012     for (RegSet::iterator
2013          I = MInfo.vregsRequired.begin(), E = MInfo.vregsRequired.end(); I != E;
2014          ++I)
2015       if (MInfo.regsKilled.count(*I)) {
2016         report("Virtual register killed in block, but needed live out.", &MBB);
2017         errs() << "Virtual register " << printReg(*I)
2018                << " is used after the block.\n";
2019       }
2020   }
2021 
2022   if (!MF->empty()) {
2023     BBInfo &MInfo = MBBInfoMap[&MF->front()];
2024     for (RegSet::iterator
2025          I = MInfo.vregsRequired.begin(), E = MInfo.vregsRequired.end(); I != E;
2026          ++I) {
2027       report("Virtual register defs don't dominate all uses.", MF);
2028       report_context_vreg(*I);
2029     }
2030   }
2031 
2032   if (LiveVars)
2033     verifyLiveVariables();
2034   if (LiveInts)
2035     verifyLiveIntervals();
2036 }
2037 
2038 void MachineVerifier::verifyLiveVariables() {
2039   assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
2040   for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
2041     unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
2042     LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2043     for (const auto &MBB : *MF) {
2044       BBInfo &MInfo = MBBInfoMap[&MBB];
2045 
2046       // Our vregsRequired should be identical to LiveVariables' AliveBlocks
2047       if (MInfo.vregsRequired.count(Reg)) {
2048         if (!VI.AliveBlocks.test(MBB.getNumber())) {
2049           report("LiveVariables: Block missing from AliveBlocks", &MBB);
2050           errs() << "Virtual register " << printReg(Reg)
2051                  << " must be live through the block.\n";
2052         }
2053       } else {
2054         if (VI.AliveBlocks.test(MBB.getNumber())) {
2055           report("LiveVariables: Block should not be in AliveBlocks", &MBB);
2056           errs() << "Virtual register " << printReg(Reg)
2057                  << " is not needed live through the block.\n";
2058         }
2059       }
2060     }
2061   }
2062 }
2063 
2064 void MachineVerifier::verifyLiveIntervals() {
2065   assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
2066   for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
2067     unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
2068 
2069     // Spilling and splitting may leave unused registers around. Skip them.
2070     if (MRI->reg_nodbg_empty(Reg))
2071       continue;
2072 
2073     if (!LiveInts->hasInterval(Reg)) {
2074       report("Missing live interval for virtual register", MF);
2075       errs() << printReg(Reg, TRI) << " still has defs or uses\n";
2076       continue;
2077     }
2078 
2079     const LiveInterval &LI = LiveInts->getInterval(Reg);
2080     assert(Reg == LI.reg && "Invalid reg to interval mapping");
2081     verifyLiveInterval(LI);
2082   }
2083 
2084   // Verify all the cached regunit intervals.
2085   for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
2086     if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
2087       verifyLiveRange(*LR, i);
2088 }
2089 
2090 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
2091                                            const VNInfo *VNI, unsigned Reg,
2092                                            LaneBitmask LaneMask) {
2093   if (VNI->isUnused())
2094     return;
2095 
2096   const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
2097 
2098   if (!DefVNI) {
2099     report("Value not live at VNInfo def and not marked unused", MF);
2100     report_context(LR, Reg, LaneMask);
2101     report_context(*VNI);
2102     return;
2103   }
2104 
2105   if (DefVNI != VNI) {
2106     report("Live segment at def has different VNInfo", MF);
2107     report_context(LR, Reg, LaneMask);
2108     report_context(*VNI);
2109     return;
2110   }
2111 
2112   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
2113   if (!MBB) {
2114     report("Invalid VNInfo definition index", MF);
2115     report_context(LR, Reg, LaneMask);
2116     report_context(*VNI);
2117     return;
2118   }
2119 
2120   if (VNI->isPHIDef()) {
2121     if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
2122       report("PHIDef VNInfo is not defined at MBB start", MBB);
2123       report_context(LR, Reg, LaneMask);
2124       report_context(*VNI);
2125     }
2126     return;
2127   }
2128 
2129   // Non-PHI def.
2130   const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
2131   if (!MI) {
2132     report("No instruction at VNInfo def index", MBB);
2133     report_context(LR, Reg, LaneMask);
2134     report_context(*VNI);
2135     return;
2136   }
2137 
2138   if (Reg != 0) {
2139     bool hasDef = false;
2140     bool isEarlyClobber = false;
2141     for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
2142       if (!MOI->isReg() || !MOI->isDef())
2143         continue;
2144       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2145         if (MOI->getReg() != Reg)
2146           continue;
2147       } else {
2148         if (!TargetRegisterInfo::isPhysicalRegister(MOI->getReg()) ||
2149             !TRI->hasRegUnit(MOI->getReg(), Reg))
2150           continue;
2151       }
2152       if (LaneMask.any() &&
2153           (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
2154         continue;
2155       hasDef = true;
2156       if (MOI->isEarlyClobber())
2157         isEarlyClobber = true;
2158     }
2159 
2160     if (!hasDef) {
2161       report("Defining instruction does not modify register", MI);
2162       report_context(LR, Reg, LaneMask);
2163       report_context(*VNI);
2164     }
2165 
2166     // Early clobber defs begin at USE slots, but other defs must begin at
2167     // DEF slots.
2168     if (isEarlyClobber) {
2169       if (!VNI->def.isEarlyClobber()) {
2170         report("Early clobber def must be at an early-clobber slot", MBB);
2171         report_context(LR, Reg, LaneMask);
2172         report_context(*VNI);
2173       }
2174     } else if (!VNI->def.isRegister()) {
2175       report("Non-PHI, non-early clobber def must be at a register slot", MBB);
2176       report_context(LR, Reg, LaneMask);
2177       report_context(*VNI);
2178     }
2179   }
2180 }
2181 
2182 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
2183                                              const LiveRange::const_iterator I,
2184                                              unsigned Reg, LaneBitmask LaneMask)
2185 {
2186   const LiveRange::Segment &S = *I;
2187   const VNInfo *VNI = S.valno;
2188   assert(VNI && "Live segment has no valno");
2189 
2190   if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
2191     report("Foreign valno in live segment", MF);
2192     report_context(LR, Reg, LaneMask);
2193     report_context(S);
2194     report_context(*VNI);
2195   }
2196 
2197   if (VNI->isUnused()) {
2198     report("Live segment valno is marked unused", MF);
2199     report_context(LR, Reg, LaneMask);
2200     report_context(S);
2201   }
2202 
2203   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
2204   if (!MBB) {
2205     report("Bad start of live segment, no basic block", MF);
2206     report_context(LR, Reg, LaneMask);
2207     report_context(S);
2208     return;
2209   }
2210   SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
2211   if (S.start != MBBStartIdx && S.start != VNI->def) {
2212     report("Live segment must begin at MBB entry or valno def", MBB);
2213     report_context(LR, Reg, LaneMask);
2214     report_context(S);
2215   }
2216 
2217   const MachineBasicBlock *EndMBB =
2218     LiveInts->getMBBFromIndex(S.end.getPrevSlot());
2219   if (!EndMBB) {
2220     report("Bad end of live segment, no basic block", MF);
2221     report_context(LR, Reg, LaneMask);
2222     report_context(S);
2223     return;
2224   }
2225 
2226   // No more checks for live-out segments.
2227   if (S.end == LiveInts->getMBBEndIdx(EndMBB))
2228     return;
2229 
2230   // RegUnit intervals are allowed dead phis.
2231   if (!TargetRegisterInfo::isVirtualRegister(Reg) && VNI->isPHIDef() &&
2232       S.start == VNI->def && S.end == VNI->def.getDeadSlot())
2233     return;
2234 
2235   // The live segment is ending inside EndMBB
2236   const MachineInstr *MI =
2237     LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
2238   if (!MI) {
2239     report("Live segment doesn't end at a valid instruction", EndMBB);
2240     report_context(LR, Reg, LaneMask);
2241     report_context(S);
2242     return;
2243   }
2244 
2245   // The block slot must refer to a basic block boundary.
2246   if (S.end.isBlock()) {
2247     report("Live segment ends at B slot of an instruction", EndMBB);
2248     report_context(LR, Reg, LaneMask);
2249     report_context(S);
2250   }
2251 
2252   if (S.end.isDead()) {
2253     // Segment ends on the dead slot.
2254     // That means there must be a dead def.
2255     if (!SlotIndex::isSameInstr(S.start, S.end)) {
2256       report("Live segment ending at dead slot spans instructions", EndMBB);
2257       report_context(LR, Reg, LaneMask);
2258       report_context(S);
2259     }
2260   }
2261 
2262   // A live segment can only end at an early-clobber slot if it is being
2263   // redefined by an early-clobber def.
2264   if (S.end.isEarlyClobber()) {
2265     if (I+1 == LR.end() || (I+1)->start != S.end) {
2266       report("Live segment ending at early clobber slot must be "
2267              "redefined by an EC def in the same instruction", EndMBB);
2268       report_context(LR, Reg, LaneMask);
2269       report_context(S);
2270     }
2271   }
2272 
2273   // The following checks only apply to virtual registers. Physreg liveness
2274   // is too weird to check.
2275   if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2276     // A live segment can end with either a redefinition, a kill flag on a
2277     // use, or a dead flag on a def.
2278     bool hasRead = false;
2279     bool hasSubRegDef = false;
2280     bool hasDeadDef = false;
2281     for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
2282       if (!MOI->isReg() || MOI->getReg() != Reg)
2283         continue;
2284       unsigned Sub = MOI->getSubReg();
2285       LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
2286                                  : LaneBitmask::getAll();
2287       if (MOI->isDef()) {
2288         if (Sub != 0) {
2289           hasSubRegDef = true;
2290           // An operand %0:sub0 reads %0:sub1..n. Invert the lane
2291           // mask for subregister defs. Read-undef defs will be handled by
2292           // readsReg below.
2293           SLM = ~SLM;
2294         }
2295         if (MOI->isDead())
2296           hasDeadDef = true;
2297       }
2298       if (LaneMask.any() && (LaneMask & SLM).none())
2299         continue;
2300       if (MOI->readsReg())
2301         hasRead = true;
2302     }
2303     if (S.end.isDead()) {
2304       // Make sure that the corresponding machine operand for a "dead" live
2305       // range has the dead flag. We cannot perform this check for subregister
2306       // liveranges as partially dead values are allowed.
2307       if (LaneMask.none() && !hasDeadDef) {
2308         report("Instruction ending live segment on dead slot has no dead flag",
2309                MI);
2310         report_context(LR, Reg, LaneMask);
2311         report_context(S);
2312       }
2313     } else {
2314       if (!hasRead) {
2315         // When tracking subregister liveness, the main range must start new
2316         // values on partial register writes, even if there is no read.
2317         if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
2318             !hasSubRegDef) {
2319           report("Instruction ending live segment doesn't read the register",
2320                  MI);
2321           report_context(LR, Reg, LaneMask);
2322           report_context(S);
2323         }
2324       }
2325     }
2326   }
2327 
2328   // Now check all the basic blocks in this live segment.
2329   MachineFunction::const_iterator MFI = MBB->getIterator();
2330   // Is this live segment the beginning of a non-PHIDef VN?
2331   if (S.start == VNI->def && !VNI->isPHIDef()) {
2332     // Not live-in to any blocks.
2333     if (MBB == EndMBB)
2334       return;
2335     // Skip this block.
2336     ++MFI;
2337   }
2338 
2339   SmallVector<SlotIndex, 4> Undefs;
2340   if (LaneMask.any()) {
2341     LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
2342     OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
2343   }
2344 
2345   while (true) {
2346     assert(LiveInts->isLiveInToMBB(LR, &*MFI));
2347     // We don't know how to track physregs into a landing pad.
2348     if (!TargetRegisterInfo::isVirtualRegister(Reg) &&
2349         MFI->isEHPad()) {
2350       if (&*MFI == EndMBB)
2351         break;
2352       ++MFI;
2353       continue;
2354     }
2355 
2356     // Is VNI a PHI-def in the current block?
2357     bool IsPHI = VNI->isPHIDef() &&
2358       VNI->def == LiveInts->getMBBStartIdx(&*MFI);
2359 
2360     // Check that VNI is live-out of all predecessors.
2361     for (MachineBasicBlock::const_pred_iterator PI = MFI->pred_begin(),
2362          PE = MFI->pred_end(); PI != PE; ++PI) {
2363       SlotIndex PEnd = LiveInts->getMBBEndIdx(*PI);
2364       const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
2365 
2366       // All predecessors must have a live-out value. However for a phi
2367       // instruction with subregister intervals
2368       // only one of the subregisters (not necessarily the current one) needs to
2369       // be defined.
2370       if (!PVNI && (LaneMask.none() || !IsPHI)) {
2371         if (LiveRangeCalc::isJointlyDominated(*PI, Undefs, *Indexes))
2372           continue;
2373         report("Register not marked live out of predecessor", *PI);
2374         report_context(LR, Reg, LaneMask);
2375         report_context(*VNI);
2376         errs() << " live into " << printMBBReference(*MFI) << '@'
2377                << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
2378                << PEnd << '\n';
2379         continue;
2380       }
2381 
2382       // Only PHI-defs can take different predecessor values.
2383       if (!IsPHI && PVNI != VNI) {
2384         report("Different value live out of predecessor", *PI);
2385         report_context(LR, Reg, LaneMask);
2386         errs() << "Valno #" << PVNI->id << " live out of "
2387                << printMBBReference(*(*PI)) << '@' << PEnd << "\nValno #"
2388                << VNI->id << " live into " << printMBBReference(*MFI) << '@'
2389                << LiveInts->getMBBStartIdx(&*MFI) << '\n';
2390       }
2391     }
2392     if (&*MFI == EndMBB)
2393       break;
2394     ++MFI;
2395   }
2396 }
2397 
2398 void MachineVerifier::verifyLiveRange(const LiveRange &LR, unsigned Reg,
2399                                       LaneBitmask LaneMask) {
2400   for (const VNInfo *VNI : LR.valnos)
2401     verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
2402 
2403   for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
2404     verifyLiveRangeSegment(LR, I, Reg, LaneMask);
2405 }
2406 
2407 void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
2408   unsigned Reg = LI.reg;
2409   assert(TargetRegisterInfo::isVirtualRegister(Reg));
2410   verifyLiveRange(LI, Reg);
2411 
2412   LaneBitmask Mask;
2413   LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
2414   for (const LiveInterval::SubRange &SR : LI.subranges()) {
2415     if ((Mask & SR.LaneMask).any()) {
2416       report("Lane masks of sub ranges overlap in live interval", MF);
2417       report_context(LI);
2418     }
2419     if ((SR.LaneMask & ~MaxMask).any()) {
2420       report("Subrange lanemask is invalid", MF);
2421       report_context(LI);
2422     }
2423     if (SR.empty()) {
2424       report("Subrange must not be empty", MF);
2425       report_context(SR, LI.reg, SR.LaneMask);
2426     }
2427     Mask |= SR.LaneMask;
2428     verifyLiveRange(SR, LI.reg, SR.LaneMask);
2429     if (!LI.covers(SR)) {
2430       report("A Subrange is not covered by the main range", MF);
2431       report_context(LI);
2432     }
2433   }
2434 
2435   // Check the LI only has one connected component.
2436   ConnectedVNInfoEqClasses ConEQ(*LiveInts);
2437   unsigned NumComp = ConEQ.Classify(LI);
2438   if (NumComp > 1) {
2439     report("Multiple connected components in live interval", MF);
2440     report_context(LI);
2441     for (unsigned comp = 0; comp != NumComp; ++comp) {
2442       errs() << comp << ": valnos";
2443       for (LiveInterval::const_vni_iterator I = LI.vni_begin(),
2444            E = LI.vni_end(); I!=E; ++I)
2445         if (comp == ConEQ.getEqClass(*I))
2446           errs() << ' ' << (*I)->id;
2447       errs() << '\n';
2448     }
2449   }
2450 }
2451 
namespace {

  /// Snapshot of the call-frame stack adjustment at the entry and exit of a
  /// basic block.
  ///
  /// FrameSetup and FrameDestroy can both carry a zero adjustment, so a single
  /// integer cannot tell whether a setup is currently open when the value is
  /// zero. Pair each adjustment value with a flag recording whether an
  /// unmatched FrameSetup is outstanding.
  struct StackStateOfBB {
    StackStateOfBB() = default;
    StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
        : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
          ExitIsSetup(ExitSetup) {}

    // Running stack adjustment on block entry/exit. Can be negative, which
    // means we are setting up a frame.
    int EntryValue = 0;
    int ExitValue = 0;
    // Whether an unmatched FrameSetup is outstanding on block entry/exit.
    bool EntryIsSetup = false;
    bool ExitIsSetup = false;
  };

} // end anonymous namespace
2472 
/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
/// by a FrameDestroy <n>, stack adjustments are identical on all
/// CFG edges to a merge point, and frame is destroyed at end of a return block.
void MachineVerifier::verifyStackFrame() {
  unsigned FrameSetupOpcode   = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  // ~0u is the "no such opcode" sentinel; if the target defines neither call
  // frame pseudo instruction, there is nothing to verify.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  // Per-block entry/exit stack state, indexed by MBB number.
  SmallVector<StackStateOfBB, 8> SPState;
  SPState.resize(MF->getNumBlockIDs());
  df_iterator_default_set<const MachineBasicBlock*> Reachable;

  // Visit the MBBs in DFS order. Preorder guarantees that when a block is
  // processed, its DFS-stack predecessor has already had its state computed.
  for (df_ext_iterator<const MachineFunction *,
                       df_iterator_default_set<const MachineBasicBlock *>>
       DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
       DFI != DFE; ++DFI) {
    const MachineBasicBlock *MBB = *DFI;

    StackStateOfBB BBState;
    // Check the exit state of the DFS stack predecessor: the entry state of
    // this block is inherited from it. The function entry block (path length
    // < 2) keeps the default zero / no-setup state.
    if (DFI.getPathLength() >= 2) {
      const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
      BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
      BBState.ExitValue = BBState.EntryValue;
      BBState.ExitIsSetup = BBState.EntryIsSetup;
    }

    // Update stack state by checking contents of MBB.
    for (const auto &I : *MBB) {
      if (I.getOpcode() == FrameSetupOpcode) {
        // Nested FrameSetups are not allowed.
        if (BBState.ExitIsSetup)
          report("FrameSetup is after another FrameSetup", &I);
        // Setting up a frame makes the running adjustment more negative by
        // the frame's total size.
        BBState.ExitValue -= TII->getFrameTotalSize(I);
        BBState.ExitIsSetup = true;
      }

      if (I.getOpcode() == FrameDestroyOpcode) {
        int Size = TII->getFrameTotalSize(I);
        if (!BBState.ExitIsSetup)
          report("FrameDestroy is not after a FrameSetup", &I);
        // The destroy size must match the magnitude of the outstanding setup
        // adjustment.
        int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
                                               BBState.ExitValue;
        if (BBState.ExitIsSetup && AbsSPAdj != Size) {
          report("FrameDestroy <n> is after FrameSetup <m>", &I);
          errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
              << AbsSPAdj << ">.\n";
        }
        BBState.ExitValue += Size;
        BBState.ExitIsSetup = false;
      }
    }
    SPState[MBB->getNumber()] = BBState;

    // Make sure the exit state of any predecessor is consistent with the entry
    // state. Only predecessors already visited (in Reachable) have a valid
    // recorded state to compare against.
    for (MachineBasicBlock::const_pred_iterator I = MBB->pred_begin(),
         E = MBB->pred_end(); I != E; ++I) {
      if (Reachable.count(*I) &&
          (SPState[(*I)->getNumber()].ExitValue != BBState.EntryValue ||
           SPState[(*I)->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
        report("The exit stack state of a predecessor is inconsistent.", MBB);
        errs() << "Predecessor " << printMBBReference(*(*I))
               << " has exit state (" << SPState[(*I)->getNumber()].ExitValue
               << ", " << SPState[(*I)->getNumber()].ExitIsSetup << "), while "
               << printMBBReference(*MBB) << " has entry state ("
               << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
      }
    }

    // Make sure the entry state of any successor is consistent with the exit
    // state.
    for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
         E = MBB->succ_end(); I != E; ++I) {
      if (Reachable.count(*I) &&
          (SPState[(*I)->getNumber()].EntryValue != BBState.ExitValue ||
           SPState[(*I)->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
        report("The entry stack state of a successor is inconsistent.", MBB);
        errs() << "Successor " << printMBBReference(*(*I))
               << " has entry state (" << SPState[(*I)->getNumber()].EntryValue
               << ", " << SPState[(*I)->getNumber()].EntryIsSetup << "), while "
               << printMBBReference(*MBB) << " has exit state ("
               << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
      }
    }

    // Make sure a basic block with return ends with zero stack adjustment:
    // all FrameSetups must have been matched by a FrameDestroy.
    if (!MBB->empty() && MBB->back().isReturn()) {
      if (BBState.ExitIsSetup)
        report("A return block ends with a FrameSetup.", MBB);
      if (BBState.ExitValue)
        report("A return block ends with a nonzero stack adjustment.", MBB);
    }
  }
}
2572