1 //===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Pass to verify generated machine code. The following is checked:
10 //
11 // Operand counts: All explicit operands must be present.
12 //
13 // Register classes: All physical and virtual register operands must be
14 // compatible with the register class required by the instruction descriptor.
15 //
16 // Register live intervals: Registers must be defined only once, and must be
17 // defined before use.
18 //
19 // The machine code verifier is enabled with the command-line option
20 // -verify-machineinstrs.
21 //===----------------------------------------------------------------------===//
22 
23 #include "llvm/ADT/BitVector.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/DenseSet.h"
26 #include "llvm/ADT/DepthFirstIterator.h"
27 #include "llvm/ADT/PostOrderIterator.h"
28 #include "llvm/ADT/STLExtras.h"
29 #include "llvm/ADT/SetOperations.h"
30 #include "llvm/ADT/SmallPtrSet.h"
31 #include "llvm/ADT/SmallVector.h"
32 #include "llvm/ADT/StringRef.h"
33 #include "llvm/ADT/Twine.h"
34 #include "llvm/Analysis/EHPersonalities.h"
35 #include "llvm/CodeGen/CodeGenCommonISel.h"
36 #include "llvm/CodeGen/LiveInterval.h"
37 #include "llvm/CodeGen/LiveIntervals.h"
38 #include "llvm/CodeGen/LiveRangeCalc.h"
39 #include "llvm/CodeGen/LiveStacks.h"
40 #include "llvm/CodeGen/LiveVariables.h"
41 #include "llvm/CodeGen/MachineBasicBlock.h"
42 #include "llvm/CodeGen/MachineFrameInfo.h"
43 #include "llvm/CodeGen/MachineFunction.h"
44 #include "llvm/CodeGen/MachineFunctionPass.h"
45 #include "llvm/CodeGen/MachineInstr.h"
46 #include "llvm/CodeGen/MachineInstrBundle.h"
47 #include "llvm/CodeGen/MachineMemOperand.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/PseudoSourceValue.h"
51 #include "llvm/CodeGen/RegisterBank.h"
52 #include "llvm/CodeGen/RegisterBankInfo.h"
53 #include "llvm/CodeGen/SlotIndexes.h"
54 #include "llvm/CodeGen/StackMaps.h"
55 #include "llvm/CodeGen/TargetInstrInfo.h"
56 #include "llvm/CodeGen/TargetOpcodes.h"
57 #include "llvm/CodeGen/TargetRegisterInfo.h"
58 #include "llvm/CodeGen/TargetSubtargetInfo.h"
59 #include "llvm/IR/BasicBlock.h"
60 #include "llvm/IR/Constants.h"
61 #include "llvm/IR/Function.h"
62 #include "llvm/IR/InlineAsm.h"
63 #include "llvm/IR/Instructions.h"
64 #include "llvm/InitializePasses.h"
65 #include "llvm/MC/LaneBitmask.h"
66 #include "llvm/MC/MCAsmInfo.h"
67 #include "llvm/MC/MCInstrDesc.h"
68 #include "llvm/MC/MCRegisterInfo.h"
69 #include "llvm/MC/MCTargetOptions.h"
70 #include "llvm/Pass.h"
71 #include "llvm/Support/Casting.h"
72 #include "llvm/Support/ErrorHandling.h"
73 #include "llvm/Support/LowLevelTypeImpl.h"
74 #include "llvm/Support/MathExtras.h"
75 #include "llvm/Support/raw_ostream.h"
76 #include "llvm/Target/TargetMachine.h"
77 #include <algorithm>
78 #include <cassert>
79 #include <cstddef>
80 #include <cstdint>
81 #include <iterator>
82 #include <string>
83 #include <utility>
84 
85 using namespace llvm;
86 
87 namespace {
88 
89   struct MachineVerifier {
90     MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
91 
92     unsigned verify(const MachineFunction &MF);
93 
94     Pass *const PASS;
95     const char *Banner;
96     const MachineFunction *MF;
97     const TargetMachine *TM;
98     const TargetInstrInfo *TII;
99     const TargetRegisterInfo *TRI;
100     const MachineRegisterInfo *MRI;
101     const RegisterBankInfo *RBI;
102 
103     unsigned foundErrors;
104 
105     // Avoid querying the MachineFunctionProperties for each operand.
106     bool isFunctionRegBankSelected;
107     bool isFunctionSelected;
108     bool isFunctionTracksDebugUserValues;
109 
110     using RegVector = SmallVector<Register, 16>;
111     using RegMaskVector = SmallVector<const uint32_t *, 4>;
112     using RegSet = DenseSet<Register>;
113     using RegMap = DenseMap<Register, const MachineInstr *>;
114     using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;
115 
116     const MachineInstr *FirstNonPHI;
117     const MachineInstr *FirstTerminator;
118     BlockSet FunctionBlocks;
119 
120     BitVector regsReserved;
121     RegSet regsLive;
122     RegVector regsDefined, regsDead, regsKilled;
123     RegMaskVector regMasks;
124 
125     SlotIndex lastIndex;
126 
127     // Add Reg and any sub-registers to RV
128     void addRegWithSubRegs(RegVector &RV, Register Reg) {
129       RV.push_back(Reg);
130       if (Reg.isPhysical())
131         append_range(RV, TRI->subregs(Reg.asMCReg()));
132     }
133 
134     struct BBInfo {
135       // Is this MBB reachable from the MF entry point?
136       bool reachable = false;
137 
138       // Vregs that must be live in because they are used without being
139       // defined. Map value is the user. vregsLiveIn doesn't include regs
140       // that only are used by PHI nodes.
141       RegMap vregsLiveIn;
142 
143       // Regs killed in MBB. They may be defined again, and will then be in both
144       // regsKilled and regsLiveOut.
145       RegSet regsKilled;
146 
147       // Regs defined in MBB and live out. Note that vregs passing through may
148       // be live out without being mentioned here.
149       RegSet regsLiveOut;
150 
151       // Vregs that pass through MBB untouched. This set is disjoint from
152       // regsKilled and regsLiveOut.
153       RegSet vregsPassed;
154 
155       // Vregs that must pass through MBB because they are needed by a successor
156       // block. This set is disjoint from regsLiveOut.
157       RegSet vregsRequired;
158 
159       // Set versions of block's predecessor and successor lists.
160       BlockSet Preds, Succs;
161 
162       BBInfo() = default;
163 
164       // Add register to vregsRequired if it belongs there. Return true if
165       // anything changed.
166       bool addRequired(Register Reg) {
167         if (!Reg.isVirtual())
168           return false;
169         if (regsLiveOut.count(Reg))
170           return false;
171         return vregsRequired.insert(Reg).second;
172       }
173 
174       // Same for a full set.
175       bool addRequired(const RegSet &RS) {
176         bool Changed = false;
177         for (Register Reg : RS)
178           Changed |= addRequired(Reg);
179         return Changed;
180       }
181 
182       // Same for a full map.
183       bool addRequired(const RegMap &RM) {
184         bool Changed = false;
185         for (const auto &I : RM)
186           Changed |= addRequired(I.first);
187         return Changed;
188       }
189 
190       // Live-out registers are either in regsLiveOut or vregsPassed.
191       bool isLiveOut(Register Reg) const {
192         return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
193       }
194     };
195 
196     // Extra register info per MBB.
197     DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;
198 
199     bool isReserved(Register Reg) {
200       return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
201     }
202 
203     bool isAllocatable(Register Reg) const {
204       return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
205              !regsReserved.test(Reg.id());
206     }
207 
208     // Analysis information if available
209     LiveVariables *LiveVars;
210     LiveIntervals *LiveInts;
211     LiveStacks *LiveStks;
212     SlotIndexes *Indexes;
213 
214     void visitMachineFunctionBefore();
215     void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
216     void visitMachineBundleBefore(const MachineInstr *MI);
217 
218     /// Verify that all of \p MI's virtual register operands are scalars.
219     /// \returns True if all virtual register operands are scalar. False
220     /// otherwise.
221     bool verifyAllRegOpsScalar(const MachineInstr &MI,
222                                const MachineRegisterInfo &MRI);
223     bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
224     void verifyPreISelGenericInstruction(const MachineInstr *MI);
225     void visitMachineInstrBefore(const MachineInstr *MI);
226     void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
227     void visitMachineBundleAfter(const MachineInstr *MI);
228     void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
229     void visitMachineFunctionAfter();
230 
231     void report(const char *msg, const MachineFunction *MF);
232     void report(const char *msg, const MachineBasicBlock *MBB);
233     void report(const char *msg, const MachineInstr *MI);
234     void report(const char *msg, const MachineOperand *MO, unsigned MONum,
235                 LLT MOVRegType = LLT{});
236     void report(const Twine &Msg, const MachineInstr *MI);
237 
238     void report_context(const LiveInterval &LI) const;
239     void report_context(const LiveRange &LR, Register VRegUnit,
240                         LaneBitmask LaneMask) const;
241     void report_context(const LiveRange::Segment &S) const;
242     void report_context(const VNInfo &VNI) const;
243     void report_context(SlotIndex Pos) const;
244     void report_context(MCPhysReg PhysReg) const;
245     void report_context_liverange(const LiveRange &LR) const;
246     void report_context_lanemask(LaneBitmask LaneMask) const;
247     void report_context_vreg(Register VReg) const;
248     void report_context_vreg_regunit(Register VRegOrUnit) const;
249 
250     void verifyInlineAsm(const MachineInstr *MI);
251 
252     void checkLiveness(const MachineOperand *MO, unsigned MONum);
253     void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
254                             SlotIndex UseIdx, const LiveRange &LR,
255                             Register VRegOrUnit,
256                             LaneBitmask LaneMask = LaneBitmask::getNone());
257     void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
258                             SlotIndex DefIdx, const LiveRange &LR,
259                             Register VRegOrUnit, bool SubRangeCheck = false,
260                             LaneBitmask LaneMask = LaneBitmask::getNone());
261 
262     void markReachable(const MachineBasicBlock *MBB);
263     void calcRegsPassed();
264     void checkPHIOps(const MachineBasicBlock &MBB);
265 
266     void calcRegsRequired();
267     void verifyLiveVariables();
268     void verifyLiveIntervals();
269     void verifyLiveInterval(const LiveInterval&);
270     void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
271                               LaneBitmask);
272     void verifyLiveRangeSegment(const LiveRange &,
273                                 const LiveRange::const_iterator I, Register,
274                                 LaneBitmask);
275     void verifyLiveRange(const LiveRange &, Register,
276                          LaneBitmask LaneMask = LaneBitmask::getNone());
277 
278     void verifyStackFrame();
279 
280     void verifySlotIndexes() const;
281     void verifyProperties(const MachineFunction &MF);
282   };
283 
284   struct MachineVerifierPass : public MachineFunctionPass {
285     static char ID; // Pass ID, replacement for typeid
286 
287     const std::string Banner;
288 
289     MachineVerifierPass(std::string banner = std::string())
290       : MachineFunctionPass(ID), Banner(std::move(banner)) {
291         initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
292       }
293 
294     void getAnalysisUsage(AnalysisUsage &AU) const override {
295       AU.setPreservesAll();
296       MachineFunctionPass::getAnalysisUsage(AU);
297     }
298 
299     bool runOnMachineFunction(MachineFunction &MF) override {
300       // Skip functions that have known verification problems.
301       // FIXME: Remove this mechanism when all problematic passes have been
302       // fixed.
303       if (MF.getProperties().hasProperty(
304               MachineFunctionProperties::Property::FailsVerification))
305         return false;
306 
307       unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
308       if (FoundErrors)
309         report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
310       return false;
311     }
312   };
313 
314 } // end anonymous namespace
315 
316 char MachineVerifierPass::ID = 0;
317 
318 INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
319                 "Verify generated machine code", false, false)
320 
321 FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
322   return new MachineVerifierPass(Banner);
323 }
324 
325 void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
326                                  const std::string &Banner,
327                                  const MachineFunction &MF) {
328   // TODO: Use MFAM after porting below analyses.
329   // LiveVariables *LiveVars;
330   // LiveIntervals *LiveInts;
331   // LiveStacks *LiveStks;
332   // SlotIndexes *Indexes;
333   unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
334   if (FoundErrors)
335     report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
336 }
337 
338 bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
339     const {
340   MachineFunction &MF = const_cast<MachineFunction&>(*this);
341   unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
342   if (AbortOnErrors && FoundErrors)
343     report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
344   return FoundErrors == 0;
345 }
346 
347 void MachineVerifier::verifySlotIndexes() const {
348   if (Indexes == nullptr)
349     return;
350 
351   // Ensure the IdxMBB list is sorted by slot indexes.
352   SlotIndex Last;
353   for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
354        E = Indexes->MBBIndexEnd(); I != E; ++I) {
355     assert(!Last.isValid() || I->first > Last);
356     Last = I->first;
357   }
358 }
359 
360 void MachineVerifier::verifyProperties(const MachineFunction &MF) {
361   // If a pass has introduced virtual registers without clearing the
362   // NoVRegs property (or set it without allocating the vregs)
363   // then report an error.
364   if (MF.getProperties().hasProperty(
365           MachineFunctionProperties::Property::NoVRegs) &&
366       MRI->getNumVirtRegs())
367     report("Function has NoVRegs property but there are VReg operands", &MF);
368 }
369 
370 unsigned MachineVerifier::verify(const MachineFunction &MF) {
371   foundErrors = 0;
372 
373   this->MF = &MF;
374   TM = &MF.getTarget();
375   TII = MF.getSubtarget().getInstrInfo();
376   TRI = MF.getSubtarget().getRegisterInfo();
377   RBI = MF.getSubtarget().getRegBankInfo();
378   MRI = &MF.getRegInfo();
379 
380   const bool isFunctionFailedISel = MF.getProperties().hasProperty(
381       MachineFunctionProperties::Property::FailedISel);
382 
383   // If we're mid-GlobalISel and we already triggered the fallback path then
384   // it's expected that the MIR is somewhat broken but that's ok since we'll
385   // reset it and clear the FailedISel attribute in ResetMachineFunctions.
386   if (isFunctionFailedISel)
387     return foundErrors;
388 
389   isFunctionRegBankSelected = MF.getProperties().hasProperty(
390       MachineFunctionProperties::Property::RegBankSelected);
391   isFunctionSelected = MF.getProperties().hasProperty(
392       MachineFunctionProperties::Property::Selected);
393   isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
394       MachineFunctionProperties::Property::TracksDebugUserValues);
395 
396   LiveVars = nullptr;
397   LiveInts = nullptr;
398   LiveStks = nullptr;
399   Indexes = nullptr;
400   if (PASS) {
401     LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
402     // We don't want to verify LiveVariables if LiveIntervals is available.
403     if (!LiveInts)
404       LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
405     LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
406     Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
407   }
408 
409   verifySlotIndexes();
410 
411   verifyProperties(MF);
412 
413   visitMachineFunctionBefore();
414   for (const MachineBasicBlock &MBB : MF) {
415     visitMachineBasicBlockBefore(&MBB);
416     // Keep track of the current bundle header.
417     const MachineInstr *CurBundle = nullptr;
418     // Do we expect the next instruction to be part of the same bundle?
419     bool InBundle = false;
420 
421     for (const MachineInstr &MI : MBB.instrs()) {
422       if (MI.getParent() != &MBB) {
423         report("Bad instruction parent pointer", &MBB);
424         errs() << "Instruction: " << MI;
425         continue;
426       }
427 
428       // Check for consistent bundle flags.
429       if (InBundle && !MI.isBundledWithPred())
430         report("Missing BundledPred flag, "
431                "BundledSucc was set on predecessor",
432                &MI);
433       if (!InBundle && MI.isBundledWithPred())
434         report("BundledPred flag is set, "
435                "but BundledSucc not set on predecessor",
436                &MI);
437 
438       // Is this a bundle header?
439       if (!MI.isInsideBundle()) {
440         if (CurBundle)
441           visitMachineBundleAfter(CurBundle);
442         CurBundle = &MI;
443         visitMachineBundleBefore(CurBundle);
444       } else if (!CurBundle)
445         report("No bundle header", &MI);
446       visitMachineInstrBefore(&MI);
447       for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
448         const MachineOperand &Op = MI.getOperand(I);
449         if (Op.getParent() != &MI) {
450           // Make sure to use correct addOperand / removeOperand / ChangeTo
451           // functions when replacing operands of a MachineInstr.
452           report("Instruction has operand with wrong parent set", &MI);
453         }
454 
455         visitMachineOperand(&Op, I);
456       }
457 
458       // Was this the last bundled instruction?
459       InBundle = MI.isBundledWithSucc();
460     }
461     if (CurBundle)
462       visitMachineBundleAfter(CurBundle);
463     if (InBundle)
464       report("BundledSucc flag set on last instruction in block", &MBB.back());
465     visitMachineBasicBlockAfter(&MBB);
466   }
467   visitMachineFunctionAfter();
468 
469   // Clean up.
470   regsLive.clear();
471   regsDefined.clear();
472   regsDead.clear();
473   regsKilled.clear();
474   regMasks.clear();
475   MBBInfoMap.clear();
476 
477   return foundErrors;
478 }
479 
480 void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
481   assert(MF);
482   errs() << '\n';
483   if (!foundErrors++) {
484     if (Banner)
485       errs() << "# " << Banner << '\n';
486     if (LiveInts != nullptr)
487       LiveInts->print(errs());
488     else
489       MF->print(errs(), Indexes);
490   }
491   errs() << "*** Bad machine code: " << msg << " ***\n"
492       << "- function:    " << MF->getName() << "\n";
493 }
494 
495 void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
496   assert(MBB);
497   report(msg, MBB->getParent());
498   errs() << "- basic block: " << printMBBReference(*MBB) << ' '
499          << MBB->getName() << " (" << (const void *)MBB << ')';
500   if (Indexes)
501     errs() << " [" << Indexes->getMBBStartIdx(MBB)
502         << ';' <<  Indexes->getMBBEndIdx(MBB) << ')';
503   errs() << '\n';
504 }
505 
506 void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
507   assert(MI);
508   report(msg, MI->getParent());
509   errs() << "- instruction: ";
510   if (Indexes && Indexes->hasIndex(*MI))
511     errs() << Indexes->getInstructionIndex(*MI) << '\t';
512   MI->print(errs(), /*IsStandalone=*/true);
513 }
514 
515 void MachineVerifier::report(const char *msg, const MachineOperand *MO,
516                              unsigned MONum, LLT MOVRegType) {
517   assert(MO);
518   report(msg, MO->getParent());
519   errs() << "- operand " << MONum << ":   ";
520   MO->print(errs(), MOVRegType, TRI);
521   errs() << "\n";
522 }
523 
524 void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
525   report(Msg.str().c_str(), MI);
526 }
527 
528 void MachineVerifier::report_context(SlotIndex Pos) const {
529   errs() << "- at:          " << Pos << '\n';
530 }
531 
532 void MachineVerifier::report_context(const LiveInterval &LI) const {
533   errs() << "- interval:    " << LI << '\n';
534 }
535 
536 void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
537                                      LaneBitmask LaneMask) const {
538   report_context_liverange(LR);
539   report_context_vreg_regunit(VRegUnit);
540   if (LaneMask.any())
541     report_context_lanemask(LaneMask);
542 }
543 
544 void MachineVerifier::report_context(const LiveRange::Segment &S) const {
545   errs() << "- segment:     " << S << '\n';
546 }
547 
548 void MachineVerifier::report_context(const VNInfo &VNI) const {
549   errs() << "- ValNo:       " << VNI.id << " (def " << VNI.def << ")\n";
550 }
551 
552 void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
553   errs() << "- liverange:   " << LR << '\n';
554 }
555 
556 void MachineVerifier::report_context(MCPhysReg PReg) const {
557   errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
558 }
559 
560 void MachineVerifier::report_context_vreg(Register VReg) const {
561   errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
562 }
563 
564 void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
565   if (Register::isVirtualRegister(VRegOrUnit)) {
566     report_context_vreg(VRegOrUnit);
567   } else {
568     errs() << "- regunit:     " << printRegUnit(VRegOrUnit, TRI) << '\n';
569   }
570 }
571 
572 void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
573   errs() << "- lanemask:    " << PrintLaneMask(LaneMask) << '\n';
574 }
575 
576 void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
577   BBInfo &MInfo = MBBInfoMap[MBB];
578   if (!MInfo.reachable) {
579     MInfo.reachable = true;
580     for (const MachineBasicBlock *Succ : MBB->successors())
581       markReachable(Succ);
582   }
583 }
584 
585 void MachineVerifier::visitMachineFunctionBefore() {
586   lastIndex = SlotIndex();
587   regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
588                                            : TRI->getReservedRegs(*MF);
589 
590   if (!MF->empty())
591     markReachable(&MF->front());
592 
593   // Build a set of the basic blocks in the function.
594   FunctionBlocks.clear();
595   for (const auto &MBB : *MF) {
596     FunctionBlocks.insert(&MBB);
597     BBInfo &MInfo = MBBInfoMap[&MBB];
598 
599     MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
600     if (MInfo.Preds.size() != MBB.pred_size())
601       report("MBB has duplicate entries in its predecessor list.", &MBB);
602 
603     MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
604     if (MInfo.Succs.size() != MBB.succ_size())
605       report("MBB has duplicate entries in its successor list.", &MBB);
606   }
607 
608   // Check that the register use lists are sane.
609   MRI->verifyUseLists();
610 
611   if (!MF->empty())
612     verifyStackFrame();
613 }
614 
615 void
616 MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
617   FirstTerminator = nullptr;
618   FirstNonPHI = nullptr;
619 
620   if (!MF->getProperties().hasProperty(
621       MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
622     // If this block has allocatable physical registers live-in, check that
623     // it is an entry block or landing pad.
624     for (const auto &LI : MBB->liveins()) {
625       if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
626           MBB->getIterator() != MBB->getParent()->begin()) {
627         report("MBB has allocatable live-in, but isn't entry or landing-pad.", MBB);
628         report_context(LI.PhysReg);
629       }
630     }
631   }
632 
633   // Count the number of landing pad successors.
634   SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
635   for (const auto *succ : MBB->successors()) {
636     if (succ->isEHPad())
637       LandingPadSuccs.insert(succ);
638     if (!FunctionBlocks.count(succ))
639       report("MBB has successor that isn't part of the function.", MBB);
640     if (!MBBInfoMap[succ].Preds.count(MBB)) {
641       report("Inconsistent CFG", MBB);
642       errs() << "MBB is not in the predecessor list of the successor "
643              << printMBBReference(*succ) << ".\n";
644     }
645   }
646 
647   // Check the predecessor list.
648   for (const MachineBasicBlock *Pred : MBB->predecessors()) {
649     if (!FunctionBlocks.count(Pred))
650       report("MBB has predecessor that isn't part of the function.", MBB);
651     if (!MBBInfoMap[Pred].Succs.count(MBB)) {
652       report("Inconsistent CFG", MBB);
653       errs() << "MBB is not in the successor list of the predecessor "
654              << printMBBReference(*Pred) << ".\n";
655     }
656   }
657 
658   const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
659   const BasicBlock *BB = MBB->getBasicBlock();
660   const Function &F = MF->getFunction();
661   if (LandingPadSuccs.size() > 1 &&
662       !(AsmInfo &&
663         AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
664         BB && isa<SwitchInst>(BB->getTerminator())) &&
665       !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
666     report("MBB has more than one landing pad successor", MBB);
667 
668   // Call analyzeBranch. If it succeeds, there several more conditions to check.
669   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
670   SmallVector<MachineOperand, 4> Cond;
671   if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
672                           Cond)) {
673     // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
674     // check whether its answers match up with reality.
675     if (!TBB && !FBB) {
676       // Block falls through to its successor.
677       if (!MBB->empty() && MBB->back().isBarrier() &&
678           !TII->isPredicated(MBB->back())) {
679         report("MBB exits via unconditional fall-through but ends with a "
680                "barrier instruction!", MBB);
681       }
682       if (!Cond.empty()) {
683         report("MBB exits via unconditional fall-through but has a condition!",
684                MBB);
685       }
686     } else if (TBB && !FBB && Cond.empty()) {
687       // Block unconditionally branches somewhere.
688       if (MBB->empty()) {
689         report("MBB exits via unconditional branch but doesn't contain "
690                "any instructions!", MBB);
691       } else if (!MBB->back().isBarrier()) {
692         report("MBB exits via unconditional branch but doesn't end with a "
693                "barrier instruction!", MBB);
694       } else if (!MBB->back().isTerminator()) {
695         report("MBB exits via unconditional branch but the branch isn't a "
696                "terminator instruction!", MBB);
697       }
698     } else if (TBB && !FBB && !Cond.empty()) {
699       // Block conditionally branches somewhere, otherwise falls through.
700       if (MBB->empty()) {
701         report("MBB exits via conditional branch/fall-through but doesn't "
702                "contain any instructions!", MBB);
703       } else if (MBB->back().isBarrier()) {
704         report("MBB exits via conditional branch/fall-through but ends with a "
705                "barrier instruction!", MBB);
706       } else if (!MBB->back().isTerminator()) {
707         report("MBB exits via conditional branch/fall-through but the branch "
708                "isn't a terminator instruction!", MBB);
709       }
710     } else if (TBB && FBB) {
711       // Block conditionally branches somewhere, otherwise branches
712       // somewhere else.
713       if (MBB->empty()) {
714         report("MBB exits via conditional branch/branch but doesn't "
715                "contain any instructions!", MBB);
716       } else if (!MBB->back().isBarrier()) {
717         report("MBB exits via conditional branch/branch but doesn't end with a "
718                "barrier instruction!", MBB);
719       } else if (!MBB->back().isTerminator()) {
720         report("MBB exits via conditional branch/branch but the branch "
721                "isn't a terminator instruction!", MBB);
722       }
723       if (Cond.empty()) {
724         report("MBB exits via conditional branch/branch but there's no "
725                "condition!", MBB);
726       }
727     } else {
728       report("analyzeBranch returned invalid data!", MBB);
729     }
730 
731     // Now check that the successors match up with the answers reported by
732     // analyzeBranch.
733     if (TBB && !MBB->isSuccessor(TBB))
734       report("MBB exits via jump or conditional branch, but its target isn't a "
735              "CFG successor!",
736              MBB);
737     if (FBB && !MBB->isSuccessor(FBB))
738       report("MBB exits via conditional branch, but its target isn't a CFG "
739              "successor!",
740              MBB);
741 
742     // There might be a fallthrough to the next block if there's either no
743     // unconditional true branch, or if there's a condition, and one of the
744     // branches is missing.
745     bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
746 
747     // A conditional fallthrough must be an actual CFG successor, not
748     // unreachable. (Conversely, an unconditional fallthrough might not really
749     // be a successor, because the block might end in unreachable.)
750     if (!Cond.empty() && !FBB) {
751       MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
752       if (MBBI == MF->end()) {
753         report("MBB conditionally falls through out of function!", MBB);
754       } else if (!MBB->isSuccessor(&*MBBI))
755         report("MBB exits via conditional branch/fall-through but the CFG "
756                "successors don't match the actual successors!",
757                MBB);
758     }
759 
760     // Verify that there aren't any extra un-accounted-for successors.
761     for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
762       // If this successor is one of the branch targets, it's okay.
763       if (SuccMBB == TBB || SuccMBB == FBB)
764         continue;
765       // If we might have a fallthrough, and the successor is the fallthrough
766       // block, that's also ok.
767       if (Fallthrough && SuccMBB == MBB->getNextNode())
768         continue;
769       // Also accept successors which are for exception-handling or might be
770       // inlineasm_br targets.
771       if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
772         continue;
773       report("MBB has unexpected successors which are not branch targets, "
774              "fallthrough, EHPads, or inlineasm_br targets.",
775              MBB);
776     }
777   }
778 
779   regsLive.clear();
780   if (MRI->tracksLiveness()) {
781     for (const auto &LI : MBB->liveins()) {
782       if (!Register::isPhysicalRegister(LI.PhysReg)) {
783         report("MBB live-in list contains non-physical register", MBB);
784         continue;
785       }
786       for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
787         regsLive.insert(SubReg);
788     }
789   }
790 
791   const MachineFrameInfo &MFI = MF->getFrameInfo();
792   BitVector PR = MFI.getPristineRegs(*MF);
793   for (unsigned I : PR.set_bits()) {
794     for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
795       regsLive.insert(SubReg);
796   }
797 
798   regsKilled.clear();
799   regsDefined.clear();
800 
801   if (Indexes)
802     lastIndex = Indexes->getMBBStartIdx(MBB);
803 }
804 
805 // This function gets called for all bundle headers, including normal
806 // stand-alone unbundled instructions.
807 void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
808   if (Indexes && Indexes->hasIndex(*MI)) {
809     SlotIndex idx = Indexes->getInstructionIndex(*MI);
810     if (!(idx > lastIndex)) {
811       report("Instruction index out of order", MI);
812       errs() << "Last instruction was at " << lastIndex << '\n';
813     }
814     lastIndex = idx;
815   }
816 
817   // Ensure non-terminators don't follow terminators.
818   if (MI->isTerminator()) {
819     if (!FirstTerminator)
820       FirstTerminator = MI;
821   } else if (FirstTerminator) {
822     report("Non-terminator instruction after the first terminator", MI);
823     errs() << "First terminator was:\t" << *FirstTerminator;
824   }
825 }
826 
827 // The operands on an INLINEASM instruction must follow a template.
828 // Verify that the flag operands make sense.
829 void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
830   // The first two operands on INLINEASM are the asm string and global flags.
831   if (MI->getNumOperands() < 2) {
832     report("Too few operands on inline asm", MI);
833     return;
834   }
835   if (!MI->getOperand(0).isSymbol())
836     report("Asm string must be an external symbol", MI);
837   if (!MI->getOperand(1).isImm())
838     report("Asm flags must be an immediate", MI);
839   // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
840   // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
841   // and Extra_IsConvergent = 32.
842   if (!isUInt<6>(MI->getOperand(1).getImm()))
843     report("Unknown asm flags", &MI->getOperand(1), 1);
844 
845   static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
846 
847   unsigned OpNo = InlineAsm::MIOp_FirstOperand;
848   unsigned NumOps;
849   for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
850     const MachineOperand &MO = MI->getOperand(OpNo);
851     // There may be implicit ops after the fixed operands.
852     if (!MO.isImm())
853       break;
854     NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
855   }
856 
857   if (OpNo > MI->getNumOperands())
858     report("Missing operands in last group", MI);
859 
860   // An optional MDNode follows the groups.
861   if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
862     ++OpNo;
863 
864   // All trailing operands must be implicit registers.
865   for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
866     const MachineOperand &MO = MI->getOperand(OpNo);
867     if (!MO.isReg() || !MO.isImplicit())
868       report("Expected implicit register after groups", &MO, OpNo);
869   }
870 }
871 
872 bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
873                                             const MachineRegisterInfo &MRI) {
874   if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
875         if (!Op.isReg())
876           return false;
877         const auto Reg = Op.getReg();
878         if (Reg.isPhysical())
879           return false;
880         return !MRI.getType(Reg).isScalar();
881       }))
882     return true;
883   report("All register operands must have scalar types", &MI);
884   return false;
885 }
886 
887 /// Check that types are consistent when two operands need to have the same
888 /// number of vector elements.
889 /// \return true if the types are valid.
890 bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
891                                                const MachineInstr *MI) {
892   if (Ty0.isVector() != Ty1.isVector()) {
893     report("operand types must be all-vector or all-scalar", MI);
894     // Generally we try to report as many issues as possible at once, but in
895     // this case it's not clear what should we be comparing the size of the
896     // scalar with: the size of the whole vector or its lane. Instead of
897     // making an arbitrary choice and emitting not so helpful message, let's
898     // avoid the extra noise and stop here.
899     return false;
900   }
901 
902   if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
903     report("operand types must preserve number of vector elements", MI);
904     return false;
905   }
906 
907   return true;
908 }
909 
910 void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
911   if (isFunctionSelected)
912     report("Unexpected generic instruction in a Selected function", MI);
913 
914   const MCInstrDesc &MCID = MI->getDesc();
915   unsigned NumOps = MI->getNumOperands();
916 
917   // Branches must reference a basic block if they are not indirect
918   if (MI->isBranch() && !MI->isIndirectBranch()) {
919     bool HasMBB = false;
920     for (const MachineOperand &Op : MI->operands()) {
921       if (Op.isMBB()) {
922         HasMBB = true;
923         break;
924       }
925     }
926 
927     if (!HasMBB) {
928       report("Branch instruction is missing a basic block operand or "
929              "isIndirectBranch property",
930              MI);
931     }
932   }
933 
934   // Check types.
935   SmallVector<LLT, 4> Types;
936   for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
937        I != E; ++I) {
938     if (!MCID.OpInfo[I].isGenericType())
939       continue;
940     // Generic instructions specify type equality constraints between some of
941     // their operands. Make sure these are consistent.
942     size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex();
943     Types.resize(std::max(TypeIdx + 1, Types.size()));
944 
945     const MachineOperand *MO = &MI->getOperand(I);
946     if (!MO->isReg()) {
947       report("generic instruction must use register operands", MI);
948       continue;
949     }
950 
951     LLT OpTy = MRI->getType(MO->getReg());
952     // Don't report a type mismatch if there is no actual mismatch, only a
953     // type missing, to reduce noise:
954     if (OpTy.isValid()) {
955       // Only the first valid type for a type index will be printed: don't
956       // overwrite it later so it's always clear which type was expected:
957       if (!Types[TypeIdx].isValid())
958         Types[TypeIdx] = OpTy;
959       else if (Types[TypeIdx] != OpTy)
960         report("Type mismatch in generic instruction", MO, I, OpTy);
961     } else {
962       // Generic instructions must have types attached to their operands.
963       report("Generic instruction is missing a virtual register type", MO, I);
964     }
965   }
966 
967   // Generic opcodes must not have physical register operands.
968   for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
969     const MachineOperand *MO = &MI->getOperand(I);
970     if (MO->isReg() && Register::isPhysicalRegister(MO->getReg()))
971       report("Generic instruction cannot have physical register", MO, I);
972   }
973 
974   // Avoid out of bounds in checks below. This was already reported earlier.
975   if (MI->getNumOperands() < MCID.getNumOperands())
976     return;
977 
978   StringRef ErrorInfo;
979   if (!TII->verifyInstruction(*MI, ErrorInfo))
980     report(ErrorInfo.data(), MI);
981 
982   // Verify properties of various specific instruction types
983   unsigned Opc = MI->getOpcode();
984   switch (Opc) {
985   case TargetOpcode::G_ASSERT_SEXT:
986   case TargetOpcode::G_ASSERT_ZEXT: {
987     std::string OpcName =
988         Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
989     if (!MI->getOperand(2).isImm()) {
990       report(Twine(OpcName, " expects an immediate operand #2"), MI);
991       break;
992     }
993 
994     Register Dst = MI->getOperand(0).getReg();
995     Register Src = MI->getOperand(1).getReg();
996     LLT SrcTy = MRI->getType(Src);
997     int64_t Imm = MI->getOperand(2).getImm();
998     if (Imm <= 0) {
999       report(Twine(OpcName, " size must be >= 1"), MI);
1000       break;
1001     }
1002 
1003     if (Imm >= SrcTy.getScalarSizeInBits()) {
1004       report(Twine(OpcName, " size must be less than source bit width"), MI);
1005       break;
1006     }
1007 
1008     const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1009     const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1010 
1011     // Allow only the source bank to be set.
1012     if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1013       report(Twine(OpcName, " cannot change register bank"), MI);
1014       break;
1015     }
1016 
1017     // Don't allow a class change. Do allow member class->regbank.
1018     const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1019     if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1020       report(
1021           Twine(OpcName, " source and destination register classes must match"),
1022           MI);
1023       break;
1024     }
1025 
1026     break;
1027   }
1028 
1029   case TargetOpcode::G_CONSTANT:
1030   case TargetOpcode::G_FCONSTANT: {
1031     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1032     if (DstTy.isVector())
1033       report("Instruction cannot use a vector result type", MI);
1034 
1035     if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1036       if (!MI->getOperand(1).isCImm()) {
1037         report("G_CONSTANT operand must be cimm", MI);
1038         break;
1039       }
1040 
1041       const ConstantInt *CI = MI->getOperand(1).getCImm();
1042       if (CI->getBitWidth() != DstTy.getSizeInBits())
1043         report("inconsistent constant size", MI);
1044     } else {
1045       if (!MI->getOperand(1).isFPImm()) {
1046         report("G_FCONSTANT operand must be fpimm", MI);
1047         break;
1048       }
1049       const ConstantFP *CF = MI->getOperand(1).getFPImm();
1050 
1051       if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
1052           DstTy.getSizeInBits()) {
1053         report("inconsistent constant size", MI);
1054       }
1055     }
1056 
1057     break;
1058   }
1059   case TargetOpcode::G_LOAD:
1060   case TargetOpcode::G_STORE:
1061   case TargetOpcode::G_ZEXTLOAD:
1062   case TargetOpcode::G_SEXTLOAD: {
1063     LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1064     LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1065     if (!PtrTy.isPointer())
1066       report("Generic memory instruction must access a pointer", MI);
1067 
1068     // Generic loads and stores must have a single MachineMemOperand
1069     // describing that access.
1070     if (!MI->hasOneMemOperand()) {
1071       report("Generic instruction accessing memory must have one mem operand",
1072              MI);
1073     } else {
1074       const MachineMemOperand &MMO = **MI->memoperands_begin();
1075       if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1076           MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1077         if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
1078           report("Generic extload must have a narrower memory type", MI);
1079       } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1080         if (MMO.getSize() > ValTy.getSizeInBytes())
1081           report("load memory size cannot exceed result size", MI);
1082       } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1083         if (ValTy.getSizeInBytes() < MMO.getSize())
1084           report("store memory size cannot exceed value size", MI);
1085       }
1086 
1087       const AtomicOrdering Order = MMO.getSuccessOrdering();
1088       if (Opc == TargetOpcode::G_STORE) {
1089         if (Order == AtomicOrdering::Acquire ||
1090             Order == AtomicOrdering::AcquireRelease)
1091           report("atomic store cannot use acquire ordering", MI);
1092 
1093       } else {
1094         if (Order == AtomicOrdering::Release ||
1095             Order == AtomicOrdering::AcquireRelease)
1096           report("atomic load cannot use release ordering", MI);
1097       }
1098     }
1099 
1100     break;
1101   }
1102   case TargetOpcode::G_PHI: {
1103     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1104     if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1105                                     [this, &DstTy](const MachineOperand &MO) {
1106                                       if (!MO.isReg())
1107                                         return true;
1108                                       LLT Ty = MRI->getType(MO.getReg());
1109                                       if (!Ty.isValid() || (Ty != DstTy))
1110                                         return false;
1111                                       return true;
1112                                     }))
1113       report("Generic Instruction G_PHI has operands with incompatible/missing "
1114              "types",
1115              MI);
1116     break;
1117   }
1118   case TargetOpcode::G_BITCAST: {
1119     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1120     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1121     if (!DstTy.isValid() || !SrcTy.isValid())
1122       break;
1123 
1124     if (SrcTy.isPointer() != DstTy.isPointer())
1125       report("bitcast cannot convert between pointers and other types", MI);
1126 
1127     if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1128       report("bitcast sizes must match", MI);
1129 
1130     if (SrcTy == DstTy)
1131       report("bitcast must change the type", MI);
1132 
1133     break;
1134   }
1135   case TargetOpcode::G_INTTOPTR:
1136   case TargetOpcode::G_PTRTOINT:
1137   case TargetOpcode::G_ADDRSPACE_CAST: {
1138     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1139     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1140     if (!DstTy.isValid() || !SrcTy.isValid())
1141       break;
1142 
1143     verifyVectorElementMatch(DstTy, SrcTy, MI);
1144 
1145     DstTy = DstTy.getScalarType();
1146     SrcTy = SrcTy.getScalarType();
1147 
1148     if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1149       if (!DstTy.isPointer())
1150         report("inttoptr result type must be a pointer", MI);
1151       if (SrcTy.isPointer())
1152         report("inttoptr source type must not be a pointer", MI);
1153     } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1154       if (!SrcTy.isPointer())
1155         report("ptrtoint source type must be a pointer", MI);
1156       if (DstTy.isPointer())
1157         report("ptrtoint result type must not be a pointer", MI);
1158     } else {
1159       assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1160       if (!SrcTy.isPointer() || !DstTy.isPointer())
1161         report("addrspacecast types must be pointers", MI);
1162       else {
1163         if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1164           report("addrspacecast must convert different address spaces", MI);
1165       }
1166     }
1167 
1168     break;
1169   }
1170   case TargetOpcode::G_PTR_ADD: {
1171     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1172     LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1173     LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1174     if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1175       break;
1176 
1177     if (!PtrTy.getScalarType().isPointer())
1178       report("gep first operand must be a pointer", MI);
1179 
1180     if (OffsetTy.getScalarType().isPointer())
1181       report("gep offset operand must not be a pointer", MI);
1182 
1183     // TODO: Is the offset allowed to be a scalar with a vector?
1184     break;
1185   }
1186   case TargetOpcode::G_PTRMASK: {
1187     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1188     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1189     LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1190     if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1191       break;
1192 
1193     if (!DstTy.getScalarType().isPointer())
1194       report("ptrmask result type must be a pointer", MI);
1195 
1196     if (!MaskTy.getScalarType().isScalar())
1197       report("ptrmask mask type must be an integer", MI);
1198 
1199     verifyVectorElementMatch(DstTy, MaskTy, MI);
1200     break;
1201   }
1202   case TargetOpcode::G_SEXT:
1203   case TargetOpcode::G_ZEXT:
1204   case TargetOpcode::G_ANYEXT:
1205   case TargetOpcode::G_TRUNC:
1206   case TargetOpcode::G_FPEXT:
1207   case TargetOpcode::G_FPTRUNC: {
1208     // Number of operands and presense of types is already checked (and
1209     // reported in case of any issues), so no need to report them again. As
1210     // we're trying to report as many issues as possible at once, however, the
1211     // instructions aren't guaranteed to have the right number of operands or
1212     // types attached to them at this point
1213     assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1214     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1215     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1216     if (!DstTy.isValid() || !SrcTy.isValid())
1217       break;
1218 
1219     LLT DstElTy = DstTy.getScalarType();
1220     LLT SrcElTy = SrcTy.getScalarType();
1221     if (DstElTy.isPointer() || SrcElTy.isPointer())
1222       report("Generic extend/truncate can not operate on pointers", MI);
1223 
1224     verifyVectorElementMatch(DstTy, SrcTy, MI);
1225 
1226     unsigned DstSize = DstElTy.getSizeInBits();
1227     unsigned SrcSize = SrcElTy.getSizeInBits();
1228     switch (MI->getOpcode()) {
1229     default:
1230       if (DstSize <= SrcSize)
1231         report("Generic extend has destination type no larger than source", MI);
1232       break;
1233     case TargetOpcode::G_TRUNC:
1234     case TargetOpcode::G_FPTRUNC:
1235       if (DstSize >= SrcSize)
1236         report("Generic truncate has destination type no smaller than source",
1237                MI);
1238       break;
1239     }
1240     break;
1241   }
1242   case TargetOpcode::G_SELECT: {
1243     LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1244     LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1245     if (!SelTy.isValid() || !CondTy.isValid())
1246       break;
1247 
1248     // Scalar condition select on a vector is valid.
1249     if (CondTy.isVector())
1250       verifyVectorElementMatch(SelTy, CondTy, MI);
1251     break;
1252   }
1253   case TargetOpcode::G_MERGE_VALUES: {
1254     // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1255     // e.g. s2N = MERGE sN, sN
1256     // Merging multiple scalars into a vector is not allowed, should use
1257     // G_BUILD_VECTOR for that.
1258     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1259     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1260     if (DstTy.isVector() || SrcTy.isVector())
1261       report("G_MERGE_VALUES cannot operate on vectors", MI);
1262 
1263     const unsigned NumOps = MI->getNumOperands();
1264     if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1265       report("G_MERGE_VALUES result size is inconsistent", MI);
1266 
1267     for (unsigned I = 2; I != NumOps; ++I) {
1268       if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1269         report("G_MERGE_VALUES source types do not match", MI);
1270     }
1271 
1272     break;
1273   }
1274   case TargetOpcode::G_UNMERGE_VALUES: {
1275     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1276     LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands()-1).getReg());
1277     // For now G_UNMERGE can split vectors.
1278     for (unsigned i = 0; i < MI->getNumOperands()-1; ++i) {
1279       if (MRI->getType(MI->getOperand(i).getReg()) != DstTy)
1280         report("G_UNMERGE_VALUES destination types do not match", MI);
1281     }
1282     if (SrcTy.getSizeInBits() !=
1283         (DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) {
1284       report("G_UNMERGE_VALUES source operand does not cover dest operands",
1285              MI);
1286     }
1287     break;
1288   }
1289   case TargetOpcode::G_BUILD_VECTOR: {
1290     // Source types must be scalars, dest type a vector. Total size of scalars
1291     // must match the dest vector size.
1292     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1293     LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1294     if (!DstTy.isVector() || SrcEltTy.isVector()) {
1295       report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1296       break;
1297     }
1298 
1299     if (DstTy.getElementType() != SrcEltTy)
1300       report("G_BUILD_VECTOR result element type must match source type", MI);
1301 
1302     if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1303       report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1304 
1305     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1306       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1307         report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1308 
1309     break;
1310   }
1311   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1312     // Source types must be scalars, dest type a vector. Scalar types must be
1313     // larger than the dest vector elt type, as this is a truncating operation.
1314     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1315     LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1316     if (!DstTy.isVector() || SrcEltTy.isVector())
1317       report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1318              MI);
1319     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1320       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1321         report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1322                MI);
1323     if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1324       report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1325              "dest elt type",
1326              MI);
1327     break;
1328   }
1329   case TargetOpcode::G_CONCAT_VECTORS: {
1330     // Source types should be vectors, and total size should match the dest
1331     // vector size.
1332     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1333     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1334     if (!DstTy.isVector() || !SrcTy.isVector())
1335       report("G_CONCAT_VECTOR requires vector source and destination operands",
1336              MI);
1337 
1338     if (MI->getNumOperands() < 3)
1339       report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1340 
1341     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1342       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1343         report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1344     if (DstTy.getNumElements() !=
1345         SrcTy.getNumElements() * (MI->getNumOperands() - 1))
1346       report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1347     break;
1348   }
1349   case TargetOpcode::G_ICMP:
1350   case TargetOpcode::G_FCMP: {
1351     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1352     LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1353 
1354     if ((DstTy.isVector() != SrcTy.isVector()) ||
1355         (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1356       report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1357 
1358     break;
1359   }
1360   case TargetOpcode::G_EXTRACT: {
1361     const MachineOperand &SrcOp = MI->getOperand(1);
1362     if (!SrcOp.isReg()) {
1363       report("extract source must be a register", MI);
1364       break;
1365     }
1366 
1367     const MachineOperand &OffsetOp = MI->getOperand(2);
1368     if (!OffsetOp.isImm()) {
1369       report("extract offset must be a constant", MI);
1370       break;
1371     }
1372 
1373     unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1374     unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1375     if (SrcSize == DstSize)
1376       report("extract source must be larger than result", MI);
1377 
1378     if (DstSize + OffsetOp.getImm() > SrcSize)
1379       report("extract reads past end of register", MI);
1380     break;
1381   }
1382   case TargetOpcode::G_INSERT: {
1383     const MachineOperand &SrcOp = MI->getOperand(2);
1384     if (!SrcOp.isReg()) {
1385       report("insert source must be a register", MI);
1386       break;
1387     }
1388 
1389     const MachineOperand &OffsetOp = MI->getOperand(3);
1390     if (!OffsetOp.isImm()) {
1391       report("insert offset must be a constant", MI);
1392       break;
1393     }
1394 
1395     unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1396     unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1397 
1398     if (DstSize <= SrcSize)
1399       report("inserted size must be smaller than total register", MI);
1400 
1401     if (SrcSize + OffsetOp.getImm() > DstSize)
1402       report("insert writes past end of register", MI);
1403 
1404     break;
1405   }
1406   case TargetOpcode::G_JUMP_TABLE: {
1407     if (!MI->getOperand(1).isJTI())
1408       report("G_JUMP_TABLE source operand must be a jump table index", MI);
1409     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1410     if (!DstTy.isPointer())
1411       report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1412     break;
1413   }
1414   case TargetOpcode::G_BRJT: {
1415     if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1416       report("G_BRJT src operand 0 must be a pointer type", MI);
1417 
1418     if (!MI->getOperand(1).isJTI())
1419       report("G_BRJT src operand 1 must be a jump table index", MI);
1420 
1421     const auto &IdxOp = MI->getOperand(2);
1422     if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1423       report("G_BRJT src operand 2 must be a scalar reg type", MI);
1424     break;
1425   }
1426   case TargetOpcode::G_INTRINSIC:
1427   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1428     // TODO: Should verify number of def and use operands, but the current
1429     // interface requires passing in IR types for mangling.
1430     const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1431     if (!IntrIDOp.isIntrinsicID()) {
1432       report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1433       break;
1434     }
1435 
1436     bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
1437     unsigned IntrID = IntrIDOp.getIntrinsicID();
1438     if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1439       AttributeList Attrs
1440         = Intrinsic::getAttributes(MF->getFunction().getContext(),
1441                                    static_cast<Intrinsic::ID>(IntrID));
1442       bool DeclHasSideEffects = !Attrs.hasFnAttr(Attribute::ReadNone);
1443       if (NoSideEffects && DeclHasSideEffects) {
1444         report("G_INTRINSIC used with intrinsic that accesses memory", MI);
1445         break;
1446       }
1447       if (!NoSideEffects && !DeclHasSideEffects) {
1448         report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
1449         break;
1450       }
1451     }
1452 
1453     break;
1454   }
1455   case TargetOpcode::G_SEXT_INREG: {
1456     if (!MI->getOperand(2).isImm()) {
1457       report("G_SEXT_INREG expects an immediate operand #2", MI);
1458       break;
1459     }
1460 
1461     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1462     int64_t Imm = MI->getOperand(2).getImm();
1463     if (Imm <= 0)
1464       report("G_SEXT_INREG size must be >= 1", MI);
1465     if (Imm >= SrcTy.getScalarSizeInBits())
1466       report("G_SEXT_INREG size must be less than source bit width", MI);
1467     break;
1468   }
1469   case TargetOpcode::G_SHUFFLE_VECTOR: {
1470     const MachineOperand &MaskOp = MI->getOperand(3);
1471     if (!MaskOp.isShuffleMask()) {
1472       report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1473       break;
1474     }
1475 
1476     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1477     LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1478     LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1479 
1480     if (Src0Ty != Src1Ty)
1481       report("Source operands must be the same type", MI);
1482 
1483     if (Src0Ty.getScalarType() != DstTy.getScalarType())
1484       report("G_SHUFFLE_VECTOR cannot change element type", MI);
1485 
1486     // Don't check that all operands are vector because scalars are used in
1487     // place of 1 element vectors.
1488     int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1489     int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1490 
1491     ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1492 
1493     if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1494       report("Wrong result type for shufflemask", MI);
1495 
1496     for (int Idx : MaskIdxes) {
1497       if (Idx < 0)
1498         continue;
1499 
1500       if (Idx >= 2 * SrcNumElts)
1501         report("Out of bounds shuffle index", MI);
1502     }
1503 
1504     break;
1505   }
1506   case TargetOpcode::G_DYN_STACKALLOC: {
1507     const MachineOperand &DstOp = MI->getOperand(0);
1508     const MachineOperand &AllocOp = MI->getOperand(1);
1509     const MachineOperand &AlignOp = MI->getOperand(2);
1510 
1511     if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1512       report("dst operand 0 must be a pointer type", MI);
1513       break;
1514     }
1515 
1516     if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1517       report("src operand 1 must be a scalar reg type", MI);
1518       break;
1519     }
1520 
1521     if (!AlignOp.isImm()) {
1522       report("src operand 2 must be an immediate type", MI);
1523       break;
1524     }
1525     break;
1526   }
1527   case TargetOpcode::G_MEMCPY_INLINE:
1528   case TargetOpcode::G_MEMCPY:
1529   case TargetOpcode::G_MEMMOVE: {
1530     ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1531     if (MMOs.size() != 2) {
1532       report("memcpy/memmove must have 2 memory operands", MI);
1533       break;
1534     }
1535 
1536     if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1537         (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1538       report("wrong memory operand types", MI);
1539       break;
1540     }
1541 
1542     if (MMOs[0]->getSize() != MMOs[1]->getSize())
1543       report("inconsistent memory operand sizes", MI);
1544 
1545     LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1546     LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1547 
1548     if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1549       report("memory instruction operand must be a pointer", MI);
1550       break;
1551     }
1552 
1553     if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1554       report("inconsistent store address space", MI);
1555     if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1556       report("inconsistent load address space", MI);
1557 
1558     if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1559       if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1560         report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1561 
1562     break;
1563   }
1564   case TargetOpcode::G_BZERO:
1565   case TargetOpcode::G_MEMSET: {
1566     ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1567     std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1568     if (MMOs.size() != 1) {
1569       report(Twine(Name, " must have 1 memory operand"), MI);
1570       break;
1571     }
1572 
1573     if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1574       report(Twine(Name, " memory operand must be a store"), MI);
1575       break;
1576     }
1577 
1578     LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1579     if (!DstPtrTy.isPointer()) {
1580       report(Twine(Name, " operand must be a pointer"), MI);
1581       break;
1582     }
1583 
1584     if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1585       report("inconsistent " + Twine(Name, " address space"), MI);
1586 
1587     if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1588         (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1589       report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1590 
1591     break;
1592   }
1593   case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1594   case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1595     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1596     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1597     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1598     if (!DstTy.isScalar())
1599       report("Vector reduction requires a scalar destination type", MI);
1600     if (!Src1Ty.isScalar())
1601       report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1602     if (!Src2Ty.isVector())
1603       report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1604     break;
1605   }
1606   case TargetOpcode::G_VECREDUCE_FADD:
1607   case TargetOpcode::G_VECREDUCE_FMUL:
1608   case TargetOpcode::G_VECREDUCE_FMAX:
1609   case TargetOpcode::G_VECREDUCE_FMIN:
1610   case TargetOpcode::G_VECREDUCE_ADD:
1611   case TargetOpcode::G_VECREDUCE_MUL:
1612   case TargetOpcode::G_VECREDUCE_AND:
1613   case TargetOpcode::G_VECREDUCE_OR:
1614   case TargetOpcode::G_VECREDUCE_XOR:
1615   case TargetOpcode::G_VECREDUCE_SMAX:
1616   case TargetOpcode::G_VECREDUCE_SMIN:
1617   case TargetOpcode::G_VECREDUCE_UMAX:
1618   case TargetOpcode::G_VECREDUCE_UMIN: {
1619     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1620     if (!DstTy.isScalar())
1621       report("Vector reduction requires a scalar destination type", MI);
1622     break;
1623   }
1624 
1625   case TargetOpcode::G_SBFX:
1626   case TargetOpcode::G_UBFX: {
1627     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1628     if (DstTy.isVector()) {
1629       report("Bitfield extraction is not supported on vectors", MI);
1630       break;
1631     }
1632     break;
1633   }
1634   case TargetOpcode::G_SHL:
1635   case TargetOpcode::G_LSHR:
1636   case TargetOpcode::G_ASHR:
1637   case TargetOpcode::G_ROTR:
1638   case TargetOpcode::G_ROTL: {
1639     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1640     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1641     if (Src1Ty.isVector() != Src2Ty.isVector()) {
1642       report("Shifts and rotates require operands to be either all scalars or "
1643              "all vectors",
1644              MI);
1645       break;
1646     }
1647     break;
1648   }
1649   case TargetOpcode::G_LLROUND:
1650   case TargetOpcode::G_LROUND: {
1651     verifyAllRegOpsScalar(*MI, *MRI);
1652     break;
1653   }
1654   case TargetOpcode::G_IS_FPCLASS: {
1655     LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
1656     LLT DestEltTy = DestTy.getScalarType();
1657     if (!DestEltTy.isScalar()) {
1658       report("Destination must be a scalar or vector of scalars", MI);
1659       break;
1660     }
1661     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1662     LLT SrcEltTy = SrcTy.getScalarType();
1663     if (!SrcEltTy.isScalar()) {
1664       report("Source must be a scalar or vector of scalars", MI);
1665       break;
1666     }
1667     if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
1668       break;
1669     const MachineOperand &TestMO = MI->getOperand(2);
1670     if (!TestMO.isImm()) {
1671       report("floating-point class set (operand 2) must be an immediate", MI);
1672       break;
1673     }
1674     int64_t Test = TestMO.getImm();
1675     if (Test < 0 || Test > fcAllFlags) {
1676       report("Incorrect floating-point class set (operand 2)", MI);
1677       break;
1678     }
1679     const MachineOperand &SemanticsMO = MI->getOperand(3);
1680     if (!SemanticsMO.isImm()) {
1681       report("floating-point semantics (operand 3) must be an immediate", MI);
1682       break;
1683     }
1684     int64_t Semantics = SemanticsMO.getImm();
1685     if (Semantics < 0 || Semantics > APFloat::S_MaxSemantics) {
1686       report("Incorrect floating-point semantics (operand 3)", MI);
1687       break;
1688     }
1689     break;
1690   }
1691   default:
1692     break;
1693   }
1694 }
1695 
1696 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1697   const MCInstrDesc &MCID = MI->getDesc();
1698   if (MI->getNumOperands() < MCID.getNumOperands()) {
1699     report("Too few operands", MI);
1700     errs() << MCID.getNumOperands() << " operands expected, but "
1701            << MI->getNumOperands() << " given.\n";
1702   }
1703 
1704   if (MI->isPHI()) {
1705     if (MF->getProperties().hasProperty(
1706             MachineFunctionProperties::Property::NoPHIs))
1707       report("Found PHI instruction with NoPHIs property set", MI);
1708 
1709     if (FirstNonPHI)
1710       report("Found PHI instruction after non-PHI", MI);
1711   } else if (FirstNonPHI == nullptr)
1712     FirstNonPHI = MI;
1713 
1714   // Check the tied operands.
1715   if (MI->isInlineAsm())
1716     verifyInlineAsm(MI);
1717 
1718   // Check that unspillable terminators define a reg and have at most one use.
1719   if (TII->isUnspillableTerminator(MI)) {
1720     if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1721       report("Unspillable Terminator does not define a reg", MI);
1722     Register Def = MI->getOperand(0).getReg();
1723     if (Def.isVirtual() &&
1724         !MF->getProperties().hasProperty(
1725             MachineFunctionProperties::Property::NoPHIs) &&
1726         std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1727       report("Unspillable Terminator expected to have at most one use!", MI);
1728   }
1729 
1730   // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1731   // DBG_VALUEs: these are convenient to use in tests, but should never get
1732   // generated.
1733   if (MI->isDebugValue() && MI->getNumOperands() == 4)
1734     if (!MI->getDebugLoc())
1735       report("Missing DebugLoc for debug instruction", MI);
1736 
1737   // Meta instructions should never be the subject of debug value tracking,
1738   // they don't create a value in the output program at all.
1739   if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1740     report("Metadata instruction should not have a value tracking number", MI);
1741 
1742   // Check the MachineMemOperands for basic consistency.
1743   for (MachineMemOperand *Op : MI->memoperands()) {
1744     if (Op->isLoad() && !MI->mayLoad())
1745       report("Missing mayLoad flag", MI);
1746     if (Op->isStore() && !MI->mayStore())
1747       report("Missing mayStore flag", MI);
1748   }
1749 
1750   // Debug values must not have a slot index.
1751   // Other instructions must have one, unless they are inside a bundle.
1752   if (LiveInts) {
1753     bool mapped = !LiveInts->isNotInMIMap(*MI);
1754     if (MI->isDebugOrPseudoInstr()) {
1755       if (mapped)
1756         report("Debug instruction has a slot index", MI);
1757     } else if (MI->isInsideBundle()) {
1758       if (mapped)
1759         report("Instruction inside bundle has a slot index", MI);
1760     } else {
1761       if (!mapped)
1762         report("Missing slot index", MI);
1763     }
1764   }
1765 
1766   unsigned Opc = MCID.getOpcode();
1767   if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
1768     verifyPreISelGenericInstruction(MI);
1769     return;
1770   }
1771 
1772   StringRef ErrorInfo;
1773   if (!TII->verifyInstruction(*MI, ErrorInfo))
1774     report(ErrorInfo.data(), MI);
1775 
1776   // Verify properties of various specific instruction types
1777   switch (MI->getOpcode()) {
1778   case TargetOpcode::COPY: {
1779     const MachineOperand &DstOp = MI->getOperand(0);
1780     const MachineOperand &SrcOp = MI->getOperand(1);
1781     const Register SrcReg = SrcOp.getReg();
1782     const Register DstReg = DstOp.getReg();
1783 
1784     LLT DstTy = MRI->getType(DstReg);
1785     LLT SrcTy = MRI->getType(SrcReg);
1786     if (SrcTy.isValid() && DstTy.isValid()) {
1787       // If both types are valid, check that the types are the same.
1788       if (SrcTy != DstTy) {
1789         report("Copy Instruction is illegal with mismatching types", MI);
1790         errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1791       }
1792 
1793       break;
1794     }
1795 
1796     if (!SrcTy.isValid() && !DstTy.isValid())
1797       break;
1798 
1799     // If we have only one valid type, this is likely a copy between a virtual
1800     // and physical register.
1801     unsigned SrcSize = 0;
1802     unsigned DstSize = 0;
1803     if (SrcReg.isPhysical() && DstTy.isValid()) {
1804       const TargetRegisterClass *SrcRC =
1805           TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1806       if (SrcRC)
1807         SrcSize = TRI->getRegSizeInBits(*SrcRC);
1808     }
1809 
1810     if (SrcSize == 0)
1811       SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1812 
1813     if (DstReg.isPhysical() && SrcTy.isValid()) {
1814       const TargetRegisterClass *DstRC =
1815           TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1816       if (DstRC)
1817         DstSize = TRI->getRegSizeInBits(*DstRC);
1818     }
1819 
1820     if (DstSize == 0)
1821       DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1822 
1823     if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
1824       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1825         report("Copy Instruction is illegal with mismatching sizes", MI);
1826         errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1827                << "\n";
1828       }
1829     }
1830     break;
1831   }
1832   case TargetOpcode::STATEPOINT: {
1833     StatepointOpers SO(MI);
1834     if (!MI->getOperand(SO.getIDPos()).isImm() ||
1835         !MI->getOperand(SO.getNBytesPos()).isImm() ||
1836         !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1837       report("meta operands to STATEPOINT not constant!", MI);
1838       break;
1839     }
1840 
1841     auto VerifyStackMapConstant = [&](unsigned Offset) {
1842       if (Offset >= MI->getNumOperands()) {
1843         report("stack map constant to STATEPOINT is out of range!", MI);
1844         return;
1845       }
1846       if (!MI->getOperand(Offset - 1).isImm() ||
1847           MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1848           !MI->getOperand(Offset).isImm())
1849         report("stack map constant to STATEPOINT not well formed!", MI);
1850     };
1851     VerifyStackMapConstant(SO.getCCIdx());
1852     VerifyStackMapConstant(SO.getFlagsIdx());
1853     VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
1854     VerifyStackMapConstant(SO.getNumGCPtrIdx());
1855     VerifyStackMapConstant(SO.getNumAllocaIdx());
1856     VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
1857 
1858     // Verify that all explicit statepoint defs are tied to gc operands as
1859     // they are expected to be a relocation of gc operands.
1860     unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
1861     unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
1862     for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
1863       unsigned UseOpIdx;
1864       if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
1865         report("STATEPOINT defs expected to be tied", MI);
1866         break;
1867       }
1868       if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
1869         report("STATEPOINT def tied to non-gc operand", MI);
1870         break;
1871       }
1872     }
1873 
1874     // TODO: verify we have properly encoded deopt arguments
1875   } break;
1876   case TargetOpcode::INSERT_SUBREG: {
1877     unsigned InsertedSize;
1878     if (unsigned SubIdx = MI->getOperand(2).getSubReg())
1879       InsertedSize = TRI->getSubRegIdxSize(SubIdx);
1880     else
1881       InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
1882     unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
1883     if (SubRegSize < InsertedSize) {
1884       report("INSERT_SUBREG expected inserted value to have equal or lesser "
1885              "size than the subreg it was inserted into", MI);
1886       break;
1887     }
1888   } break;
1889   }
1890 }
1891 
1892 void
1893 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
1894   const MachineInstr *MI = MO->getParent();
1895   const MCInstrDesc &MCID = MI->getDesc();
1896   unsigned NumDefs = MCID.getNumDefs();
1897   if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
1898     NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
1899 
1900   // The first MCID.NumDefs operands must be explicit register defines
1901   if (MONum < NumDefs) {
1902     const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
1903     if (!MO->isReg())
1904       report("Explicit definition must be a register", MO, MONum);
1905     else if (!MO->isDef() && !MCOI.isOptionalDef())
1906       report("Explicit definition marked as use", MO, MONum);
1907     else if (MO->isImplicit())
1908       report("Explicit definition marked as implicit", MO, MONum);
1909   } else if (MONum < MCID.getNumOperands()) {
1910     const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
1911     // Don't check if it's the last operand in a variadic instruction. See,
1912     // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
1913     bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
1914     if (!IsOptional) {
1915       if (MO->isReg()) {
1916         if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
1917           report("Explicit operand marked as def", MO, MONum);
1918         if (MO->isImplicit())
1919           report("Explicit operand marked as implicit", MO, MONum);
1920       }
1921 
1922       // Check that an instruction has register operands only as expected.
1923       if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
1924           !MO->isReg() && !MO->isFI())
1925         report("Expected a register operand.", MO, MONum);
1926       if (MO->isReg()) {
1927         if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
1928             (MCOI.OperandType == MCOI::OPERAND_PCREL &&
1929              !TII->isPCRelRegisterOperandLegal(*MO)))
1930           report("Expected a non-register operand.", MO, MONum);
1931       }
1932     }
1933 
1934     int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
1935     if (TiedTo != -1) {
1936       if (!MO->isReg())
1937         report("Tied use must be a register", MO, MONum);
1938       else if (!MO->isTied())
1939         report("Operand should be tied", MO, MONum);
1940       else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
1941         report("Tied def doesn't match MCInstrDesc", MO, MONum);
1942       else if (Register::isPhysicalRegister(MO->getReg())) {
1943         const MachineOperand &MOTied = MI->getOperand(TiedTo);
1944         if (!MOTied.isReg())
1945           report("Tied counterpart must be a register", &MOTied, TiedTo);
1946         else if (Register::isPhysicalRegister(MOTied.getReg()) &&
1947                  MO->getReg() != MOTied.getReg())
1948           report("Tied physical registers must match.", &MOTied, TiedTo);
1949       }
1950     } else if (MO->isReg() && MO->isTied())
1951       report("Explicit operand should not be tied", MO, MONum);
1952   } else {
1953     // ARM adds %reg0 operands to indicate predicates. We'll allow that.
1954     if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
1955       report("Extra explicit operand on non-variadic instruction", MO, MONum);
1956   }
1957 
1958   switch (MO->getType()) {
1959   case MachineOperand::MO_Register: {
1960     // Verify debug flag on debug instructions. Check this first because reg0
1961     // indicates an undefined debug value.
1962     if (MI->isDebugInstr() && MO->isUse()) {
1963       if (!MO->isDebug())
1964         report("Register operand must be marked debug", MO, MONum);
1965     } else if (MO->isDebug()) {
1966       report("Register operand must not be marked debug", MO, MONum);
1967     }
1968 
1969     const Register Reg = MO->getReg();
1970     if (!Reg)
1971       return;
1972     if (MRI->tracksLiveness() && !MI->isDebugInstr())
1973       checkLiveness(MO, MONum);
1974 
1975     if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
1976         MO->getReg().isVirtual()) // TODO: Apply to physregs too
1977       report("Undef virtual register def operands require a subregister", MO, MONum);
1978 
1979     // Verify the consistency of tied operands.
1980     if (MO->isTied()) {
1981       unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
1982       const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
1983       if (!OtherMO.isReg())
1984         report("Must be tied to a register", MO, MONum);
1985       if (!OtherMO.isTied())
1986         report("Missing tie flags on tied operand", MO, MONum);
1987       if (MI->findTiedOperandIdx(OtherIdx) != MONum)
1988         report("Inconsistent tie links", MO, MONum);
1989       if (MONum < MCID.getNumDefs()) {
1990         if (OtherIdx < MCID.getNumOperands()) {
1991           if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
1992             report("Explicit def tied to explicit use without tie constraint",
1993                    MO, MONum);
1994         } else {
1995           if (!OtherMO.isImplicit())
1996             report("Explicit def should be tied to implicit use", MO, MONum);
1997         }
1998       }
1999     }
2000 
2001     // Verify two-address constraints after the twoaddressinstruction pass.
2002     // Both twoaddressinstruction pass and phi-node-elimination pass call
2003     // MRI->leaveSSA() to set MF as NoSSA, we should do the verification after
2004     // twoaddressinstruction pass not after phi-node-elimination pass. So we
2005     // shouldn't use the NoSSA as the condition, we should based on
2006     // TiedOpsRewritten property to verify two-address constraints, this
2007     // property will be set in twoaddressinstruction pass.
2008     unsigned DefIdx;
2009     if (MF->getProperties().hasProperty(
2010             MachineFunctionProperties::Property::TiedOpsRewritten) &&
2011         MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2012         Reg != MI->getOperand(DefIdx).getReg())
2013       report("Two-address instruction operands must be identical", MO, MONum);
2014 
2015     // Check register classes.
2016     unsigned SubIdx = MO->getSubReg();
2017 
2018     if (Register::isPhysicalRegister(Reg)) {
2019       if (SubIdx) {
2020         report("Illegal subregister index for physical register", MO, MONum);
2021         return;
2022       }
2023       if (MONum < MCID.getNumOperands()) {
2024         if (const TargetRegisterClass *DRC =
2025               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2026           if (!DRC->contains(Reg)) {
2027             report("Illegal physical register for instruction", MO, MONum);
2028             errs() << printReg(Reg, TRI) << " is not a "
2029                    << TRI->getRegClassName(DRC) << " register.\n";
2030           }
2031         }
2032       }
2033       if (MO->isRenamable()) {
2034         if (MRI->isReserved(Reg)) {
2035           report("isRenamable set on reserved register", MO, MONum);
2036           return;
2037         }
2038       }
2039     } else {
2040       // Virtual register.
2041       const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2042       if (!RC) {
2043         // This is a generic virtual register.
2044 
2045         // Do not allow undef uses for generic virtual registers. This ensures
2046         // getVRegDef can never fail and return null on a generic register.
2047         //
2048         // FIXME: This restriction should probably be broadened to all SSA
2049         // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2050         // run on the SSA function just before phi elimination.
2051         if (MO->isUndef())
2052           report("Generic virtual register use cannot be undef", MO, MONum);
2053 
2054         // Debug value instruction is permitted to use undefined vregs.
2055         // This is a performance measure to skip the overhead of immediately
2056         // pruning unused debug operands. The final undef substitution occurs
2057         // when debug values are allocated in LDVImpl::handleDebugValue, so
2058         // these verifications always apply after this pass.
2059         if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2060             !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2061           // If we're post-Select, we can't have gvregs anymore.
2062           if (isFunctionSelected) {
2063             report("Generic virtual register invalid in a Selected function",
2064                    MO, MONum);
2065             return;
2066           }
2067 
2068           // The gvreg must have a type and it must not have a SubIdx.
2069           LLT Ty = MRI->getType(Reg);
2070           if (!Ty.isValid()) {
2071             report("Generic virtual register must have a valid type", MO,
2072                    MONum);
2073             return;
2074           }
2075 
2076           const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2077 
2078           // If we're post-RegBankSelect, the gvreg must have a bank.
2079           if (!RegBank && isFunctionRegBankSelected) {
2080             report("Generic virtual register must have a bank in a "
2081                    "RegBankSelected function",
2082                    MO, MONum);
2083             return;
2084           }
2085 
2086           // Make sure the register fits into its register bank if any.
2087           if (RegBank && Ty.isValid() &&
2088               RegBank->getSize() < Ty.getSizeInBits()) {
2089             report("Register bank is too small for virtual register", MO,
2090                    MONum);
2091             errs() << "Register bank " << RegBank->getName() << " too small("
2092                    << RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
2093                    << "-bits\n";
2094             return;
2095           }
2096         }
2097 
2098         if (SubIdx)  {
2099           report("Generic virtual register does not allow subregister index", MO,
2100                  MONum);
2101           return;
2102         }
2103 
2104         // If this is a target specific instruction and this operand
2105         // has register class constraint, the virtual register must
2106         // comply to it.
2107         if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2108             MONum < MCID.getNumOperands() &&
2109             TII->getRegClass(MCID, MONum, TRI, *MF)) {
2110           report("Virtual register does not match instruction constraint", MO,
2111                  MONum);
2112           errs() << "Expect register class "
2113                  << TRI->getRegClassName(
2114                         TII->getRegClass(MCID, MONum, TRI, *MF))
2115                  << " but got nothing\n";
2116           return;
2117         }
2118 
2119         break;
2120       }
2121       if (SubIdx) {
2122         const TargetRegisterClass *SRC =
2123           TRI->getSubClassWithSubReg(RC, SubIdx);
2124         if (!SRC) {
2125           report("Invalid subregister index for virtual register", MO, MONum);
2126           errs() << "Register class " << TRI->getRegClassName(RC)
2127               << " does not support subreg index " << SubIdx << "\n";
2128           return;
2129         }
2130         if (RC != SRC) {
2131           report("Invalid register class for subregister index", MO, MONum);
2132           errs() << "Register class " << TRI->getRegClassName(RC)
2133               << " does not fully support subreg index " << SubIdx << "\n";
2134           return;
2135         }
2136       }
2137       if (MONum < MCID.getNumOperands()) {
2138         if (const TargetRegisterClass *DRC =
2139               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2140           if (SubIdx) {
2141             const TargetRegisterClass *SuperRC =
2142                 TRI->getLargestLegalSuperClass(RC, *MF);
2143             if (!SuperRC) {
2144               report("No largest legal super class exists.", MO, MONum);
2145               return;
2146             }
2147             DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2148             if (!DRC) {
2149               report("No matching super-reg register class.", MO, MONum);
2150               return;
2151             }
2152           }
2153           if (!RC->hasSuperClassEq(DRC)) {
2154             report("Illegal virtual register for instruction", MO, MONum);
2155             errs() << "Expected a " << TRI->getRegClassName(DRC)
2156                 << " register, but got a " << TRI->getRegClassName(RC)
2157                 << " register\n";
2158           }
2159         }
2160       }
2161     }
2162     break;
2163   }
2164 
2165   case MachineOperand::MO_RegisterMask:
2166     regMasks.push_back(MO->getRegMask());
2167     break;
2168 
2169   case MachineOperand::MO_MachineBasicBlock:
2170     if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2171       report("PHI operand is not in the CFG", MO, MONum);
2172     break;
2173 
2174   case MachineOperand::MO_FrameIndex:
2175     if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2176         LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2177       int FI = MO->getIndex();
2178       LiveInterval &LI = LiveStks->getInterval(FI);
2179       SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2180 
2181       bool stores = MI->mayStore();
2182       bool loads = MI->mayLoad();
2183       // For a memory-to-memory move, we need to check if the frame
2184       // index is used for storing or loading, by inspecting the
2185       // memory operands.
2186       if (stores && loads) {
2187         for (auto *MMO : MI->memoperands()) {
2188           const PseudoSourceValue *PSV = MMO->getPseudoValue();
2189           if (PSV == nullptr) continue;
2190           const FixedStackPseudoSourceValue *Value =
2191             dyn_cast<FixedStackPseudoSourceValue>(PSV);
2192           if (Value == nullptr) continue;
2193           if (Value->getFrameIndex() != FI) continue;
2194 
2195           if (MMO->isStore())
2196             loads = false;
2197           else
2198             stores = false;
2199           break;
2200         }
2201         if (loads == stores)
2202           report("Missing fixed stack memoperand.", MI);
2203       }
2204       if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2205         report("Instruction loads from dead spill slot", MO, MONum);
2206         errs() << "Live stack: " << LI << '\n';
2207       }
2208       if (stores && !LI.liveAt(Idx.getRegSlot())) {
2209         report("Instruction stores to dead spill slot", MO, MONum);
2210         errs() << "Live stack: " << LI << '\n';
2211       }
2212     }
2213     break;
2214 
2215   default:
2216     break;
2217   }
2218 }
2219 
2220 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2221                                          unsigned MONum, SlotIndex UseIdx,
2222                                          const LiveRange &LR,
2223                                          Register VRegOrUnit,
2224                                          LaneBitmask LaneMask) {
2225   LiveQueryResult LRQ = LR.Query(UseIdx);
2226   // Check if we have a segment at the use, note however that we only need one
2227   // live subregister range, the others may be dead.
2228   if (!LRQ.valueIn() && LaneMask.none()) {
2229     report("No live segment at use", MO, MONum);
2230     report_context_liverange(LR);
2231     report_context_vreg_regunit(VRegOrUnit);
2232     report_context(UseIdx);
2233   }
2234   if (MO->isKill() && !LRQ.isKill()) {
2235     report("Live range continues after kill flag", MO, MONum);
2236     report_context_liverange(LR);
2237     report_context_vreg_regunit(VRegOrUnit);
2238     if (LaneMask.any())
2239       report_context_lanemask(LaneMask);
2240     report_context(UseIdx);
2241   }
2242 }
2243 
2244 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2245                                          unsigned MONum, SlotIndex DefIdx,
2246                                          const LiveRange &LR,
2247                                          Register VRegOrUnit,
2248                                          bool SubRangeCheck,
2249                                          LaneBitmask LaneMask) {
2250   if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2251     assert(VNI && "NULL valno is not allowed");
2252     if (VNI->def != DefIdx) {
2253       report("Inconsistent valno->def", MO, MONum);
2254       report_context_liverange(LR);
2255       report_context_vreg_regunit(VRegOrUnit);
2256       if (LaneMask.any())
2257         report_context_lanemask(LaneMask);
2258       report_context(*VNI);
2259       report_context(DefIdx);
2260     }
2261   } else {
2262     report("No live segment at def", MO, MONum);
2263     report_context_liverange(LR);
2264     report_context_vreg_regunit(VRegOrUnit);
2265     if (LaneMask.any())
2266       report_context_lanemask(LaneMask);
2267     report_context(DefIdx);
2268   }
2269   // Check that, if the dead def flag is present, LiveInts agree.
2270   if (MO->isDead()) {
2271     LiveQueryResult LRQ = LR.Query(DefIdx);
2272     if (!LRQ.isDeadDef()) {
2273       assert(Register::isVirtualRegister(VRegOrUnit) &&
2274              "Expecting a virtual register.");
2275       // A dead subreg def only tells us that the specific subreg is dead. There
2276       // could be other non-dead defs of other subregs, or we could have other
2277       // parts of the register being live through the instruction. So unless we
2278       // are checking liveness for a subrange it is ok for the live range to
2279       // continue, given that we have a dead def of a subregister.
2280       if (SubRangeCheck || MO->getSubReg() == 0) {
2281         report("Live range continues after dead def flag", MO, MONum);
2282         report_context_liverange(LR);
2283         report_context_vreg_regunit(VRegOrUnit);
2284         if (LaneMask.any())
2285           report_context_lanemask(LaneMask);
2286       }
2287     }
2288   }
2289 }
2290 
2291 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2292   const MachineInstr *MI = MO->getParent();
2293   const Register Reg = MO->getReg();
2294   const unsigned SubRegIdx = MO->getSubReg();
2295 
2296   const LiveInterval *LI = nullptr;
2297   if (LiveInts && Reg.isVirtual()) {
2298     if (LiveInts->hasInterval(Reg)) {
2299       LI = &LiveInts->getInterval(Reg);
2300       if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2301           !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2302         report("Live interval for subreg operand has no subranges", MO, MONum);
2303     } else {
2304       report("Virtual register has no live interval", MO, MONum);
2305     }
2306   }
2307 
2308   // Both use and def operands can read a register.
2309   if (MO->readsReg()) {
2310     if (MO->isKill())
2311       addRegWithSubRegs(regsKilled, Reg);
2312 
2313     // Check that LiveVars knows this kill (unless we are inside a bundle, in
2314     // which case we have already checked that LiveVars knows any kills on the
2315     // bundle header instead).
2316     if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2317         !MI->isBundledWithPred()) {
2318       LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2319       if (!is_contained(VI.Kills, MI))
2320         report("Kill missing from LiveVariables", MO, MONum);
2321     }
2322 
2323     // Check LiveInts liveness and kill.
2324     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2325       SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
2326       // Check the cached regunit intervals.
2327       if (Reg.isPhysical() && !isReserved(Reg)) {
2328         for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid();
2329              ++Units) {
2330           if (MRI->isReservedRegUnit(*Units))
2331             continue;
2332           if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
2333             checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
2334         }
2335       }
2336 
2337       if (Reg.isVirtual()) {
2338         // This is a virtual register interval.
2339         checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2340 
2341         if (LI->hasSubRanges() && !MO->isDef()) {
2342           LaneBitmask MOMask = SubRegIdx != 0
2343                                    ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2344                                    : MRI->getMaxLaneMaskForVReg(Reg);
2345           LaneBitmask LiveInMask;
2346           for (const LiveInterval::SubRange &SR : LI->subranges()) {
2347             if ((MOMask & SR.LaneMask).none())
2348               continue;
2349             checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2350             LiveQueryResult LRQ = SR.Query(UseIdx);
2351             if (LRQ.valueIn())
2352               LiveInMask |= SR.LaneMask;
2353           }
2354           // At least parts of the register has to be live at the use.
2355           if ((LiveInMask & MOMask).none()) {
2356             report("No live subrange at use", MO, MONum);
2357             report_context(*LI);
2358             report_context(UseIdx);
2359           }
2360         }
2361       }
2362     }
2363 
2364     // Use of a dead register.
2365     if (!regsLive.count(Reg)) {
2366       if (Reg.isPhysical()) {
2367         // Reserved registers may be used even when 'dead'.
2368         bool Bad = !isReserved(Reg);
2369         // We are fine if just any subregister has a defined value.
2370         if (Bad) {
2371 
2372           for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2373             if (regsLive.count(SubReg)) {
2374               Bad = false;
2375               break;
2376             }
2377           }
2378         }
2379         // If there is an additional implicit-use of a super register we stop
2380         // here. By definition we are fine if the super register is not
2381         // (completely) dead, if the complete super register is dead we will
2382         // get a report for its operand.
2383         if (Bad) {
2384           for (const MachineOperand &MOP : MI->uses()) {
2385             if (!MOP.isReg() || !MOP.isImplicit())
2386               continue;
2387 
2388             if (!MOP.getReg().isPhysical())
2389               continue;
2390 
2391             if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2392               Bad = false;
2393           }
2394         }
2395         if (Bad)
2396           report("Using an undefined physical register", MO, MONum);
2397       } else if (MRI->def_empty(Reg)) {
2398         report("Reading virtual register without a def", MO, MONum);
2399       } else {
2400         BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2401         // We don't know which virtual registers are live in, so only complain
2402         // if vreg was killed in this MBB. Otherwise keep track of vregs that
2403         // must be live in. PHI instructions are handled separately.
2404         if (MInfo.regsKilled.count(Reg))
2405           report("Using a killed virtual register", MO, MONum);
2406         else if (!MI->isPHI())
2407           MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2408       }
2409     }
2410   }
2411 
2412   if (MO->isDef()) {
2413     // Register defined.
2414     // TODO: verify that earlyclobber ops are not used.
2415     if (MO->isDead())
2416       addRegWithSubRegs(regsDead, Reg);
2417     else
2418       addRegWithSubRegs(regsDefined, Reg);
2419 
2420     // Verify SSA form.
2421     if (MRI->isSSA() && Reg.isVirtual() &&
2422         std::next(MRI->def_begin(Reg)) != MRI->def_end())
2423       report("Multiple virtual register defs in SSA form", MO, MONum);
2424 
2425     // Check LiveInts for a live segment, but only for virtual registers.
2426     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2427       SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
2428       DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
2429 
2430       if (Reg.isVirtual()) {
2431         checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
2432 
2433         if (LI->hasSubRanges()) {
2434           LaneBitmask MOMask = SubRegIdx != 0
2435                                    ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2436                                    : MRI->getMaxLaneMaskForVReg(Reg);
2437           for (const LiveInterval::SubRange &SR : LI->subranges()) {
2438             if ((SR.LaneMask & MOMask).none())
2439               continue;
2440             checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
2441           }
2442         }
2443       }
2444     }
2445   }
2446 }
2447 
2448 // This function gets called after visiting all instructions in a bundle. The
2449 // argument points to the bundle header.
2450 // Normal stand-alone instructions are also considered 'bundles', and this
2451 // function is called for all of them.
2452 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2453   BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2454   set_union(MInfo.regsKilled, regsKilled);
2455   set_subtract(regsLive, regsKilled); regsKilled.clear();
2456   // Kill any masked registers.
2457   while (!regMasks.empty()) {
2458     const uint32_t *Mask = regMasks.pop_back_val();
2459     for (Register Reg : regsLive)
2460       if (Reg.isPhysical() &&
2461           MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2462         regsDead.push_back(Reg);
2463   }
2464   set_subtract(regsLive, regsDead);   regsDead.clear();
2465   set_union(regsLive, regsDefined);   regsDefined.clear();
2466 }
2467 
2468 void
2469 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2470   MBBInfoMap[MBB].regsLiveOut = regsLive;
2471   regsLive.clear();
2472 
2473   if (Indexes) {
2474     SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2475     if (!(stop > lastIndex)) {
2476       report("Block ends before last instruction index", MBB);
2477       errs() << "Block ends at " << stop
2478           << " last instruction was at " << lastIndex << '\n';
2479     }
2480     lastIndex = stop;
2481   }
2482 }
2483 
2484 namespace {
2485 // This implements a set of registers that serves as a filter: can filter other
2486 // sets by passing through elements not in the filter and blocking those that
2487 // are. Any filter implicitly includes the full set of physical registers upon
2488 // creation, thus filtering them all out. The filter itself as a set only grows,
2489 // and needs to be as efficient as possible.
2490 struct VRegFilter {
2491   // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2492   // no duplicates. Both virtual and physical registers are fine.
2493   template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2494     SmallVector<Register, 0> VRegsBuffer;
2495     filterAndAdd(FromRegSet, VRegsBuffer);
2496   }
2497   // Filter \p FromRegSet through the filter and append passed elements into \p
2498   // ToVRegs. All elements appended are then added to the filter itself.
2499   // \returns true if anything changed.
2500   template <typename RegSetT>
2501   bool filterAndAdd(const RegSetT &FromRegSet,
2502                     SmallVectorImpl<Register> &ToVRegs) {
2503     unsigned SparseUniverse = Sparse.size();
2504     unsigned NewSparseUniverse = SparseUniverse;
2505     unsigned NewDenseSize = Dense.size();
2506     size_t Begin = ToVRegs.size();
2507     for (Register Reg : FromRegSet) {
2508       if (!Reg.isVirtual())
2509         continue;
2510       unsigned Index = Register::virtReg2Index(Reg);
2511       if (Index < SparseUniverseMax) {
2512         if (Index < SparseUniverse && Sparse.test(Index))
2513           continue;
2514         NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2515       } else {
2516         if (Dense.count(Reg))
2517           continue;
2518         ++NewDenseSize;
2519       }
2520       ToVRegs.push_back(Reg);
2521     }
2522     size_t End = ToVRegs.size();
2523     if (Begin == End)
2524       return false;
2525     // Reserving space in sets once performs better than doing so continuously
2526     // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2527     // tuned all the way down) and double iteration (the second one is over a
2528     // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2529     Sparse.resize(NewSparseUniverse);
2530     Dense.reserve(NewDenseSize);
2531     for (unsigned I = Begin; I < End; ++I) {
2532       Register Reg = ToVRegs[I];
2533       unsigned Index = Register::virtReg2Index(Reg);
2534       if (Index < SparseUniverseMax)
2535         Sparse.set(Index);
2536       else
2537         Dense.insert(Reg);
2538     }
2539     return true;
2540   }
2541 
2542 private:
2543   static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2544   // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound
2545   // are tracked by Dense. The only purpose of the threashold and the Dense set
2546   // is to have a reasonably growing memory usage in pathological cases (large
2547   // number of very sparse VRegFilter instances live at the same time). In
2548   // practice even in the worst-by-execution time cases having all elements
2549   // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2550   // space efficient than if tracked by Dense. The threashold is set to keep the
2551   // worst-case memory usage within 2x of figures determined empirically for
2552   // "all Dense" scenario in such worst-by-execution-time cases.
2553   BitVector Sparse;
2554   DenseSet<unsigned> Dense;
2555 };
2556 
2557 // Implements both a transfer function and a (binary, in-place) join operator
2558 // for a dataflow over register sets with set union join and filtering transfer
2559 // (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2560 // Maintains out_b as its state, allowing for O(n) iteration over it at any
2561 // time, where n is the size of the set (as opposed to O(U) where U is the
2562 // universe). filter_b implicitly contains all physical registers at all times.
2563 class FilteringVRegSet {
2564   VRegFilter Filter;
2565   SmallVector<Register, 0> VRegs;
2566 
2567 public:
2568   // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2569   // Both virtual and physical registers are fine.
2570   template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2571     Filter.add(RS);
2572   }
2573   // Passes \p RS through the filter_b (transfer function) and adds what's left
2574   // to itself (out_b).
2575   template <typename RegSetT> bool add(const RegSetT &RS) {
2576     // Double-duty the Filter: to maintain VRegs a set (and the join operation
2577     // a set union) just add everything being added here to the Filter as well.
2578     return Filter.filterAndAdd(RS, VRegs);
2579   }
2580   using const_iterator = decltype(VRegs)::const_iterator;
2581   const_iterator begin() const { return VRegs.begin(); }
2582   const_iterator end() const { return VRegs.end(); }
2583   size_t size() const { return VRegs.size(); }
2584 };
2585 } // namespace
2586 
2587 // Calculate the largest possible vregsPassed sets. These are the registers that
2588 // can pass through an MBB live, but may not be live every time. It is assumed
2589 // that all vregsPassed sets are empty before the call.
2590 void MachineVerifier::calcRegsPassed() {
2591   if (MF->empty())
2592     // ReversePostOrderTraversal doesn't handle empty functions.
2593     return;
2594 
2595   for (const MachineBasicBlock *MB :
2596        ReversePostOrderTraversal<const MachineFunction *>(MF)) {
2597     FilteringVRegSet VRegs;
2598     BBInfo &Info = MBBInfoMap[MB];
2599     assert(Info.reachable);
2600 
2601     VRegs.addToFilter(Info.regsKilled);
2602     VRegs.addToFilter(Info.regsLiveOut);
2603     for (const MachineBasicBlock *Pred : MB->predecessors()) {
2604       const BBInfo &PredInfo = MBBInfoMap[Pred];
2605       if (!PredInfo.reachable)
2606         continue;
2607 
2608       VRegs.add(PredInfo.regsLiveOut);
2609       VRegs.add(PredInfo.vregsPassed);
2610     }
2611     Info.vregsPassed.reserve(VRegs.size());
2612     Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2613   }
2614 }
2615 
2616 // Calculate the set of virtual registers that must be passed through each basic
2617 // block in order to satisfy the requirements of successor blocks. This is very
2618 // similar to calcRegsPassed, only backwards.
2619 void MachineVerifier::calcRegsRequired() {
2620   // First push live-in regs to predecessors' vregsRequired.
2621   SmallPtrSet<const MachineBasicBlock*, 8> todo;
2622   for (const auto &MBB : *MF) {
2623     BBInfo &MInfo = MBBInfoMap[&MBB];
2624     for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2625       BBInfo &PInfo = MBBInfoMap[Pred];
2626       if (PInfo.addRequired(MInfo.vregsLiveIn))
2627         todo.insert(Pred);
2628     }
2629 
2630     // Handle the PHI node.
2631     for (const MachineInstr &MI : MBB.phis()) {
2632       for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2633         // Skip those Operands which are undef regs or not regs.
2634         if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2635           continue;
2636 
2637         // Get register and predecessor for one PHI edge.
2638         Register Reg = MI.getOperand(i).getReg();
2639         const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
2640 
2641         BBInfo &PInfo = MBBInfoMap[Pred];
2642         if (PInfo.addRequired(Reg))
2643           todo.insert(Pred);
2644       }
2645     }
2646   }
2647 
2648   // Iteratively push vregsRequired to predecessors. This will converge to the
2649   // same final state regardless of DenseSet iteration order.
2650   while (!todo.empty()) {
2651     const MachineBasicBlock *MBB = *todo.begin();
2652     todo.erase(MBB);
2653     BBInfo &MInfo = MBBInfoMap[MBB];
2654     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2655       if (Pred == MBB)
2656         continue;
2657       BBInfo &SInfo = MBBInfoMap[Pred];
2658       if (SInfo.addRequired(MInfo.vregsRequired))
2659         todo.insert(Pred);
2660     }
2661   }
2662 }
2663 
2664 // Check PHI instructions at the beginning of MBB. It is assumed that
2665 // calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2666 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2667   BBInfo &MInfo = MBBInfoMap[&MBB];
2668 
2669   SmallPtrSet<const MachineBasicBlock*, 8> seen;
2670   for (const MachineInstr &Phi : MBB) {
2671     if (!Phi.isPHI())
2672       break;
2673     seen.clear();
2674 
2675     const MachineOperand &MODef = Phi.getOperand(0);
2676     if (!MODef.isReg() || !MODef.isDef()) {
2677       report("Expected first PHI operand to be a register def", &MODef, 0);
2678       continue;
2679     }
2680     if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2681         MODef.isEarlyClobber() || MODef.isDebug())
2682       report("Unexpected flag on PHI operand", &MODef, 0);
2683     Register DefReg = MODef.getReg();
2684     if (!Register::isVirtualRegister(DefReg))
2685       report("Expected first PHI operand to be a virtual register", &MODef, 0);
2686 
2687     for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
2688       const MachineOperand &MO0 = Phi.getOperand(I);
2689       if (!MO0.isReg()) {
2690         report("Expected PHI operand to be a register", &MO0, I);
2691         continue;
2692       }
2693       if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
2694           MO0.isDebug() || MO0.isTied())
2695         report("Unexpected flag on PHI operand", &MO0, I);
2696 
2697       const MachineOperand &MO1 = Phi.getOperand(I + 1);
2698       if (!MO1.isMBB()) {
2699         report("Expected PHI operand to be a basic block", &MO1, I + 1);
2700         continue;
2701       }
2702 
2703       const MachineBasicBlock &Pre = *MO1.getMBB();
2704       if (!Pre.isSuccessor(&MBB)) {
2705         report("PHI input is not a predecessor block", &MO1, I + 1);
2706         continue;
2707       }
2708 
2709       if (MInfo.reachable) {
2710         seen.insert(&Pre);
2711         BBInfo &PrInfo = MBBInfoMap[&Pre];
2712         if (!MO0.isUndef() && PrInfo.reachable &&
2713             !PrInfo.isLiveOut(MO0.getReg()))
2714           report("PHI operand is not live-out from predecessor", &MO0, I);
2715       }
2716     }
2717 
2718     // Did we see all predecessors?
2719     if (MInfo.reachable) {
2720       for (MachineBasicBlock *Pred : MBB.predecessors()) {
2721         if (!seen.count(Pred)) {
2722           report("Missing PHI operand", &Phi);
2723           errs() << printMBBReference(*Pred)
2724                  << " is a predecessor according to the CFG.\n";
2725         }
2726       }
2727     }
2728   }
2729 }
2730 
2731 void MachineVerifier::visitMachineFunctionAfter() {
2732   calcRegsPassed();
2733 
2734   for (const MachineBasicBlock &MBB : *MF)
2735     checkPHIOps(MBB);
2736 
2737   // Now check liveness info if available
2738   calcRegsRequired();
2739 
2740   // Check for killed virtual registers that should be live out.
2741   for (const auto &MBB : *MF) {
2742     BBInfo &MInfo = MBBInfoMap[&MBB];
2743     for (Register VReg : MInfo.vregsRequired)
2744       if (MInfo.regsKilled.count(VReg)) {
2745         report("Virtual register killed in block, but needed live out.", &MBB);
2746         errs() << "Virtual register " << printReg(VReg)
2747                << " is used after the block.\n";
2748       }
2749   }
2750 
2751   if (!MF->empty()) {
2752     BBInfo &MInfo = MBBInfoMap[&MF->front()];
2753     for (Register VReg : MInfo.vregsRequired) {
2754       report("Virtual register defs don't dominate all uses.", MF);
2755       report_context_vreg(VReg);
2756     }
2757   }
2758 
2759   if (LiveVars)
2760     verifyLiveVariables();
2761   if (LiveInts)
2762     verifyLiveIntervals();
2763 
2764   // Check live-in list of each MBB. If a register is live into MBB, check
2765   // that the register is in regsLiveOut of each predecessor block. Since
2766   // this must come from a definition in the predecesssor or its live-in
2767   // list, this will catch a live-through case where the predecessor does not
2768   // have the register in its live-in list.  This currently only checks
2769   // registers that have no aliases, are not allocatable and are not
2770   // reserved, which could mean a condition code register for instance.
2771   if (MRI->tracksLiveness())
2772     for (const auto &MBB : *MF)
2773       for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
2774         MCPhysReg LiveInReg = P.PhysReg;
2775         bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
2776         if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
2777           continue;
2778         for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2779           BBInfo &PInfo = MBBInfoMap[Pred];
2780           if (!PInfo.regsLiveOut.count(LiveInReg)) {
2781             report("Live in register not found to be live out from predecessor.",
2782                    &MBB);
2783             errs() << TRI->getName(LiveInReg)
2784                    << " not found to be live out from "
2785                    << printMBBReference(*Pred) << "\n";
2786           }
2787         }
2788       }
2789 
2790   for (auto CSInfo : MF->getCallSitesInfo())
2791     if (!CSInfo.first->isCall())
2792       report("Call site info referencing instruction that is not call", MF);
2793 
2794   // If there's debug-info, check that we don't have any duplicate value
2795   // tracking numbers.
2796   if (MF->getFunction().getSubprogram()) {
2797     DenseSet<unsigned> SeenNumbers;
2798     for (auto &MBB : *MF) {
2799       for (auto &MI : MBB) {
2800         if (auto Num = MI.peekDebugInstrNum()) {
2801           auto Result = SeenNumbers.insert((unsigned)Num);
2802           if (!Result.second)
2803             report("Instruction has a duplicated value tracking number", &MI);
2804         }
2805       }
2806     }
2807   }
2808 }
2809 
2810 void MachineVerifier::verifyLiveVariables() {
2811   assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
2812   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2813     Register Reg = Register::index2VirtReg(I);
2814     LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2815     for (const auto &MBB : *MF) {
2816       BBInfo &MInfo = MBBInfoMap[&MBB];
2817 
2818       // Our vregsRequired should be identical to LiveVariables' AliveBlocks
2819       if (MInfo.vregsRequired.count(Reg)) {
2820         if (!VI.AliveBlocks.test(MBB.getNumber())) {
2821           report("LiveVariables: Block missing from AliveBlocks", &MBB);
2822           errs() << "Virtual register " << printReg(Reg)
2823                  << " must be live through the block.\n";
2824         }
2825       } else {
2826         if (VI.AliveBlocks.test(MBB.getNumber())) {
2827           report("LiveVariables: Block should not be in AliveBlocks", &MBB);
2828           errs() << "Virtual register " << printReg(Reg)
2829                  << " is not needed live through the block.\n";
2830         }
2831       }
2832     }
2833   }
2834 }
2835 
2836 void MachineVerifier::verifyLiveIntervals() {
2837   assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
2838   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2839     Register Reg = Register::index2VirtReg(I);
2840 
2841     // Spilling and splitting may leave unused registers around. Skip them.
2842     if (MRI->reg_nodbg_empty(Reg))
2843       continue;
2844 
2845     if (!LiveInts->hasInterval(Reg)) {
2846       report("Missing live interval for virtual register", MF);
2847       errs() << printReg(Reg, TRI) << " still has defs or uses\n";
2848       continue;
2849     }
2850 
2851     const LiveInterval &LI = LiveInts->getInterval(Reg);
2852     assert(Reg == LI.reg() && "Invalid reg to interval mapping");
2853     verifyLiveInterval(LI);
2854   }
2855 
2856   // Verify all the cached regunit intervals.
2857   for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
2858     if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
2859       verifyLiveRange(*LR, i);
2860 }
2861 
2862 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
2863                                            const VNInfo *VNI, Register Reg,
2864                                            LaneBitmask LaneMask) {
2865   if (VNI->isUnused())
2866     return;
2867 
2868   const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
2869 
2870   if (!DefVNI) {
2871     report("Value not live at VNInfo def and not marked unused", MF);
2872     report_context(LR, Reg, LaneMask);
2873     report_context(*VNI);
2874     return;
2875   }
2876 
2877   if (DefVNI != VNI) {
2878     report("Live segment at def has different VNInfo", MF);
2879     report_context(LR, Reg, LaneMask);
2880     report_context(*VNI);
2881     return;
2882   }
2883 
2884   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
2885   if (!MBB) {
2886     report("Invalid VNInfo definition index", MF);
2887     report_context(LR, Reg, LaneMask);
2888     report_context(*VNI);
2889     return;
2890   }
2891 
2892   if (VNI->isPHIDef()) {
2893     if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
2894       report("PHIDef VNInfo is not defined at MBB start", MBB);
2895       report_context(LR, Reg, LaneMask);
2896       report_context(*VNI);
2897     }
2898     return;
2899   }
2900 
2901   // Non-PHI def.
2902   const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
2903   if (!MI) {
2904     report("No instruction at VNInfo def index", MBB);
2905     report_context(LR, Reg, LaneMask);
2906     report_context(*VNI);
2907     return;
2908   }
2909 
2910   if (Reg != 0) {
2911     bool hasDef = false;
2912     bool isEarlyClobber = false;
2913     for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
2914       if (!MOI->isReg() || !MOI->isDef())
2915         continue;
2916       if (Register::isVirtualRegister(Reg)) {
2917         if (MOI->getReg() != Reg)
2918           continue;
2919       } else {
2920         if (!Register::isPhysicalRegister(MOI->getReg()) ||
2921             !TRI->hasRegUnit(MOI->getReg(), Reg))
2922           continue;
2923       }
2924       if (LaneMask.any() &&
2925           (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
2926         continue;
2927       hasDef = true;
2928       if (MOI->isEarlyClobber())
2929         isEarlyClobber = true;
2930     }
2931 
2932     if (!hasDef) {
2933       report("Defining instruction does not modify register", MI);
2934       report_context(LR, Reg, LaneMask);
2935       report_context(*VNI);
2936     }
2937 
2938     // Early clobber defs begin at USE slots, but other defs must begin at
2939     // DEF slots.
2940     if (isEarlyClobber) {
2941       if (!VNI->def.isEarlyClobber()) {
2942         report("Early clobber def must be at an early-clobber slot", MBB);
2943         report_context(LR, Reg, LaneMask);
2944         report_context(*VNI);
2945       }
2946     } else if (!VNI->def.isRegister()) {
2947       report("Non-PHI, non-early clobber def must be at a register slot", MBB);
2948       report_context(LR, Reg, LaneMask);
2949       report_context(*VNI);
2950     }
2951   }
2952 }
2953 
2954 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
2955                                              const LiveRange::const_iterator I,
2956                                              Register Reg,
2957                                              LaneBitmask LaneMask) {
2958   const LiveRange::Segment &S = *I;
2959   const VNInfo *VNI = S.valno;
2960   assert(VNI && "Live segment has no valno");
2961 
2962   if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
2963     report("Foreign valno in live segment", MF);
2964     report_context(LR, Reg, LaneMask);
2965     report_context(S);
2966     report_context(*VNI);
2967   }
2968 
2969   if (VNI->isUnused()) {
2970     report("Live segment valno is marked unused", MF);
2971     report_context(LR, Reg, LaneMask);
2972     report_context(S);
2973   }
2974 
2975   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
2976   if (!MBB) {
2977     report("Bad start of live segment, no basic block", MF);
2978     report_context(LR, Reg, LaneMask);
2979     report_context(S);
2980     return;
2981   }
2982   SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
2983   if (S.start != MBBStartIdx && S.start != VNI->def) {
2984     report("Live segment must begin at MBB entry or valno def", MBB);
2985     report_context(LR, Reg, LaneMask);
2986     report_context(S);
2987   }
2988 
2989   const MachineBasicBlock *EndMBB =
2990     LiveInts->getMBBFromIndex(S.end.getPrevSlot());
2991   if (!EndMBB) {
2992     report("Bad end of live segment, no basic block", MF);
2993     report_context(LR, Reg, LaneMask);
2994     report_context(S);
2995     return;
2996   }
2997 
2998   // No more checks for live-out segments.
2999   if (S.end == LiveInts->getMBBEndIdx(EndMBB))
3000     return;
3001 
3002   // RegUnit intervals are allowed dead phis.
3003   if (!Register::isVirtualRegister(Reg) && VNI->isPHIDef() &&
3004       S.start == VNI->def && S.end == VNI->def.getDeadSlot())
3005     return;
3006 
3007   // The live segment is ending inside EndMBB
3008   const MachineInstr *MI =
3009     LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3010   if (!MI) {
3011     report("Live segment doesn't end at a valid instruction", EndMBB);
3012     report_context(LR, Reg, LaneMask);
3013     report_context(S);
3014     return;
3015   }
3016 
3017   // The block slot must refer to a basic block boundary.
3018   if (S.end.isBlock()) {
3019     report("Live segment ends at B slot of an instruction", EndMBB);
3020     report_context(LR, Reg, LaneMask);
3021     report_context(S);
3022   }
3023 
3024   if (S.end.isDead()) {
3025     // Segment ends on the dead slot.
3026     // That means there must be a dead def.
3027     if (!SlotIndex::isSameInstr(S.start, S.end)) {
3028       report("Live segment ending at dead slot spans instructions", EndMBB);
3029       report_context(LR, Reg, LaneMask);
3030       report_context(S);
3031     }
3032   }
3033 
3034   // After tied operands are rewritten, a live segment can only end at an
3035   // early-clobber slot if it is being redefined by an early-clobber def.
3036   // TODO: Before tied operands are rewritten, a live segment can only end at an
3037   // early-clobber slot if the last use is tied to an early-clobber def.
3038   if (MF->getProperties().hasProperty(
3039           MachineFunctionProperties::Property::TiedOpsRewritten) &&
3040       S.end.isEarlyClobber()) {
3041     if (I+1 == LR.end() || (I+1)->start != S.end) {
3042       report("Live segment ending at early clobber slot must be "
3043              "redefined by an EC def in the same instruction", EndMBB);
3044       report_context(LR, Reg, LaneMask);
3045       report_context(S);
3046     }
3047   }
3048 
3049   // The following checks only apply to virtual registers. Physreg liveness
3050   // is too weird to check.
3051   if (Register::isVirtualRegister(Reg)) {
3052     // A live segment can end with either a redefinition, a kill flag on a
3053     // use, or a dead flag on a def.
3054     bool hasRead = false;
3055     bool hasSubRegDef = false;
3056     bool hasDeadDef = false;
3057     for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3058       if (!MOI->isReg() || MOI->getReg() != Reg)
3059         continue;
3060       unsigned Sub = MOI->getSubReg();
3061       LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
3062                                  : LaneBitmask::getAll();
3063       if (MOI->isDef()) {
3064         if (Sub != 0) {
3065           hasSubRegDef = true;
3066           // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3067           // mask for subregister defs. Read-undef defs will be handled by
3068           // readsReg below.
3069           SLM = ~SLM;
3070         }
3071         if (MOI->isDead())
3072           hasDeadDef = true;
3073       }
3074       if (LaneMask.any() && (LaneMask & SLM).none())
3075         continue;
3076       if (MOI->readsReg())
3077         hasRead = true;
3078     }
3079     if (S.end.isDead()) {
3080       // Make sure that the corresponding machine operand for a "dead" live
3081       // range has the dead flag. We cannot perform this check for subregister
3082       // liveranges as partially dead values are allowed.
3083       if (LaneMask.none() && !hasDeadDef) {
3084         report("Instruction ending live segment on dead slot has no dead flag",
3085                MI);
3086         report_context(LR, Reg, LaneMask);
3087         report_context(S);
3088       }
3089     } else {
3090       if (!hasRead) {
3091         // When tracking subregister liveness, the main range must start new
3092         // values on partial register writes, even if there is no read.
3093         if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3094             !hasSubRegDef) {
3095           report("Instruction ending live segment doesn't read the register",
3096                  MI);
3097           report_context(LR, Reg, LaneMask);
3098           report_context(S);
3099         }
3100       }
3101     }
3102   }
3103 
3104   // Now check all the basic blocks in this live segment.
3105   MachineFunction::const_iterator MFI = MBB->getIterator();
3106   // Is this live segment the beginning of a non-PHIDef VN?
3107   if (S.start == VNI->def && !VNI->isPHIDef()) {
3108     // Not live-in to any blocks.
3109     if (MBB == EndMBB)
3110       return;
3111     // Skip this block.
3112     ++MFI;
3113   }
3114 
3115   SmallVector<SlotIndex, 4> Undefs;
3116   if (LaneMask.any()) {
3117     LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3118     OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3119   }
3120 
3121   while (true) {
3122     assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3123     // We don't know how to track physregs into a landing pad.
3124     if (!Register::isVirtualRegister(Reg) && MFI->isEHPad()) {
3125       if (&*MFI == EndMBB)
3126         break;
3127       ++MFI;
3128       continue;
3129     }
3130 
3131     // Is VNI a PHI-def in the current block?
3132     bool IsPHI = VNI->isPHIDef() &&
3133       VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3134 
3135     // Check that VNI is live-out of all predecessors.
3136     for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3137       SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3138       // Predecessor of landing pad live-out on last call.
3139       if (MFI->isEHPad()) {
3140         for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3141           if (MI.isCall()) {
3142             PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3143             break;
3144           }
3145         }
3146       }
3147       const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3148 
3149       // All predecessors must have a live-out value. However for a phi
3150       // instruction with subregister intervals
3151       // only one of the subregisters (not necessarily the current one) needs to
3152       // be defined.
3153       if (!PVNI && (LaneMask.none() || !IsPHI)) {
3154         if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3155           continue;
3156         report("Register not marked live out of predecessor", Pred);
3157         report_context(LR, Reg, LaneMask);
3158         report_context(*VNI);
3159         errs() << " live into " << printMBBReference(*MFI) << '@'
3160                << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3161                << PEnd << '\n';
3162         continue;
3163       }
3164 
3165       // Only PHI-defs can take different predecessor values.
3166       if (!IsPHI && PVNI != VNI) {
3167         report("Different value live out of predecessor", Pred);
3168         report_context(LR, Reg, LaneMask);
3169         errs() << "Valno #" << PVNI->id << " live out of "
3170                << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3171                << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3172                << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3173       }
3174     }
3175     if (&*MFI == EndMBB)
3176       break;
3177     ++MFI;
3178   }
3179 }
3180 
3181 void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3182                                       LaneBitmask LaneMask) {
3183   for (const VNInfo *VNI : LR.valnos)
3184     verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3185 
3186   for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3187     verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3188 }
3189 
3190 void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3191   Register Reg = LI.reg();
3192   assert(Register::isVirtualRegister(Reg));
3193   verifyLiveRange(LI, Reg);
3194 
3195   LaneBitmask Mask;
3196   LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3197   for (const LiveInterval::SubRange &SR : LI.subranges()) {
3198     if ((Mask & SR.LaneMask).any()) {
3199       report("Lane masks of sub ranges overlap in live interval", MF);
3200       report_context(LI);
3201     }
3202     if ((SR.LaneMask & ~MaxMask).any()) {
3203       report("Subrange lanemask is invalid", MF);
3204       report_context(LI);
3205     }
3206     if (SR.empty()) {
3207       report("Subrange must not be empty", MF);
3208       report_context(SR, LI.reg(), SR.LaneMask);
3209     }
3210     Mask |= SR.LaneMask;
3211     verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3212     if (!LI.covers(SR)) {
3213       report("A Subrange is not covered by the main range", MF);
3214       report_context(LI);
3215     }
3216   }
3217 
3218   // Check the LI only has one connected component.
3219   ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3220   unsigned NumComp = ConEQ.Classify(LI);
3221   if (NumComp > 1) {
3222     report("Multiple connected components in live interval", MF);
3223     report_context(LI);
3224     for (unsigned comp = 0; comp != NumComp; ++comp) {
3225       errs() << comp << ": valnos";
3226       for (const VNInfo *I : LI.valnos)
3227         if (comp == ConEQ.getEqClass(I))
3228           errs() << ' ' << I->id;
3229       errs() << '\n';
3230     }
3231   }
3232 }
3233 
3234 namespace {
3235 
3236   // FrameSetup and FrameDestroy can have zero adjustment, so using a single
3237   // integer, we can't tell whether it is a FrameSetup or FrameDestroy if the
3238   // value is zero.
3239   // We use a bool plus an integer to capture the stack state.
3240   struct StackStateOfBB {
3241     StackStateOfBB() = default;
3242     StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
3243       EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3244       ExitIsSetup(ExitSetup) {}
3245 
3246     // Can be negative, which means we are setting up a frame.
3247     int EntryValue = 0;
3248     int ExitValue = 0;
3249     bool EntryIsSetup = false;
3250     bool ExitIsSetup = false;
3251   };
3252 
3253 } // end anonymous namespace
3254 
3255 /// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3256 /// by a FrameDestroy <n>, stack adjustments are identical on all
3257 /// CFG edges to a merge point, and frame is destroyed at end of a return block.
3258 void MachineVerifier::verifyStackFrame() {
3259   unsigned FrameSetupOpcode   = TII->getCallFrameSetupOpcode();
3260   unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3261   if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3262     return;
3263 
3264   SmallVector<StackStateOfBB, 8> SPState;
3265   SPState.resize(MF->getNumBlockIDs());
3266   df_iterator_default_set<const MachineBasicBlock*> Reachable;
3267 
3268   // Visit the MBBs in DFS order.
3269   for (df_ext_iterator<const MachineFunction *,
3270                        df_iterator_default_set<const MachineBasicBlock *>>
3271        DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3272        DFI != DFE; ++DFI) {
3273     const MachineBasicBlock *MBB = *DFI;
3274 
3275     StackStateOfBB BBState;
3276     // Check the exit state of the DFS stack predecessor.
3277     if (DFI.getPathLength() >= 2) {
3278       const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3279       assert(Reachable.count(StackPred) &&
3280              "DFS stack predecessor is already visited.\n");
3281       BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3282       BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3283       BBState.ExitValue = BBState.EntryValue;
3284       BBState.ExitIsSetup = BBState.EntryIsSetup;
3285     }
3286 
3287     // Update stack state by checking contents of MBB.
3288     for (const auto &I : *MBB) {
3289       if (I.getOpcode() == FrameSetupOpcode) {
3290         if (BBState.ExitIsSetup)
3291           report("FrameSetup is after another FrameSetup", &I);
3292         BBState.ExitValue -= TII->getFrameTotalSize(I);
3293         BBState.ExitIsSetup = true;
3294       }
3295 
3296       if (I.getOpcode() == FrameDestroyOpcode) {
3297         int Size = TII->getFrameTotalSize(I);
3298         if (!BBState.ExitIsSetup)
3299           report("FrameDestroy is not after a FrameSetup", &I);
3300         int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3301                                                BBState.ExitValue;
3302         if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3303           report("FrameDestroy <n> is after FrameSetup <m>", &I);
3304           errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3305               << AbsSPAdj << ">.\n";
3306         }
3307         BBState.ExitValue += Size;
3308         BBState.ExitIsSetup = false;
3309       }
3310     }
3311     SPState[MBB->getNumber()] = BBState;
3312 
3313     // Make sure the exit state of any predecessor is consistent with the entry
3314     // state.
3315     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3316       if (Reachable.count(Pred) &&
3317           (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3318            SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3319         report("The exit stack state of a predecessor is inconsistent.", MBB);
3320         errs() << "Predecessor " << printMBBReference(*Pred)
3321                << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3322                << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3323                << printMBBReference(*MBB) << " has entry state ("
3324                << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3325       }
3326     }
3327 
3328     // Make sure the entry state of any successor is consistent with the exit
3329     // state.
3330     for (const MachineBasicBlock *Succ : MBB->successors()) {
3331       if (Reachable.count(Succ) &&
3332           (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3333            SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3334         report("The entry stack state of a successor is inconsistent.", MBB);
3335         errs() << "Successor " << printMBBReference(*Succ)
3336                << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3337                << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3338                << printMBBReference(*MBB) << " has exit state ("
3339                << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3340       }
3341     }
3342 
3343     // Make sure a basic block with return ends with zero stack adjustment.
3344     if (!MBB->empty() && MBB->back().isReturn()) {
3345       if (BBState.ExitIsSetup)
3346         report("A return block ends with a FrameSetup.", MBB);
3347       if (BBState.ExitValue)
3348         report("A return block ends with a nonzero stack adjustment.", MBB);
3349     }
3350   }
3351 }
3352