1 //===- MachineVerifier.cpp - Machine Code Verifier ------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Pass to verify generated machine code. The following is checked: 10 // 11 // Operand counts: All explicit operands must be present. 12 // 13 // Register classes: All physical and virtual register operands must be 14 // compatible with the register class required by the instruction descriptor. 15 // 16 // Register live intervals: Registers must be defined only once, and must be 17 // defined before use. 18 // 19 // The machine code verifier is enabled with the command-line option 20 // -verify-machineinstrs. 21 //===----------------------------------------------------------------------===// 22 23 #include "llvm/ADT/BitVector.h" 24 #include "llvm/ADT/DenseMap.h" 25 #include "llvm/ADT/DenseSet.h" 26 #include "llvm/ADT/DepthFirstIterator.h" 27 #include "llvm/ADT/PostOrderIterator.h" 28 #include "llvm/ADT/STLExtras.h" 29 #include "llvm/ADT/SetOperations.h" 30 #include "llvm/ADT/SmallPtrSet.h" 31 #include "llvm/ADT/SmallVector.h" 32 #include "llvm/ADT/StringRef.h" 33 #include "llvm/ADT/Twine.h" 34 #include "llvm/Analysis/EHPersonalities.h" 35 #include "llvm/CodeGen/GlobalISel/RegisterBank.h" 36 #include "llvm/CodeGen/LiveInterval.h" 37 #include "llvm/CodeGen/LiveIntervalCalc.h" 38 #include "llvm/CodeGen/LiveIntervals.h" 39 #include "llvm/CodeGen/LiveStacks.h" 40 #include "llvm/CodeGen/LiveVariables.h" 41 #include "llvm/CodeGen/MachineBasicBlock.h" 42 #include "llvm/CodeGen/MachineFrameInfo.h" 43 #include "llvm/CodeGen/MachineFunction.h" 44 #include "llvm/CodeGen/MachineFunctionPass.h" 45 #include "llvm/CodeGen/MachineInstr.h" 46 #include "llvm/CodeGen/MachineInstrBundle.h" 47 #include "llvm/CodeGen/MachineMemOperand.h" 48 #include "llvm/CodeGen/MachineOperand.h" 49 #include "llvm/CodeGen/MachineRegisterInfo.h" 50 #include "llvm/CodeGen/PseudoSourceValue.h" 51 #include "llvm/CodeGen/SlotIndexes.h" 52 #include "llvm/CodeGen/StackMaps.h" 53 #include "llvm/CodeGen/TargetInstrInfo.h" 54 #include "llvm/CodeGen/TargetOpcodes.h" 55 #include "llvm/CodeGen/TargetRegisterInfo.h" 56 #include "llvm/CodeGen/TargetSubtargetInfo.h" 57 #include "llvm/IR/BasicBlock.h" 58 #include "llvm/IR/Function.h" 59 #include "llvm/IR/InlineAsm.h" 60 #include "llvm/IR/Instructions.h" 61 #include "llvm/InitializePasses.h" 62 #include "llvm/MC/LaneBitmask.h" 63 #include "llvm/MC/MCAsmInfo.h" 64 #include "llvm/MC/MCInstrDesc.h" 65 #include "llvm/MC/MCRegisterInfo.h" 66 #include "llvm/MC/MCTargetOptions.h" 67 #include "llvm/Pass.h" 68 #include "llvm/Support/Casting.h" 69 #include "llvm/Support/ErrorHandling.h" 70 #include "llvm/Support/LowLevelTypeImpl.h" 71 #include "llvm/Support/MathExtras.h" 72 #include "llvm/Support/raw_ostream.h" 73 #include "llvm/Target/TargetMachine.h" 74 #include <algorithm> 75 #include <cassert> 76 #include <cstddef> 77 #include <cstdint> 78 #include <iterator> 79 #include <string> 80 #include <utility> 81 82 using namespace llvm; 83 84 namespace { 85 86 struct MachineVerifier { 87 MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {} 88 89 unsigned verify(const MachineFunction &MF); 90 91 Pass *const PASS; 92 const char *Banner; 93 const MachineFunction *MF; 94 const TargetMachine *TM; 95 const TargetInstrInfo *TII; 96 const TargetRegisterInfo *TRI; 97 const MachineRegisterInfo *MRI; 98 99 unsigned foundErrors; 100 101 // Avoid querying the MachineFunctionProperties for each operand. 102 bool isFunctionRegBankSelected; 103 bool isFunctionSelected; 104 105 using RegVector = SmallVector<unsigned, 16>; 106 using RegMaskVector = SmallVector<const uint32_t *, 4>; 107 using RegSet = DenseSet<unsigned>; 108 using RegMap = DenseMap<unsigned, const MachineInstr *>; 109 using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>; 110 111 const MachineInstr *FirstNonPHI; 112 const MachineInstr *FirstTerminator; 113 BlockSet FunctionBlocks; 114 115 BitVector regsReserved; 116 RegSet regsLive; 117 RegVector regsDefined, regsDead, regsKilled; 118 RegMaskVector regMasks; 119 120 SlotIndex lastIndex; 121 122 // Add Reg and any sub-registers to RV 123 void addRegWithSubRegs(RegVector &RV, unsigned Reg) { 124 RV.push_back(Reg); 125 if (Register::isPhysicalRegister(Reg)) 126 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) 127 RV.push_back(SubReg); 128 } 129 130 struct BBInfo { 131 // Is this MBB reachable from the MF entry point? 132 bool reachable = false; 133 134 // Vregs that must be live in because they are used without being 135 // defined. Map value is the user. vregsLiveIn doesn't include regs 136 // that only are used by PHI nodes. 137 RegMap vregsLiveIn; 138 139 // Regs killed in MBB. They may be defined again, and will then be in both 140 // regsKilled and regsLiveOut. 141 RegSet regsKilled; 142 143 // Regs defined in MBB and live out. Note that vregs passing through may 144 // be live out without being mentioned here. 145 RegSet regsLiveOut; 146 147 // Vregs that pass through MBB untouched. This set is disjoint from 148 // regsKilled and regsLiveOut. 149 RegSet vregsPassed; 150 151 // Vregs that must pass through MBB because they are needed by a successor 152 // block. This set is disjoint from regsLiveOut. 153 RegSet vregsRequired; 154 155 // Set versions of block's predecessor and successor lists. 156 BlockSet Preds, Succs; 157 158 BBInfo() = default; 159 160 // Add register to vregsRequired if it belongs there. Return true if 161 // anything changed. 162 bool addRequired(unsigned Reg) { 163 if (!Register::isVirtualRegister(Reg)) 164 return false; 165 if (regsLiveOut.count(Reg)) 166 return false; 167 return vregsRequired.insert(Reg).second; 168 } 169 170 // Same for a full set. 171 bool addRequired(const RegSet &RS) { 172 bool Changed = false; 173 for (unsigned Reg : RS) 174 Changed |= addRequired(Reg); 175 return Changed; 176 } 177 178 // Same for a full map. 179 bool addRequired(const RegMap &RM) { 180 bool Changed = false; 181 for (const auto &I : RM) 182 Changed |= addRequired(I.first); 183 return Changed; 184 } 185 186 // Live-out registers are either in regsLiveOut or vregsPassed. 187 bool isLiveOut(unsigned Reg) const { 188 return regsLiveOut.count(Reg) || vregsPassed.count(Reg); 189 } 190 }; 191 192 // Extra register info per MBB. 193 DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap; 194 195 bool isReserved(unsigned Reg) { 196 return Reg < regsReserved.size() && regsReserved.test(Reg); 197 } 198 199 bool isAllocatable(unsigned Reg) const { 200 return Reg < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) && 201 !regsReserved.test(Reg); 202 } 203 204 // Analysis information if available 205 LiveVariables *LiveVars; 206 LiveIntervals *LiveInts; 207 LiveStacks *LiveStks; 208 SlotIndexes *Indexes; 209 210 void visitMachineFunctionBefore(); 211 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB); 212 void visitMachineBundleBefore(const MachineInstr *MI); 213 214 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI); 215 void verifyPreISelGenericInstruction(const MachineInstr *MI); 216 void visitMachineInstrBefore(const MachineInstr *MI); 217 void visitMachineOperand(const MachineOperand *MO, unsigned MONum); 218 void visitMachineBundleAfter(const MachineInstr *MI); 219 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB); 220 void visitMachineFunctionAfter(); 221 222 void report(const char *msg, const MachineFunction *MF); 223 void report(const char *msg, const MachineBasicBlock *MBB); 224 void report(const char *msg, const MachineInstr *MI); 225 void report(const char *msg, const MachineOperand *MO, unsigned MONum, 226 LLT MOVRegType = LLT{}); 227 228 void report_context(const LiveInterval &LI) const; 229 void report_context(const LiveRange &LR, unsigned VRegUnit, 230 LaneBitmask LaneMask) const; 231 void report_context(const LiveRange::Segment &S) const; 232 void report_context(const VNInfo &VNI) const; 233 void report_context(SlotIndex Pos) const; 234 void report_context(MCPhysReg PhysReg) const; 235 void report_context_liverange(const LiveRange &LR) const; 236 void report_context_lanemask(LaneBitmask LaneMask) const; 237 void report_context_vreg(unsigned VReg) const; 238 void report_context_vreg_regunit(unsigned VRegOrUnit) const; 239 240 void verifyInlineAsm(const MachineInstr *MI); 241 242 void checkLiveness(const MachineOperand *MO, unsigned MONum); 243 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum, 244 SlotIndex UseIdx, const LiveRange &LR, unsigned VRegOrUnit, 245 LaneBitmask LaneMask = LaneBitmask::getNone()); 246 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum, 247 SlotIndex DefIdx, const LiveRange &LR, unsigned VRegOrUnit, 248 bool SubRangeCheck = false, 249 LaneBitmask LaneMask = LaneBitmask::getNone()); 250 251 void markReachable(const MachineBasicBlock *MBB); 252 void calcRegsPassed(); 253 void checkPHIOps(const MachineBasicBlock &MBB); 254 255 void calcRegsRequired(); 256 void verifyLiveVariables(); 257 void verifyLiveIntervals(); 258 void verifyLiveInterval(const LiveInterval&); 259 void verifyLiveRangeValue(const LiveRange&, const VNInfo*, unsigned, 260 LaneBitmask); 261 void verifyLiveRangeSegment(const LiveRange&, 262 const LiveRange::const_iterator I, unsigned, 263 LaneBitmask); 264 void verifyLiveRange(const LiveRange&, unsigned, 265 LaneBitmask LaneMask = LaneBitmask::getNone()); 266 267 void verifyStackFrame(); 268 269 void verifySlotIndexes() const; 270 void verifyProperties(const MachineFunction &MF); 271 }; 272 273 struct MachineVerifierPass : public MachineFunctionPass { 274 static char ID; // Pass ID, replacement for typeid 275 276 const std::string Banner; 277 278 MachineVerifierPass(std::string banner = std::string()) 279 : MachineFunctionPass(ID), Banner(std::move(banner)) { 280 initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry()); 281 } 282 283 void getAnalysisUsage(AnalysisUsage &AU) const override { 284 AU.setPreservesAll(); 285 MachineFunctionPass::getAnalysisUsage(AU); 286 } 287 288 bool runOnMachineFunction(MachineFunction &MF) override { 289 unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF); 290 if (FoundErrors) 291 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors."); 292 return false; 293 } 294 }; 295 296 } // end anonymous namespace 297 298 char MachineVerifierPass::ID = 0; 299 300 INITIALIZE_PASS(MachineVerifierPass, "machineverifier", 301 "Verify generated machine code", false, false) 302 303 FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) { 304 return new MachineVerifierPass(Banner); 305 } 306 307 void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *, 308 const std::string &Banner, 309 const MachineFunction &MF) { 310 // TODO: Use MFAM after porting below analyses. 311 // LiveVariables *LiveVars; 312 // LiveIntervals *LiveInts; 313 // LiveStacks *LiveStks; 314 // SlotIndexes *Indexes; 315 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF); 316 if (FoundErrors) 317 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors."); 318 } 319 320 bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors) 321 const { 322 MachineFunction &MF = const_cast<MachineFunction&>(*this); 323 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF); 324 if (AbortOnErrors && FoundErrors) 325 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors."); 326 return FoundErrors == 0; 327 } 328 329 void MachineVerifier::verifySlotIndexes() const { 330 if (Indexes == nullptr) 331 return; 332 333 // Ensure the IdxMBB list is sorted by slot indexes. 334 SlotIndex Last; 335 for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(), 336 E = Indexes->MBBIndexEnd(); I != E; ++I) { 337 assert(!Last.isValid() || I->first > Last); 338 Last = I->first; 339 } 340 } 341 342 void MachineVerifier::verifyProperties(const MachineFunction &MF) { 343 // If a pass has introduced virtual registers without clearing the 344 // NoVRegs property (or set it without allocating the vregs) 345 // then report an error. 346 if (MF.getProperties().hasProperty( 347 MachineFunctionProperties::Property::NoVRegs) && 348 MRI->getNumVirtRegs()) 349 report("Function has NoVRegs property but there are VReg operands", &MF); 350 } 351 352 unsigned MachineVerifier::verify(const MachineFunction &MF) { 353 foundErrors = 0; 354 355 this->MF = &MF; 356 TM = &MF.getTarget(); 357 TII = MF.getSubtarget().getInstrInfo(); 358 TRI = MF.getSubtarget().getRegisterInfo(); 359 MRI = &MF.getRegInfo(); 360 361 const bool isFunctionFailedISel = MF.getProperties().hasProperty( 362 MachineFunctionProperties::Property::FailedISel); 363 364 // If we're mid-GlobalISel and we already triggered the fallback path then 365 // it's expected that the MIR is somewhat broken but that's ok since we'll 366 // reset it and clear the FailedISel attribute in ResetMachineFunctions. 367 if (isFunctionFailedISel) 368 return foundErrors; 369 370 isFunctionRegBankSelected = MF.getProperties().hasProperty( 371 MachineFunctionProperties::Property::RegBankSelected); 372 isFunctionSelected = MF.getProperties().hasProperty( 373 MachineFunctionProperties::Property::Selected); 374 375 LiveVars = nullptr; 376 LiveInts = nullptr; 377 LiveStks = nullptr; 378 Indexes = nullptr; 379 if (PASS) { 380 LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>(); 381 // We don't want to verify LiveVariables if LiveIntervals is available. 382 if (!LiveInts) 383 LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>(); 384 LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>(); 385 Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>(); 386 } 387 388 verifySlotIndexes(); 389 390 verifyProperties(MF); 391 392 visitMachineFunctionBefore(); 393 for (const MachineBasicBlock &MBB : MF) { 394 visitMachineBasicBlockBefore(&MBB); 395 // Keep track of the current bundle header. 396 const MachineInstr *CurBundle = nullptr; 397 // Do we expect the next instruction to be part of the same bundle? 398 bool InBundle = false; 399 400 for (const MachineInstr &MI : MBB.instrs()) { 401 if (MI.getParent() != &MBB) { 402 report("Bad instruction parent pointer", &MBB); 403 errs() << "Instruction: " << MI; 404 continue; 405 } 406 407 // Check for consistent bundle flags. 408 if (InBundle && !MI.isBundledWithPred()) 409 report("Missing BundledPred flag, " 410 "BundledSucc was set on predecessor", 411 &MI); 412 if (!InBundle && MI.isBundledWithPred()) 413 report("BundledPred flag is set, " 414 "but BundledSucc not set on predecessor", 415 &MI); 416 417 // Is this a bundle header? 418 if (!MI.isInsideBundle()) { 419 if (CurBundle) 420 visitMachineBundleAfter(CurBundle); 421 CurBundle = &MI; 422 visitMachineBundleBefore(CurBundle); 423 } else if (!CurBundle) 424 report("No bundle header", &MI); 425 visitMachineInstrBefore(&MI); 426 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { 427 const MachineOperand &Op = MI.getOperand(I); 428 if (Op.getParent() != &MI) { 429 // Make sure to use correct addOperand / RemoveOperand / ChangeTo 430 // functions when replacing operands of a MachineInstr. 431 report("Instruction has operand with wrong parent set", &MI); 432 } 433 434 visitMachineOperand(&Op, I); 435 } 436 437 // Was this the last bundled instruction? 438 InBundle = MI.isBundledWithSucc(); 439 } 440 if (CurBundle) 441 visitMachineBundleAfter(CurBundle); 442 if (InBundle) 443 report("BundledSucc flag set on last instruction in block", &MBB.back()); 444 visitMachineBasicBlockAfter(&MBB); 445 } 446 visitMachineFunctionAfter(); 447 448 // Clean up. 449 regsLive.clear(); 450 regsDefined.clear(); 451 regsDead.clear(); 452 regsKilled.clear(); 453 regMasks.clear(); 454 MBBInfoMap.clear(); 455 456 return foundErrors; 457 } 458 459 void MachineVerifier::report(const char *msg, const MachineFunction *MF) { 460 assert(MF); 461 errs() << '\n'; 462 if (!foundErrors++) { 463 if (Banner) 464 errs() << "# " << Banner << '\n'; 465 if (LiveInts != nullptr) 466 LiveInts->print(errs()); 467 else 468 MF->print(errs(), Indexes); 469 } 470 errs() << "*** Bad machine code: " << msg << " ***\n" 471 << "- function: " << MF->getName() << "\n"; 472 } 473 474 void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) { 475 assert(MBB); 476 report(msg, MBB->getParent()); 477 errs() << "- basic block: " << printMBBReference(*MBB) << ' ' 478 << MBB->getName() << " (" << (const void *)MBB << ')'; 479 if (Indexes) 480 errs() << " [" << Indexes->getMBBStartIdx(MBB) 481 << ';' << Indexes->getMBBEndIdx(MBB) << ')'; 482 errs() << '\n'; 483 } 484 485 void MachineVerifier::report(const char *msg, const MachineInstr *MI) { 486 assert(MI); 487 report(msg, MI->getParent()); 488 errs() << "- instruction: "; 489 if (Indexes && Indexes->hasIndex(*MI)) 490 errs() << Indexes->getInstructionIndex(*MI) << '\t'; 491 MI->print(errs(), /*IsStandalone=*/true); 492 } 493 494 void MachineVerifier::report(const char *msg, const MachineOperand *MO, 495 unsigned MONum, LLT MOVRegType) { 496 assert(MO); 497 report(msg, MO->getParent()); 498 errs() << "- operand " << MONum << ": "; 499 MO->print(errs(), MOVRegType, TRI); 500 errs() << "\n"; 501 } 502 503 void MachineVerifier::report_context(SlotIndex Pos) const { 504 errs() << "- at: " << Pos << '\n'; 505 } 506 507 void MachineVerifier::report_context(const LiveInterval &LI) const { 508 errs() << "- interval: " << LI << '\n'; 509 } 510 511 void MachineVerifier::report_context(const LiveRange &LR, unsigned VRegUnit, 512 LaneBitmask LaneMask) const { 513 report_context_liverange(LR); 514 report_context_vreg_regunit(VRegUnit); 515 if (LaneMask.any()) 516 report_context_lanemask(LaneMask); 517 } 518 519 void MachineVerifier::report_context(const LiveRange::Segment &S) const { 520 errs() << "- segment: " << S << '\n'; 521 } 522 523 void MachineVerifier::report_context(const VNInfo &VNI) const { 524 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n"; 525 } 526 527 void MachineVerifier::report_context_liverange(const LiveRange &LR) const { 528 errs() << "- liverange: " << LR << '\n'; 529 } 530 531 void MachineVerifier::report_context(MCPhysReg PReg) const { 532 errs() << "- p. register: " << printReg(PReg, TRI) << '\n'; 533 } 534 535 void MachineVerifier::report_context_vreg(unsigned VReg) const { 536 errs() << "- v. register: " << printReg(VReg, TRI) << '\n'; 537 } 538 539 void MachineVerifier::report_context_vreg_regunit(unsigned VRegOrUnit) const { 540 if (Register::isVirtualRegister(VRegOrUnit)) { 541 report_context_vreg(VRegOrUnit); 542 } else { 543 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n'; 544 } 545 } 546 547 void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const { 548 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n'; 549 } 550 551 void MachineVerifier::markReachable(const MachineBasicBlock *MBB) { 552 BBInfo &MInfo = MBBInfoMap[MBB]; 553 if (!MInfo.reachable) { 554 MInfo.reachable = true; 555 for (const MachineBasicBlock *Succ : MBB->successors()) 556 markReachable(Succ); 557 } 558 } 559 560 void MachineVerifier::visitMachineFunctionBefore() { 561 lastIndex = SlotIndex(); 562 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs() 563 : TRI->getReservedRegs(*MF); 564 565 if (!MF->empty()) 566 markReachable(&MF->front()); 567 568 // Build a set of the basic blocks in the function. 569 FunctionBlocks.clear(); 570 for (const auto &MBB : *MF) { 571 FunctionBlocks.insert(&MBB); 572 BBInfo &MInfo = MBBInfoMap[&MBB]; 573 574 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end()); 575 if (MInfo.Preds.size() != MBB.pred_size()) 576 report("MBB has duplicate entries in its predecessor list.", &MBB); 577 578 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end()); 579 if (MInfo.Succs.size() != MBB.succ_size()) 580 report("MBB has duplicate entries in its successor list.", &MBB); 581 } 582 583 // Check that the register use lists are sane. 584 MRI->verifyUseLists(); 585 586 if (!MF->empty()) 587 verifyStackFrame(); 588 } 589 590 void 591 MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { 592 FirstTerminator = nullptr; 593 FirstNonPHI = nullptr; 594 595 if (!MF->getProperties().hasProperty( 596 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) { 597 // If this block has allocatable physical registers live-in, check that 598 // it is an entry block or landing pad. 599 for (const auto &LI : MBB->liveins()) { 600 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() && 601 MBB->getIterator() != MBB->getParent()->begin()) { 602 report("MBB has allocatable live-in, but isn't entry or landing-pad.", MBB); 603 report_context(LI.PhysReg); 604 } 605 } 606 } 607 608 // Count the number of landing pad successors. 609 SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs; 610 for (const auto *succ : MBB->successors()) { 611 if (succ->isEHPad()) 612 LandingPadSuccs.insert(succ); 613 if (!FunctionBlocks.count(succ)) 614 report("MBB has successor that isn't part of the function.", MBB); 615 if (!MBBInfoMap[succ].Preds.count(MBB)) { 616 report("Inconsistent CFG", MBB); 617 errs() << "MBB is not in the predecessor list of the successor " 618 << printMBBReference(*succ) << ".\n"; 619 } 620 } 621 622 // Check the predecessor list. 623 for (const MachineBasicBlock *Pred : MBB->predecessors()) { 624 if (!FunctionBlocks.count(Pred)) 625 report("MBB has predecessor that isn't part of the function.", MBB); 626 if (!MBBInfoMap[Pred].Succs.count(MBB)) { 627 report("Inconsistent CFG", MBB); 628 errs() << "MBB is not in the successor list of the predecessor " 629 << printMBBReference(*Pred) << ".\n"; 630 } 631 } 632 633 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo(); 634 const BasicBlock *BB = MBB->getBasicBlock(); 635 const Function &F = MF->getFunction(); 636 if (LandingPadSuccs.size() > 1 && 637 !(AsmInfo && 638 AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj && 639 BB && isa<SwitchInst>(BB->getTerminator())) && 640 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn()))) 641 report("MBB has more than one landing pad successor", MBB); 642 643 // Call analyzeBranch. If it succeeds, there several more conditions to check. 644 MachineBasicBlock *TBB = nullptr, *FBB = nullptr; 645 SmallVector<MachineOperand, 4> Cond; 646 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB, 647 Cond)) { 648 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's 649 // check whether its answers match up with reality. 650 if (!TBB && !FBB) { 651 // Block falls through to its successor. 652 if (!MBB->empty() && MBB->back().isBarrier() && 653 !TII->isPredicated(MBB->back())) { 654 report("MBB exits via unconditional fall-through but ends with a " 655 "barrier instruction!", MBB); 656 } 657 if (!Cond.empty()) { 658 report("MBB exits via unconditional fall-through but has a condition!", 659 MBB); 660 } 661 } else if (TBB && !FBB && Cond.empty()) { 662 // Block unconditionally branches somewhere. 663 if (MBB->empty()) { 664 report("MBB exits via unconditional branch but doesn't contain " 665 "any instructions!", MBB); 666 } else if (!MBB->back().isBarrier()) { 667 report("MBB exits via unconditional branch but doesn't end with a " 668 "barrier instruction!", MBB); 669 } else if (!MBB->back().isTerminator()) { 670 report("MBB exits via unconditional branch but the branch isn't a " 671 "terminator instruction!", MBB); 672 } 673 } else if (TBB && !FBB && !Cond.empty()) { 674 // Block conditionally branches somewhere, otherwise falls through. 675 if (MBB->empty()) { 676 report("MBB exits via conditional branch/fall-through but doesn't " 677 "contain any instructions!", MBB); 678 } else if (MBB->back().isBarrier()) { 679 report("MBB exits via conditional branch/fall-through but ends with a " 680 "barrier instruction!", MBB); 681 } else if (!MBB->back().isTerminator()) { 682 report("MBB exits via conditional branch/fall-through but the branch " 683 "isn't a terminator instruction!", MBB); 684 } 685 } else if (TBB && FBB) { 686 // Block conditionally branches somewhere, otherwise branches 687 // somewhere else. 688 if (MBB->empty()) { 689 report("MBB exits via conditional branch/branch but doesn't " 690 "contain any instructions!", MBB); 691 } else if (!MBB->back().isBarrier()) { 692 report("MBB exits via conditional branch/branch but doesn't end with a " 693 "barrier instruction!", MBB); 694 } else if (!MBB->back().isTerminator()) { 695 report("MBB exits via conditional branch/branch but the branch " 696 "isn't a terminator instruction!", MBB); 697 } 698 if (Cond.empty()) { 699 report("MBB exits via conditional branch/branch but there's no " 700 "condition!", MBB); 701 } 702 } else { 703 report("analyzeBranch returned invalid data!", MBB); 704 } 705 706 // Now check that the successors match up with the answers reported by 707 // analyzeBranch. 708 if (TBB && !MBB->isSuccessor(TBB)) 709 report("MBB exits via jump or conditional branch, but its target isn't a " 710 "CFG successor!", 711 MBB); 712 if (FBB && !MBB->isSuccessor(FBB)) 713 report("MBB exits via conditional branch, but its target isn't a CFG " 714 "successor!", 715 MBB); 716 717 // There might be a fallthrough to the next block if there's either no 718 // unconditional true branch, or if there's a condition, and one of the 719 // branches is missing. 720 bool Fallthrough = !TBB || (!Cond.empty() && !FBB); 721 722 // A conditional fallthrough must be an actual CFG successor, not 723 // unreachable. (Conversely, an unconditional fallthrough might not really 724 // be a successor, because the block might end in unreachable.) 725 if (!Cond.empty() && !FBB) { 726 MachineFunction::const_iterator MBBI = std::next(MBB->getIterator()); 727 if (MBBI == MF->end()) { 728 report("MBB conditionally falls through out of function!", MBB); 729 } else if (!MBB->isSuccessor(&*MBBI)) 730 report("MBB exits via conditional branch/fall-through but the CFG " 731 "successors don't match the actual successors!", 732 MBB); 733 } 734 735 // Verify that there aren't any extra un-accounted-for successors. 736 for (const MachineBasicBlock *SuccMBB : MBB->successors()) { 737 // If this successor is one of the branch targets, it's okay. 738 if (SuccMBB == TBB || SuccMBB == FBB) 739 continue; 740 // If we might have a fallthrough, and the successor is the fallthrough 741 // block, that's also ok. 742 if (Fallthrough && SuccMBB == MBB->getNextNode()) 743 continue; 744 // Also accept successors which are for exception-handling or might be 745 // inlineasm_br targets. 746 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget()) 747 continue; 748 report("MBB has unexpected successors which are not branch targets, " 749 "fallthrough, EHPads, or inlineasm_br targets.", 750 MBB); 751 } 752 } 753 754 regsLive.clear(); 755 if (MRI->tracksLiveness()) { 756 for (const auto &LI : MBB->liveins()) { 757 if (!Register::isPhysicalRegister(LI.PhysReg)) { 758 report("MBB live-in list contains non-physical register", MBB); 759 continue; 760 } 761 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg)) 762 regsLive.insert(SubReg); 763 } 764 } 765 766 const MachineFrameInfo &MFI = MF->getFrameInfo(); 767 BitVector PR = MFI.getPristineRegs(*MF); 768 for (unsigned I : PR.set_bits()) { 769 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I)) 770 regsLive.insert(SubReg); 771 } 772 773 regsKilled.clear(); 774 regsDefined.clear(); 775 776 if (Indexes) 777 lastIndex = Indexes->getMBBStartIdx(MBB); 778 } 779 780 // This function gets called for all bundle headers, including normal 781 // stand-alone unbundled instructions. 782 void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) { 783 if (Indexes && Indexes->hasIndex(*MI)) { 784 SlotIndex idx = Indexes->getInstructionIndex(*MI); 785 if (!(idx > lastIndex)) { 786 report("Instruction index out of order", MI); 787 errs() << "Last instruction was at " << lastIndex << '\n'; 788 } 789 lastIndex = idx; 790 } 791 792 // Ensure non-terminators don't follow terminators. 793 if (MI->isTerminator()) { 794 if (!FirstTerminator) 795 FirstTerminator = MI; 796 } else if (FirstTerminator) { 797 report("Non-terminator instruction after the first terminator", MI); 798 errs() << "First terminator was:\t" << *FirstTerminator; 799 } 800 } 801 802 // The operands on an INLINEASM instruction must follow a template. 803 // Verify that the flag operands make sense. 804 void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) { 805 // The first two operands on INLINEASM are the asm string and global flags. 806 if (MI->getNumOperands() < 2) { 807 report("Too few operands on inline asm", MI); 808 return; 809 } 810 if (!MI->getOperand(0).isSymbol()) 811 report("Asm string must be an external symbol", MI); 812 if (!MI->getOperand(1).isImm()) 813 report("Asm flags must be an immediate", MI); 814 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2, 815 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16, 816 // and Extra_IsConvergent = 32. 817 if (!isUInt<6>(MI->getOperand(1).getImm())) 818 report("Unknown asm flags", &MI->getOperand(1), 1); 819 820 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed"); 821 822 unsigned OpNo = InlineAsm::MIOp_FirstOperand; 823 unsigned NumOps; 824 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) { 825 const MachineOperand &MO = MI->getOperand(OpNo); 826 // There may be implicit ops after the fixed operands. 827 if (!MO.isImm()) 828 break; 829 NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm()); 830 } 831 832 if (OpNo > MI->getNumOperands()) 833 report("Missing operands in last group", MI); 834 835 // An optional MDNode follows the groups. 836 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata()) 837 ++OpNo; 838 839 // All trailing operands must be implicit registers. 840 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) { 841 const MachineOperand &MO = MI->getOperand(OpNo); 842 if (!MO.isReg() || !MO.isImplicit()) 843 report("Expected implicit register after groups", &MO, OpNo); 844 } 845 } 846 847 /// Check that types are consistent when two operands need to have the same 848 /// number of vector elements. 849 /// \return true if the types are valid. 850 bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1, 851 const MachineInstr *MI) { 852 if (Ty0.isVector() != Ty1.isVector()) { 853 report("operand types must be all-vector or all-scalar", MI); 854 // Generally we try to report as many issues as possible at once, but in 855 // this case it's not clear what should we be comparing the size of the 856 // scalar with: the size of the whole vector or its lane. Instead of 857 // making an arbitrary choice and emitting not so helpful message, let's 858 // avoid the extra noise and stop here. 859 return false; 860 } 861 862 if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) { 863 report("operand types must preserve number of vector elements", MI); 864 return false; 865 } 866 867 return true; 868 } 869 870 void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) { 871 if (isFunctionSelected) 872 report("Unexpected generic instruction in a Selected function", MI); 873 874 const MCInstrDesc &MCID = MI->getDesc(); 875 unsigned NumOps = MI->getNumOperands(); 876 877 // Branches must reference a basic block if they are not indirect 878 if (MI->isBranch() && !MI->isIndirectBranch()) { 879 bool HasMBB = false; 880 for (const MachineOperand &Op : MI->operands()) { 881 if (Op.isMBB()) { 882 HasMBB = true; 883 break; 884 } 885 } 886 887 if (!HasMBB) { 888 report("Branch instruction is missing a basic block operand or " 889 "isIndirectBranch property", 890 MI); 891 } 892 } 893 894 // Check types. 895 SmallVector<LLT, 4> Types; 896 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps); 897 I != E; ++I) { 898 if (!MCID.OpInfo[I].isGenericType()) 899 continue; 900 // Generic instructions specify type equality constraints between some of 901 // their operands. Make sure these are consistent. 902 size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex(); 903 Types.resize(std::max(TypeIdx + 1, Types.size())); 904 905 const MachineOperand *MO = &MI->getOperand(I); 906 if (!MO->isReg()) { 907 report("generic instruction must use register operands", MI); 908 continue; 909 } 910 911 LLT OpTy = MRI->getType(MO->getReg()); 912 // Don't report a type mismatch if there is no actual mismatch, only a 913 // type missing, to reduce noise: 914 if (OpTy.isValid()) { 915 // Only the first valid type for a type index will be printed: don't 916 // overwrite it later so it's always clear which type was expected: 917 if (!Types[TypeIdx].isValid()) 918 Types[TypeIdx] = OpTy; 919 else if (Types[TypeIdx] != OpTy) 920 report("Type mismatch in generic instruction", MO, I, OpTy); 921 } else { 922 // Generic instructions must have types attached to their operands. 923 report("Generic instruction is missing a virtual register type", MO, I); 924 } 925 } 926 927 // Generic opcodes must not have physical register operands. 928 for (unsigned I = 0; I < MI->getNumOperands(); ++I) { 929 const MachineOperand *MO = &MI->getOperand(I); 930 if (MO->isReg() && Register::isPhysicalRegister(MO->getReg())) 931 report("Generic instruction cannot have physical register", MO, I); 932 } 933 934 // Avoid out of bounds in checks below. This was already reported earlier. 935 if (MI->getNumOperands() < MCID.getNumOperands()) 936 return; 937 938 StringRef ErrorInfo; 939 if (!TII->verifyInstruction(*MI, ErrorInfo)) 940 report(ErrorInfo.data(), MI); 941 942 // Verify properties of various specific instruction types 943 switch (MI->getOpcode()) { 944 case TargetOpcode::G_CONSTANT: 945 case TargetOpcode::G_FCONSTANT: { 946 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 947 if (DstTy.isVector()) 948 report("Instruction cannot use a vector result type", MI); 949 950 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) { 951 if (!MI->getOperand(1).isCImm()) { 952 report("G_CONSTANT operand must be cimm", MI); 953 break; 954 } 955 956 const ConstantInt *CI = MI->getOperand(1).getCImm(); 957 if (CI->getBitWidth() != DstTy.getSizeInBits()) 958 report("inconsistent constant size", MI); 959 } else { 960 if (!MI->getOperand(1).isFPImm()) { 961 report("G_FCONSTANT operand must be fpimm", MI); 962 break; 963 } 964 const ConstantFP *CF = MI->getOperand(1).getFPImm(); 965 966 if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) != 967 DstTy.getSizeInBits()) { 968 report("inconsistent constant size", MI); 969 } 970 } 971 972 break; 973 } 974 case TargetOpcode::G_LOAD: 975 case TargetOpcode::G_STORE: 976 case TargetOpcode::G_ZEXTLOAD: 977 case TargetOpcode::G_SEXTLOAD: { 978 LLT ValTy = MRI->getType(MI->getOperand(0).getReg()); 979 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg()); 980 if (!PtrTy.isPointer()) 981 report("Generic memory instruction must access a pointer", MI); 982 983 // Generic loads and stores must have a single MachineMemOperand 984 // describing that access. 985 if (!MI->hasOneMemOperand()) { 986 report("Generic instruction accessing memory must have one mem operand", 987 MI); 988 } else { 989 const MachineMemOperand &MMO = **MI->memoperands_begin(); 990 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD || 991 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) { 992 if (MMO.getSizeInBits() >= ValTy.getSizeInBits()) 993 report("Generic extload must have a narrower memory type", MI); 994 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) { 995 if (MMO.getSize() > ValTy.getSizeInBytes()) 996 report("load memory size cannot exceed result size", MI); 997 } else if (MI->getOpcode() == TargetOpcode::G_STORE) { 998 if (ValTy.getSizeInBytes() < MMO.getSize()) 999 report("store memory size cannot exceed value size", MI); 1000 } 1001 } 1002 1003 break; 1004 } 1005 case TargetOpcode::G_PHI: { 1006 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1007 if (!DstTy.isValid() || 1008 !std::all_of(MI->operands_begin() + 1, MI->operands_end(), 1009 [this, &DstTy](const MachineOperand &MO) { 1010 if (!MO.isReg()) 1011 return true; 1012 LLT Ty = MRI->getType(MO.getReg()); 1013 if (!Ty.isValid() || (Ty != DstTy)) 1014 return false; 1015 return true; 1016 })) 1017 report("Generic Instruction G_PHI has operands with incompatible/missing " 1018 "types", 1019 MI); 1020 break; 1021 } 1022 case TargetOpcode::G_BITCAST: { 1023 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1024 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1025 if (!DstTy.isValid() || !SrcTy.isValid()) 1026 break; 1027 1028 if (SrcTy.isPointer() != DstTy.isPointer()) 1029 report("bitcast cannot convert between pointers and other types", MI); 1030 1031 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits()) 1032 report("bitcast sizes must match", MI); 1033 1034 if (SrcTy == DstTy) 1035 report("bitcast must change the type", MI); 1036 1037 break; 1038 } 1039 case TargetOpcode::G_INTTOPTR: 1040 case TargetOpcode::G_PTRTOINT: 1041 case TargetOpcode::G_ADDRSPACE_CAST: { 1042 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1043 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1044 if (!DstTy.isValid() || !SrcTy.isValid()) 1045 break; 1046 1047 verifyVectorElementMatch(DstTy, SrcTy, MI); 1048 1049 DstTy = DstTy.getScalarType(); 1050 SrcTy = SrcTy.getScalarType(); 1051 1052 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) { 1053 if (!DstTy.isPointer()) 1054 report("inttoptr result type must be a pointer", MI); 1055 if (SrcTy.isPointer()) 1056 report("inttoptr source type must not be a pointer", MI); 1057 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) { 1058 if (!SrcTy.isPointer()) 1059 report("ptrtoint source type must be a pointer", MI); 1060 if (DstTy.isPointer()) 1061 report("ptrtoint result type must not be a pointer", MI); 1062 } else { 1063 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST); 1064 if (!SrcTy.isPointer() || !DstTy.isPointer()) 1065 report("addrspacecast types must be pointers", MI); 1066 else { 1067 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace()) 1068 report("addrspacecast must convert different address spaces", MI); 1069 } 1070 } 1071 1072 break; 1073 } 1074 case TargetOpcode::G_PTR_ADD: { 1075 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1076 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg()); 1077 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg()); 1078 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid()) 1079 break; 1080 1081 if (!PtrTy.getScalarType().isPointer()) 1082 report("gep first operand must be a pointer", MI); 1083 1084 if (OffsetTy.getScalarType().isPointer()) 1085 report("gep offset operand must not be a pointer", MI); 1086 1087 // TODO: Is the offset allowed to be a scalar with a vector? 1088 break; 1089 } 1090 case TargetOpcode::G_PTRMASK: { 1091 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1092 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1093 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg()); 1094 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid()) 1095 break; 1096 1097 if (!DstTy.getScalarType().isPointer()) 1098 report("ptrmask result type must be a pointer", MI); 1099 1100 if (!MaskTy.getScalarType().isScalar()) 1101 report("ptrmask mask type must be an integer", MI); 1102 1103 verifyVectorElementMatch(DstTy, MaskTy, MI); 1104 break; 1105 } 1106 case TargetOpcode::G_SEXT: 1107 case TargetOpcode::G_ZEXT: 1108 case TargetOpcode::G_ANYEXT: 1109 case TargetOpcode::G_TRUNC: 1110 case TargetOpcode::G_FPEXT: 1111 case TargetOpcode::G_FPTRUNC: { 1112 // Number of operands and presense of types is already checked (and 1113 // reported in case of any issues), so no need to report them again. As 1114 // we're trying to report as many issues as possible at once, however, the 1115 // instructions aren't guaranteed to have the right number of operands or 1116 // types attached to them at this point 1117 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}"); 1118 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1119 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1120 if (!DstTy.isValid() || !SrcTy.isValid()) 1121 break; 1122 1123 LLT DstElTy = DstTy.getScalarType(); 1124 LLT SrcElTy = SrcTy.getScalarType(); 1125 if (DstElTy.isPointer() || SrcElTy.isPointer()) 1126 report("Generic extend/truncate can not operate on pointers", MI); 1127 1128 verifyVectorElementMatch(DstTy, SrcTy, MI); 1129 1130 unsigned DstSize = DstElTy.getSizeInBits(); 1131 unsigned SrcSize = SrcElTy.getSizeInBits(); 1132 switch (MI->getOpcode()) { 1133 default: 1134 if (DstSize <= SrcSize) 1135 report("Generic extend has destination type no larger than source", MI); 1136 break; 1137 case TargetOpcode::G_TRUNC: 1138 case TargetOpcode::G_FPTRUNC: 1139 if (DstSize >= SrcSize) 1140 report("Generic truncate has destination type no smaller than source", 1141 MI); 1142 break; 1143 } 1144 break; 1145 } 1146 case TargetOpcode::G_SELECT: { 1147 LLT SelTy = MRI->getType(MI->getOperand(0).getReg()); 1148 LLT CondTy = MRI->getType(MI->getOperand(1).getReg()); 1149 if (!SelTy.isValid() || !CondTy.isValid()) 1150 break; 1151 1152 // Scalar condition select on a vector is valid. 1153 if (CondTy.isVector()) 1154 verifyVectorElementMatch(SelTy, CondTy, MI); 1155 break; 1156 } 1157 case TargetOpcode::G_MERGE_VALUES: { 1158 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar, 1159 // e.g. s2N = MERGE sN, sN 1160 // Merging multiple scalars into a vector is not allowed, should use 1161 // G_BUILD_VECTOR for that. 1162 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1163 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1164 if (DstTy.isVector() || SrcTy.isVector()) 1165 report("G_MERGE_VALUES cannot operate on vectors", MI); 1166 1167 const unsigned NumOps = MI->getNumOperands(); 1168 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1)) 1169 report("G_MERGE_VALUES result size is inconsistent", MI); 1170 1171 for (unsigned I = 2; I != NumOps; ++I) { 1172 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy) 1173 report("G_MERGE_VALUES source types do not match", MI); 1174 } 1175 1176 break; 1177 } 1178 case TargetOpcode::G_UNMERGE_VALUES: { 1179 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1180 LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands()-1).getReg()); 1181 // For now G_UNMERGE can split vectors. 1182 for (unsigned i = 0; i < MI->getNumOperands()-1; ++i) { 1183 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) 1184 report("G_UNMERGE_VALUES destination types do not match", MI); 1185 } 1186 if (SrcTy.getSizeInBits() != 1187 (DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) { 1188 report("G_UNMERGE_VALUES source operand does not cover dest operands", 1189 MI); 1190 } 1191 break; 1192 } 1193 case TargetOpcode::G_BUILD_VECTOR: { 1194 // Source types must be scalars, dest type a vector. Total size of scalars 1195 // must match the dest vector size. 1196 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1197 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg()); 1198 if (!DstTy.isVector() || SrcEltTy.isVector()) { 1199 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI); 1200 break; 1201 } 1202 1203 if (DstTy.getElementType() != SrcEltTy) 1204 report("G_BUILD_VECTOR result element type must match source type", MI); 1205 1206 if (DstTy.getNumElements() != MI->getNumOperands() - 1) 1207 report("G_BUILD_VECTOR must have an operand for each elemement", MI); 1208 1209 for (unsigned i = 2; i < MI->getNumOperands(); ++i) { 1210 if (MRI->getType(MI->getOperand(1).getReg()) != 1211 MRI->getType(MI->getOperand(i).getReg())) 1212 report("G_BUILD_VECTOR source operand types are not homogeneous", MI); 1213 } 1214 1215 break; 1216 } 1217 case TargetOpcode::G_BUILD_VECTOR_TRUNC: { 1218 // Source types must be scalars, dest type a vector. Scalar types must be 1219 // larger than the dest vector elt type, as this is a truncating operation. 1220 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1221 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg()); 1222 if (!DstTy.isVector() || SrcEltTy.isVector()) 1223 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands", 1224 MI); 1225 for (unsigned i = 2; i < MI->getNumOperands(); ++i) { 1226 if (MRI->getType(MI->getOperand(1).getReg()) != 1227 MRI->getType(MI->getOperand(i).getReg())) 1228 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous", 1229 MI); 1230 } 1231 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits()) 1232 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than " 1233 "dest elt type", 1234 MI); 1235 break; 1236 } 1237 case TargetOpcode::G_CONCAT_VECTORS: { 1238 // Source types should be vectors, and total size should match the dest 1239 // vector size. 1240 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1241 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1242 if (!DstTy.isVector() || !SrcTy.isVector()) 1243 report("G_CONCAT_VECTOR requires vector source and destination operands", 1244 MI); 1245 for (unsigned i = 2; i < MI->getNumOperands(); ++i) { 1246 if (MRI->getType(MI->getOperand(1).getReg()) != 1247 MRI->getType(MI->getOperand(i).getReg())) 1248 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI); 1249 } 1250 if (DstTy.getNumElements() != 1251 SrcTy.getNumElements() * (MI->getNumOperands() - 1)) 1252 report("G_CONCAT_VECTOR num dest and source elements should match", MI); 1253 break; 1254 } 1255 case TargetOpcode::G_ICMP: 1256 case TargetOpcode::G_FCMP: { 1257 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1258 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg()); 1259 1260 if ((DstTy.isVector() != SrcTy.isVector()) || 1261 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements())) 1262 report("Generic vector icmp/fcmp must preserve number of lanes", MI); 1263 1264 break; 1265 } 1266 case TargetOpcode::G_EXTRACT: { 1267 const MachineOperand &SrcOp = MI->getOperand(1); 1268 if (!SrcOp.isReg()) { 1269 report("extract source must be a register", MI); 1270 break; 1271 } 1272 1273 const MachineOperand &OffsetOp = MI->getOperand(2); 1274 if (!OffsetOp.isImm()) { 1275 report("extract offset must be a constant", MI); 1276 break; 1277 } 1278 1279 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits(); 1280 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits(); 1281 if (SrcSize == DstSize) 1282 report("extract source must be larger than result", MI); 1283 1284 if (DstSize + OffsetOp.getImm() > SrcSize) 1285 report("extract reads past end of register", MI); 1286 break; 1287 } 1288 case TargetOpcode::G_INSERT: { 1289 const MachineOperand &SrcOp = MI->getOperand(2); 1290 if (!SrcOp.isReg()) { 1291 report("insert source must be a register", MI); 1292 break; 1293 } 1294 1295 const MachineOperand &OffsetOp = MI->getOperand(3); 1296 if (!OffsetOp.isImm()) { 1297 report("insert offset must be a constant", MI); 1298 break; 1299 } 1300 1301 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits(); 1302 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits(); 1303 1304 if (DstSize <= SrcSize) 1305 report("inserted size must be smaller than total register", MI); 1306 1307 if (SrcSize + OffsetOp.getImm() > DstSize) 1308 report("insert writes past end of register", MI); 1309 1310 break; 1311 } 1312 case TargetOpcode::G_JUMP_TABLE: { 1313 if (!MI->getOperand(1).isJTI()) 1314 report("G_JUMP_TABLE source operand must be a jump table index", MI); 1315 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1316 if (!DstTy.isPointer()) 1317 report("G_JUMP_TABLE dest operand must have a pointer type", MI); 1318 break; 1319 } 1320 case TargetOpcode::G_BRJT: { 1321 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer()) 1322 report("G_BRJT src operand 0 must be a pointer type", MI); 1323 1324 if (!MI->getOperand(1).isJTI()) 1325 report("G_BRJT src operand 1 must be a jump table index", MI); 1326 1327 const auto &IdxOp = MI->getOperand(2); 1328 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer()) 1329 report("G_BRJT src operand 2 must be a scalar reg type", MI); 1330 break; 1331 } 1332 case TargetOpcode::G_INTRINSIC: 1333 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: { 1334 // TODO: Should verify number of def and use operands, but the current 1335 // interface requires passing in IR types for mangling. 1336 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs()); 1337 if (!IntrIDOp.isIntrinsicID()) { 1338 report("G_INTRINSIC first src operand must be an intrinsic ID", MI); 1339 break; 1340 } 1341 1342 bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC; 1343 unsigned IntrID = IntrIDOp.getIntrinsicID(); 1344 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) { 1345 AttributeList Attrs 1346 = Intrinsic::getAttributes(MF->getFunction().getContext(), 1347 static_cast<Intrinsic::ID>(IntrID)); 1348 bool DeclHasSideEffects = !Attrs.hasFnAttribute(Attribute::ReadNone); 1349 if (NoSideEffects && DeclHasSideEffects) { 1350 report("G_INTRINSIC used with intrinsic that accesses memory", MI); 1351 break; 1352 } 1353 if (!NoSideEffects && !DeclHasSideEffects) { 1354 report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI); 1355 break; 1356 } 1357 } 1358 1359 break; 1360 } 1361 case TargetOpcode::G_SEXT_INREG: { 1362 if (!MI->getOperand(2).isImm()) { 1363 report("G_SEXT_INREG expects an immediate operand #2", MI); 1364 break; 1365 } 1366 1367 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1368 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1369 verifyVectorElementMatch(DstTy, SrcTy, MI); 1370 1371 int64_t Imm = MI->getOperand(2).getImm(); 1372 if (Imm <= 0) 1373 report("G_SEXT_INREG size must be >= 1", MI); 1374 if (Imm >= SrcTy.getScalarSizeInBits()) 1375 report("G_SEXT_INREG size must be less than source bit width", MI); 1376 break; 1377 } 1378 case TargetOpcode::G_SHUFFLE_VECTOR: { 1379 const MachineOperand &MaskOp = MI->getOperand(3); 1380 if (!MaskOp.isShuffleMask()) { 1381 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI); 1382 break; 1383 } 1384 1385 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1386 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg()); 1387 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg()); 1388 1389 if (Src0Ty != Src1Ty) 1390 report("Source operands must be the same type", MI); 1391 1392 if (Src0Ty.getScalarType() != DstTy.getScalarType()) 1393 report("G_SHUFFLE_VECTOR cannot change element type", MI); 1394 1395 // Don't check that all operands are vector because scalars are used in 1396 // place of 1 element vectors. 1397 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1; 1398 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1; 1399 1400 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask(); 1401 1402 if (static_cast<int>(MaskIdxes.size()) != DstNumElts) 1403 report("Wrong result type for shufflemask", MI); 1404 1405 for (int Idx : MaskIdxes) { 1406 if (Idx < 0) 1407 continue; 1408 1409 if (Idx >= 2 * SrcNumElts) 1410 report("Out of bounds shuffle index", MI); 1411 } 1412 1413 break; 1414 } 1415 case TargetOpcode::G_DYN_STACKALLOC: { 1416 const MachineOperand &DstOp = MI->getOperand(0); 1417 const MachineOperand &AllocOp = MI->getOperand(1); 1418 const MachineOperand &AlignOp = MI->getOperand(2); 1419 1420 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) { 1421 report("dst operand 0 must be a pointer type", MI); 1422 break; 1423 } 1424 1425 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) { 1426 report("src operand 1 must be a scalar reg type", MI); 1427 break; 1428 } 1429 1430 if (!AlignOp.isImm()) { 1431 report("src operand 2 must be an immediate type", MI); 1432 break; 1433 } 1434 break; 1435 } 1436 case TargetOpcode::G_MEMCPY: 1437 case TargetOpcode::G_MEMMOVE: { 1438 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands(); 1439 if (MMOs.size() != 2) { 1440 report("memcpy/memmove must have 2 memory operands", MI); 1441 break; 1442 } 1443 1444 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) || 1445 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) { 1446 report("wrong memory operand types", MI); 1447 break; 1448 } 1449 1450 if (MMOs[0]->getSize() != MMOs[1]->getSize()) 1451 report("inconsistent memory operand sizes", MI); 1452 1453 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg()); 1454 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg()); 1455 1456 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) { 1457 report("memory instruction operand must be a pointer", MI); 1458 break; 1459 } 1460 1461 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace()) 1462 report("inconsistent store address space", MI); 1463 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace()) 1464 report("inconsistent load address space", MI); 1465 1466 break; 1467 } 1468 case TargetOpcode::G_MEMSET: { 1469 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands(); 1470 if (MMOs.size() != 1) { 1471 report("memset must have 1 memory operand", MI); 1472 break; 1473 } 1474 1475 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) { 1476 report("memset memory operand must be a store", MI); 1477 break; 1478 } 1479 1480 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg()); 1481 if (!DstPtrTy.isPointer()) { 1482 report("memset operand must be a pointer", MI); 1483 break; 1484 } 1485 1486 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace()) 1487 report("inconsistent memset address space", MI); 1488 1489 break; 1490 } 1491 case TargetOpcode::G_VECREDUCE_SEQ_FADD: 1492 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: 1493 case TargetOpcode::G_VECREDUCE_FADD: 1494 case TargetOpcode::G_VECREDUCE_FMUL: { 1495 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1496 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg()); 1497 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg()); 1498 if (!DstTy.isScalar()) 1499 report("Vector reduction requires a scalar destination type", MI); 1500 if (!Src1Ty.isScalar()) 1501 report("FADD/FMUL vector reduction requires a scalar 1st operand", MI); 1502 if (!Src2Ty.isVector()) 1503 report("FADD/FMUL vector reduction must have a vector 2nd operand", MI); 1504 break; 1505 } 1506 case TargetOpcode::G_VECREDUCE_FMAX: 1507 case TargetOpcode::G_VECREDUCE_FMIN: 1508 case TargetOpcode::G_VECREDUCE_ADD: 1509 case TargetOpcode::G_VECREDUCE_MUL: 1510 case TargetOpcode::G_VECREDUCE_AND: 1511 case TargetOpcode::G_VECREDUCE_OR: 1512 case TargetOpcode::G_VECREDUCE_XOR: 1513 case TargetOpcode::G_VECREDUCE_SMAX: 1514 case TargetOpcode::G_VECREDUCE_SMIN: 1515 case TargetOpcode::G_VECREDUCE_UMAX: 1516 case TargetOpcode::G_VECREDUCE_UMIN: { 1517 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1518 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1519 if (!DstTy.isScalar()) 1520 report("Vector reduction requires a scalar destination type", MI); 1521 if (!SrcTy.isVector()) 1522 report("Vector reduction requires vector source=", MI); 1523 break; 1524 } 1525 default: 1526 break; 1527 } 1528 } 1529 1530 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) { 1531 const MCInstrDesc &MCID = MI->getDesc(); 1532 if (MI->getNumOperands() < MCID.getNumOperands()) { 1533 report("Too few operands", MI); 1534 errs() << MCID.getNumOperands() << " operands expected, but " 1535 << MI->getNumOperands() << " given.\n"; 1536 } 1537 1538 if (MI->isPHI()) { 1539 if (MF->getProperties().hasProperty( 1540 MachineFunctionProperties::Property::NoPHIs)) 1541 report("Found PHI instruction with NoPHIs property set", MI); 1542 1543 if (FirstNonPHI) 1544 report("Found PHI instruction after non-PHI", MI); 1545 } else if (FirstNonPHI == nullptr) 1546 FirstNonPHI = MI; 1547 1548 // Check the tied operands. 1549 if (MI->isInlineAsm()) 1550 verifyInlineAsm(MI); 1551 1552 // A fully-formed DBG_VALUE must have a location. Ignore partially formed 1553 // DBG_VALUEs: these are convenient to use in tests, but should never get 1554 // generated. 1555 if (MI->isDebugValue() && MI->getNumOperands() == 4) 1556 if (!MI->getDebugLoc()) 1557 report("Missing DebugLoc for debug instruction", MI); 1558 1559 // Check the MachineMemOperands for basic consistency. 1560 for (MachineMemOperand *Op : MI->memoperands()) { 1561 if (Op->isLoad() && !MI->mayLoad()) 1562 report("Missing mayLoad flag", MI); 1563 if (Op->isStore() && !MI->mayStore()) 1564 report("Missing mayStore flag", MI); 1565 } 1566 1567 // Debug values must not have a slot index. 1568 // Other instructions must have one, unless they are inside a bundle. 1569 if (LiveInts) { 1570 bool mapped = !LiveInts->isNotInMIMap(*MI); 1571 if (MI->isDebugInstr()) { 1572 if (mapped) 1573 report("Debug instruction has a slot index", MI); 1574 } else if (MI->isInsideBundle()) { 1575 if (mapped) 1576 report("Instruction inside bundle has a slot index", MI); 1577 } else { 1578 if (!mapped) 1579 report("Missing slot index", MI); 1580 } 1581 } 1582 1583 if (isPreISelGenericOpcode(MCID.getOpcode())) { 1584 verifyPreISelGenericInstruction(MI); 1585 return; 1586 } 1587 1588 StringRef ErrorInfo; 1589 if (!TII->verifyInstruction(*MI, ErrorInfo)) 1590 report(ErrorInfo.data(), MI); 1591 1592 // Verify properties of various specific instruction types 1593 switch (MI->getOpcode()) { 1594 case TargetOpcode::COPY: { 1595 if (foundErrors) 1596 break; 1597 const MachineOperand &DstOp = MI->getOperand(0); 1598 const MachineOperand &SrcOp = MI->getOperand(1); 1599 LLT DstTy = MRI->getType(DstOp.getReg()); 1600 LLT SrcTy = MRI->getType(SrcOp.getReg()); 1601 if (SrcTy.isValid() && DstTy.isValid()) { 1602 // If both types are valid, check that the types are the same. 1603 if (SrcTy != DstTy) { 1604 report("Copy Instruction is illegal with mismatching types", MI); 1605 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n"; 1606 } 1607 } 1608 if (SrcTy.isValid() || DstTy.isValid()) { 1609 // If one of them have valid types, let's just check they have the same 1610 // size. 1611 unsigned SrcSize = TRI->getRegSizeInBits(SrcOp.getReg(), *MRI); 1612 unsigned DstSize = TRI->getRegSizeInBits(DstOp.getReg(), *MRI); 1613 assert(SrcSize && "Expecting size here"); 1614 assert(DstSize && "Expecting size here"); 1615 if (SrcSize != DstSize) 1616 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) { 1617 report("Copy Instruction is illegal with mismatching sizes", MI); 1618 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize 1619 << "\n"; 1620 } 1621 } 1622 break; 1623 } 1624 case TargetOpcode::STATEPOINT: { 1625 StatepointOpers SO(MI); 1626 if (!MI->getOperand(SO.getIDPos()).isImm() || 1627 !MI->getOperand(SO.getNBytesPos()).isImm() || 1628 !MI->getOperand(SO.getNCallArgsPos()).isImm()) { 1629 report("meta operands to STATEPOINT not constant!", MI); 1630 break; 1631 } 1632 1633 auto VerifyStackMapConstant = [&](unsigned Offset) { 1634 if (!MI->getOperand(Offset - 1).isImm() || 1635 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp || 1636 !MI->getOperand(Offset).isImm()) 1637 report("stack map constant to STATEPOINT not well formed!", MI); 1638 }; 1639 VerifyStackMapConstant(SO.getCCIdx()); 1640 VerifyStackMapConstant(SO.getFlagsIdx()); 1641 VerifyStackMapConstant(SO.getNumDeoptArgsIdx()); 1642 1643 // TODO: verify we have properly encoded deopt arguments 1644 } break; 1645 } 1646 } 1647 1648 void 1649 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) { 1650 const MachineInstr *MI = MO->getParent(); 1651 const MCInstrDesc &MCID = MI->getDesc(); 1652 unsigned NumDefs = MCID.getNumDefs(); 1653 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT) 1654 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0; 1655 1656 // The first MCID.NumDefs operands must be explicit register defines 1657 if (MONum < NumDefs) { 1658 const MCOperandInfo &MCOI = MCID.OpInfo[MONum]; 1659 if (!MO->isReg()) 1660 report("Explicit definition must be a register", MO, MONum); 1661 else if (!MO->isDef() && !MCOI.isOptionalDef()) 1662 report("Explicit definition marked as use", MO, MONum); 1663 else if (MO->isImplicit()) 1664 report("Explicit definition marked as implicit", MO, MONum); 1665 } else if (MONum < MCID.getNumOperands()) { 1666 const MCOperandInfo &MCOI = MCID.OpInfo[MONum]; 1667 // Don't check if it's the last operand in a variadic instruction. See, 1668 // e.g., LDM_RET in the arm back end. Check non-variadic operands only. 1669 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1; 1670 if (!IsOptional) { 1671 if (MO->isReg()) { 1672 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs()) 1673 report("Explicit operand marked as def", MO, MONum); 1674 if (MO->isImplicit()) 1675 report("Explicit operand marked as implicit", MO, MONum); 1676 } 1677 1678 // Check that an instruction has register operands only as expected. 1679 if (MCOI.OperandType == MCOI::OPERAND_REGISTER && 1680 !MO->isReg() && !MO->isFI()) 1681 report("Expected a register operand.", MO, MONum); 1682 if ((MCOI.OperandType == MCOI::OPERAND_IMMEDIATE || 1683 MCOI.OperandType == MCOI::OPERAND_PCREL) && MO->isReg()) 1684 report("Expected a non-register operand.", MO, MONum); 1685 } 1686 1687 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO); 1688 if (TiedTo != -1) { 1689 if (!MO->isReg()) 1690 report("Tied use must be a register", MO, MONum); 1691 else if (!MO->isTied()) 1692 report("Operand should be tied", MO, MONum); 1693 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum)) 1694 report("Tied def doesn't match MCInstrDesc", MO, MONum); 1695 else if (Register::isPhysicalRegister(MO->getReg())) { 1696 const MachineOperand &MOTied = MI->getOperand(TiedTo); 1697 if (!MOTied.isReg()) 1698 report("Tied counterpart must be a register", &MOTied, TiedTo); 1699 else if (Register::isPhysicalRegister(MOTied.getReg()) && 1700 MO->getReg() != MOTied.getReg()) 1701 report("Tied physical registers must match.", &MOTied, TiedTo); 1702 } 1703 } else if (MO->isReg() && MO->isTied()) 1704 report("Explicit operand should not be tied", MO, MONum); 1705 } else { 1706 // ARM adds %reg0 operands to indicate predicates. We'll allow that. 1707 if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg()) 1708 report("Extra explicit operand on non-variadic instruction", MO, MONum); 1709 } 1710 1711 switch (MO->getType()) { 1712 case MachineOperand::MO_Register: { 1713 const Register Reg = MO->getReg(); 1714 if (!Reg) 1715 return; 1716 if (MRI->tracksLiveness() && !MI->isDebugValue()) 1717 checkLiveness(MO, MONum); 1718 1719 // Verify the consistency of tied operands. 1720 if (MO->isTied()) { 1721 unsigned OtherIdx = MI->findTiedOperandIdx(MONum); 1722 const MachineOperand &OtherMO = MI->getOperand(OtherIdx); 1723 if (!OtherMO.isReg()) 1724 report("Must be tied to a register", MO, MONum); 1725 if (!OtherMO.isTied()) 1726 report("Missing tie flags on tied operand", MO, MONum); 1727 if (MI->findTiedOperandIdx(OtherIdx) != MONum) 1728 report("Inconsistent tie links", MO, MONum); 1729 if (MONum < MCID.getNumDefs()) { 1730 if (OtherIdx < MCID.getNumOperands()) { 1731 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO)) 1732 report("Explicit def tied to explicit use without tie constraint", 1733 MO, MONum); 1734 } else { 1735 if (!OtherMO.isImplicit()) 1736 report("Explicit def should be tied to implicit use", MO, MONum); 1737 } 1738 } 1739 } 1740 1741 // Verify two-address constraints after the twoaddressinstruction pass. 1742 // Both twoaddressinstruction pass and phi-node-elimination pass call 1743 // MRI->leaveSSA() to set MF as NoSSA, we should do the verification after 1744 // twoaddressinstruction pass not after phi-node-elimination pass. So we 1745 // shouldn't use the NoSSA as the condition, we should based on 1746 // TiedOpsRewritten property to verify two-address constraints, this 1747 // property will be set in twoaddressinstruction pass. 1748 unsigned DefIdx; 1749 if (MF->getProperties().hasProperty( 1750 MachineFunctionProperties::Property::TiedOpsRewritten) && 1751 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) && 1752 Reg != MI->getOperand(DefIdx).getReg()) 1753 report("Two-address instruction operands must be identical", MO, MONum); 1754 1755 // Check register classes. 1756 unsigned SubIdx = MO->getSubReg(); 1757 1758 if (Register::isPhysicalRegister(Reg)) { 1759 if (SubIdx) { 1760 report("Illegal subregister index for physical register", MO, MONum); 1761 return; 1762 } 1763 if (MONum < MCID.getNumOperands()) { 1764 if (const TargetRegisterClass *DRC = 1765 TII->getRegClass(MCID, MONum, TRI, *MF)) { 1766 if (!DRC->contains(Reg)) { 1767 report("Illegal physical register for instruction", MO, MONum); 1768 errs() << printReg(Reg, TRI) << " is not a " 1769 << TRI->getRegClassName(DRC) << " register.\n"; 1770 } 1771 } 1772 } 1773 if (MO->isRenamable()) { 1774 if (MRI->isReserved(Reg)) { 1775 report("isRenamable set on reserved register", MO, MONum); 1776 return; 1777 } 1778 } 1779 if (MI->isDebugValue() && MO->isUse() && !MO->isDebug()) { 1780 report("Use-reg is not IsDebug in a DBG_VALUE", MO, MONum); 1781 return; 1782 } 1783 } else { 1784 // Virtual register. 1785 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg); 1786 if (!RC) { 1787 // This is a generic virtual register. 1788 1789 // Do not allow undef uses for generic virtual registers. This ensures 1790 // getVRegDef can never fail and return null on a generic register. 1791 // 1792 // FIXME: This restriction should probably be broadened to all SSA 1793 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still 1794 // run on the SSA function just before phi elimination. 1795 if (MO->isUndef()) 1796 report("Generic virtual register use cannot be undef", MO, MONum); 1797 1798 // If we're post-Select, we can't have gvregs anymore. 1799 if (isFunctionSelected) { 1800 report("Generic virtual register invalid in a Selected function", 1801 MO, MONum); 1802 return; 1803 } 1804 1805 // The gvreg must have a type and it must not have a SubIdx. 1806 LLT Ty = MRI->getType(Reg); 1807 if (!Ty.isValid()) { 1808 report("Generic virtual register must have a valid type", MO, 1809 MONum); 1810 return; 1811 } 1812 1813 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg); 1814 1815 // If we're post-RegBankSelect, the gvreg must have a bank. 1816 if (!RegBank && isFunctionRegBankSelected) { 1817 report("Generic virtual register must have a bank in a " 1818 "RegBankSelected function", 1819 MO, MONum); 1820 return; 1821 } 1822 1823 // Make sure the register fits into its register bank if any. 1824 if (RegBank && Ty.isValid() && 1825 RegBank->getSize() < Ty.getSizeInBits()) { 1826 report("Register bank is too small for virtual register", MO, 1827 MONum); 1828 errs() << "Register bank " << RegBank->getName() << " too small(" 1829 << RegBank->getSize() << ") to fit " << Ty.getSizeInBits() 1830 << "-bits\n"; 1831 return; 1832 } 1833 if (SubIdx) { 1834 report("Generic virtual register does not allow subregister index", MO, 1835 MONum); 1836 return; 1837 } 1838 1839 // If this is a target specific instruction and this operand 1840 // has register class constraint, the virtual register must 1841 // comply to it. 1842 if (!isPreISelGenericOpcode(MCID.getOpcode()) && 1843 MONum < MCID.getNumOperands() && 1844 TII->getRegClass(MCID, MONum, TRI, *MF)) { 1845 report("Virtual register does not match instruction constraint", MO, 1846 MONum); 1847 errs() << "Expect register class " 1848 << TRI->getRegClassName( 1849 TII->getRegClass(MCID, MONum, TRI, *MF)) 1850 << " but got nothing\n"; 1851 return; 1852 } 1853 1854 break; 1855 } 1856 if (SubIdx) { 1857 const TargetRegisterClass *SRC = 1858 TRI->getSubClassWithSubReg(RC, SubIdx); 1859 if (!SRC) { 1860 report("Invalid subregister index for virtual register", MO, MONum); 1861 errs() << "Register class " << TRI->getRegClassName(RC) 1862 << " does not support subreg index " << SubIdx << "\n"; 1863 return; 1864 } 1865 if (RC != SRC) { 1866 report("Invalid register class for subregister index", MO, MONum); 1867 errs() << "Register class " << TRI->getRegClassName(RC) 1868 << " does not fully support subreg index " << SubIdx << "\n"; 1869 return; 1870 } 1871 } 1872 if (MONum < MCID.getNumOperands()) { 1873 if (const TargetRegisterClass *DRC = 1874 TII->getRegClass(MCID, MONum, TRI, *MF)) { 1875 if (SubIdx) { 1876 const TargetRegisterClass *SuperRC = 1877 TRI->getLargestLegalSuperClass(RC, *MF); 1878 if (!SuperRC) { 1879 report("No largest legal super class exists.", MO, MONum); 1880 return; 1881 } 1882 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx); 1883 if (!DRC) { 1884 report("No matching super-reg register class.", MO, MONum); 1885 return; 1886 } 1887 } 1888 if (!RC->hasSuperClassEq(DRC)) { 1889 report("Illegal virtual register for instruction", MO, MONum); 1890 errs() << "Expected a " << TRI->getRegClassName(DRC) 1891 << " register, but got a " << TRI->getRegClassName(RC) 1892 << " register\n"; 1893 } 1894 } 1895 } 1896 } 1897 break; 1898 } 1899 1900 case MachineOperand::MO_RegisterMask: 1901 regMasks.push_back(MO->getRegMask()); 1902 break; 1903 1904 case MachineOperand::MO_MachineBasicBlock: 1905 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent())) 1906 report("PHI operand is not in the CFG", MO, MONum); 1907 break; 1908 1909 case MachineOperand::MO_FrameIndex: 1910 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) && 1911 LiveInts && !LiveInts->isNotInMIMap(*MI)) { 1912 int FI = MO->getIndex(); 1913 LiveInterval &LI = LiveStks->getInterval(FI); 1914 SlotIndex Idx = LiveInts->getInstructionIndex(*MI); 1915 1916 bool stores = MI->mayStore(); 1917 bool loads = MI->mayLoad(); 1918 // For a memory-to-memory move, we need to check if the frame 1919 // index is used for storing or loading, by inspecting the 1920 // memory operands. 1921 if (stores && loads) { 1922 for (auto *MMO : MI->memoperands()) { 1923 const PseudoSourceValue *PSV = MMO->getPseudoValue(); 1924 if (PSV == nullptr) continue; 1925 const FixedStackPseudoSourceValue *Value = 1926 dyn_cast<FixedStackPseudoSourceValue>(PSV); 1927 if (Value == nullptr) continue; 1928 if (Value->getFrameIndex() != FI) continue; 1929 1930 if (MMO->isStore()) 1931 loads = false; 1932 else 1933 stores = false; 1934 break; 1935 } 1936 if (loads == stores) 1937 report("Missing fixed stack memoperand.", MI); 1938 } 1939 if (loads && !LI.liveAt(Idx.getRegSlot(true))) { 1940 report("Instruction loads from dead spill slot", MO, MONum); 1941 errs() << "Live stack: " << LI << '\n'; 1942 } 1943 if (stores && !LI.liveAt(Idx.getRegSlot())) { 1944 report("Instruction stores to dead spill slot", MO, MONum); 1945 errs() << "Live stack: " << LI << '\n'; 1946 } 1947 } 1948 break; 1949 1950 default: 1951 break; 1952 } 1953 } 1954 1955 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO, 1956 unsigned MONum, SlotIndex UseIdx, const LiveRange &LR, unsigned VRegOrUnit, 1957 LaneBitmask LaneMask) { 1958 LiveQueryResult LRQ = LR.Query(UseIdx); 1959 // Check if we have a segment at the use, note however that we only need one 1960 // live subregister range, the others may be dead. 1961 if (!LRQ.valueIn() && LaneMask.none()) { 1962 report("No live segment at use", MO, MONum); 1963 report_context_liverange(LR); 1964 report_context_vreg_regunit(VRegOrUnit); 1965 report_context(UseIdx); 1966 } 1967 if (MO->isKill() && !LRQ.isKill()) { 1968 report("Live range continues after kill flag", MO, MONum); 1969 report_context_liverange(LR); 1970 report_context_vreg_regunit(VRegOrUnit); 1971 if (LaneMask.any()) 1972 report_context_lanemask(LaneMask); 1973 report_context(UseIdx); 1974 } 1975 } 1976 1977 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO, 1978 unsigned MONum, SlotIndex DefIdx, const LiveRange &LR, unsigned VRegOrUnit, 1979 bool SubRangeCheck, LaneBitmask LaneMask) { 1980 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) { 1981 assert(VNI && "NULL valno is not allowed"); 1982 if (VNI->def != DefIdx) { 1983 report("Inconsistent valno->def", MO, MONum); 1984 report_context_liverange(LR); 1985 report_context_vreg_regunit(VRegOrUnit); 1986 if (LaneMask.any()) 1987 report_context_lanemask(LaneMask); 1988 report_context(*VNI); 1989 report_context(DefIdx); 1990 } 1991 } else { 1992 report("No live segment at def", MO, MONum); 1993 report_context_liverange(LR); 1994 report_context_vreg_regunit(VRegOrUnit); 1995 if (LaneMask.any()) 1996 report_context_lanemask(LaneMask); 1997 report_context(DefIdx); 1998 } 1999 // Check that, if the dead def flag is present, LiveInts agree. 2000 if (MO->isDead()) { 2001 LiveQueryResult LRQ = LR.Query(DefIdx); 2002 if (!LRQ.isDeadDef()) { 2003 assert(Register::isVirtualRegister(VRegOrUnit) && 2004 "Expecting a virtual register."); 2005 // A dead subreg def only tells us that the specific subreg is dead. There 2006 // could be other non-dead defs of other subregs, or we could have other 2007 // parts of the register being live through the instruction. So unless we 2008 // are checking liveness for a subrange it is ok for the live range to 2009 // continue, given that we have a dead def of a subregister. 2010 if (SubRangeCheck || MO->getSubReg() == 0) { 2011 report("Live range continues after dead def flag", MO, MONum); 2012 report_context_liverange(LR); 2013 report_context_vreg_regunit(VRegOrUnit); 2014 if (LaneMask.any()) 2015 report_context_lanemask(LaneMask); 2016 } 2017 } 2018 } 2019 } 2020 2021 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) { 2022 const MachineInstr *MI = MO->getParent(); 2023 const unsigned Reg = MO->getReg(); 2024 2025 // Both use and def operands can read a register. 2026 if (MO->readsReg()) { 2027 if (MO->isKill()) 2028 addRegWithSubRegs(regsKilled, Reg); 2029 2030 // Check that LiveVars knows this kill. 2031 if (LiveVars && Register::isVirtualRegister(Reg) && MO->isKill()) { 2032 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg); 2033 if (!is_contained(VI.Kills, MI)) 2034 report("Kill missing from LiveVariables", MO, MONum); 2035 } 2036 2037 // Check LiveInts liveness and kill. 2038 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) { 2039 SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI); 2040 // Check the cached regunit intervals. 2041 if (Register::isPhysicalRegister(Reg) && !isReserved(Reg)) { 2042 for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) { 2043 if (MRI->isReservedRegUnit(*Units)) 2044 continue; 2045 if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units)) 2046 checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units); 2047 } 2048 } 2049 2050 if (Register::isVirtualRegister(Reg)) { 2051 if (LiveInts->hasInterval(Reg)) { 2052 // This is a virtual register interval. 2053 const LiveInterval &LI = LiveInts->getInterval(Reg); 2054 checkLivenessAtUse(MO, MONum, UseIdx, LI, Reg); 2055 2056 if (LI.hasSubRanges() && !MO->isDef()) { 2057 unsigned SubRegIdx = MO->getSubReg(); 2058 LaneBitmask MOMask = SubRegIdx != 0 2059 ? TRI->getSubRegIndexLaneMask(SubRegIdx) 2060 : MRI->getMaxLaneMaskForVReg(Reg); 2061 LaneBitmask LiveInMask; 2062 for (const LiveInterval::SubRange &SR : LI.subranges()) { 2063 if ((MOMask & SR.LaneMask).none()) 2064 continue; 2065 checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask); 2066 LiveQueryResult LRQ = SR.Query(UseIdx); 2067 if (LRQ.valueIn()) 2068 LiveInMask |= SR.LaneMask; 2069 } 2070 // At least parts of the register has to be live at the use. 2071 if ((LiveInMask & MOMask).none()) { 2072 report("No live subrange at use", MO, MONum); 2073 report_context(LI); 2074 report_context(UseIdx); 2075 } 2076 } 2077 } else { 2078 report("Virtual register has no live interval", MO, MONum); 2079 } 2080 } 2081 } 2082 2083 // Use of a dead register. 2084 if (!regsLive.count(Reg)) { 2085 if (Register::isPhysicalRegister(Reg)) { 2086 // Reserved registers may be used even when 'dead'. 2087 bool Bad = !isReserved(Reg); 2088 // We are fine if just any subregister has a defined value. 2089 if (Bad) { 2090 2091 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) { 2092 if (regsLive.count(SubReg)) { 2093 Bad = false; 2094 break; 2095 } 2096 } 2097 } 2098 // If there is an additional implicit-use of a super register we stop 2099 // here. By definition we are fine if the super register is not 2100 // (completely) dead, if the complete super register is dead we will 2101 // get a report for its operand. 2102 if (Bad) { 2103 for (const MachineOperand &MOP : MI->uses()) { 2104 if (!MOP.isReg() || !MOP.isImplicit()) 2105 continue; 2106 2107 if (!Register::isPhysicalRegister(MOP.getReg())) 2108 continue; 2109 2110 for (const MCPhysReg &SubReg : TRI->subregs(MOP.getReg())) { 2111 if (SubReg == Reg) { 2112 Bad = false; 2113 break; 2114 } 2115 } 2116 } 2117 } 2118 if (Bad) 2119 report("Using an undefined physical register", MO, MONum); 2120 } else if (MRI->def_empty(Reg)) { 2121 report("Reading virtual register without a def", MO, MONum); 2122 } else { 2123 BBInfo &MInfo = MBBInfoMap[MI->getParent()]; 2124 // We don't know which virtual registers are live in, so only complain 2125 // if vreg was killed in this MBB. Otherwise keep track of vregs that 2126 // must be live in. PHI instructions are handled separately. 2127 if (MInfo.regsKilled.count(Reg)) 2128 report("Using a killed virtual register", MO, MONum); 2129 else if (!MI->isPHI()) 2130 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI)); 2131 } 2132 } 2133 } 2134 2135 if (MO->isDef()) { 2136 // Register defined. 2137 // TODO: verify that earlyclobber ops are not used. 2138 if (MO->isDead()) 2139 addRegWithSubRegs(regsDead, Reg); 2140 else 2141 addRegWithSubRegs(regsDefined, Reg); 2142 2143 // Verify SSA form. 2144 if (MRI->isSSA() && Register::isVirtualRegister(Reg) && 2145 std::next(MRI->def_begin(Reg)) != MRI->def_end()) 2146 report("Multiple virtual register defs in SSA form", MO, MONum); 2147 2148 // Check LiveInts for a live segment, but only for virtual registers. 2149 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) { 2150 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI); 2151 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber()); 2152 2153 if (Register::isVirtualRegister(Reg)) { 2154 if (LiveInts->hasInterval(Reg)) { 2155 const LiveInterval &LI = LiveInts->getInterval(Reg); 2156 checkLivenessAtDef(MO, MONum, DefIdx, LI, Reg); 2157 2158 if (LI.hasSubRanges()) { 2159 unsigned SubRegIdx = MO->getSubReg(); 2160 LaneBitmask MOMask = SubRegIdx != 0 2161 ? TRI->getSubRegIndexLaneMask(SubRegIdx) 2162 : MRI->getMaxLaneMaskForVReg(Reg); 2163 for (const LiveInterval::SubRange &SR : LI.subranges()) { 2164 if ((SR.LaneMask & MOMask).none()) 2165 continue; 2166 checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask); 2167 } 2168 } 2169 } else { 2170 report("Virtual register has no Live interval", MO, MONum); 2171 } 2172 } 2173 } 2174 } 2175 } 2176 2177 // This function gets called after visiting all instructions in a bundle. The 2178 // argument points to the bundle header. 2179 // Normal stand-alone instructions are also considered 'bundles', and this 2180 // function is called for all of them. 2181 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) { 2182 BBInfo &MInfo = MBBInfoMap[MI->getParent()]; 2183 set_union(MInfo.regsKilled, regsKilled); 2184 set_subtract(regsLive, regsKilled); regsKilled.clear(); 2185 // Kill any masked registers. 2186 while (!regMasks.empty()) { 2187 const uint32_t *Mask = regMasks.pop_back_val(); 2188 for (unsigned Reg : regsLive) 2189 if (Register::isPhysicalRegister(Reg) && 2190 MachineOperand::clobbersPhysReg(Mask, Reg)) 2191 regsDead.push_back(Reg); 2192 } 2193 set_subtract(regsLive, regsDead); regsDead.clear(); 2194 set_union(regsLive, regsDefined); regsDefined.clear(); 2195 } 2196 2197 void 2198 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) { 2199 MBBInfoMap[MBB].regsLiveOut = regsLive; 2200 regsLive.clear(); 2201 2202 if (Indexes) { 2203 SlotIndex stop = Indexes->getMBBEndIdx(MBB); 2204 if (!(stop > lastIndex)) { 2205 report("Block ends before last instruction index", MBB); 2206 errs() << "Block ends at " << stop 2207 << " last instruction was at " << lastIndex << '\n'; 2208 } 2209 lastIndex = stop; 2210 } 2211 } 2212 2213 namespace { 2214 // This implements a set of registers that serves as a filter: can filter other 2215 // sets by passing through elements not in the filter and blocking those that 2216 // are. Any filter implicitly includes the full set of physical registers upon 2217 // creation, thus filtering them all out. The filter itself as a set only grows, 2218 // and needs to be as efficient as possible. 2219 struct VRegFilter { 2220 // Add elements to the filter itself. \pre Input set \p FromRegSet must have 2221 // no duplicates. Both virtual and physical registers are fine. 2222 template <typename RegSetT> void add(const RegSetT &FromRegSet) { 2223 SmallVector<unsigned, 0> VRegsBuffer; 2224 filterAndAdd(FromRegSet, VRegsBuffer); 2225 } 2226 // Filter \p FromRegSet through the filter and append passed elements into \p 2227 // ToVRegs. All elements appended are then added to the filter itself. 2228 // \returns true if anything changed. 2229 template <typename RegSetT> 2230 bool filterAndAdd(const RegSetT &FromRegSet, 2231 SmallVectorImpl<unsigned> &ToVRegs) { 2232 unsigned SparseUniverse = Sparse.size(); 2233 unsigned NewSparseUniverse = SparseUniverse; 2234 unsigned NewDenseSize = Dense.size(); 2235 size_t Begin = ToVRegs.size(); 2236 for (unsigned Reg : FromRegSet) { 2237 if (!Register::isVirtualRegister(Reg)) 2238 continue; 2239 unsigned Index = Register::virtReg2Index(Reg); 2240 if (Index < SparseUniverseMax) { 2241 if (Index < SparseUniverse && Sparse.test(Index)) 2242 continue; 2243 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1); 2244 } else { 2245 if (Dense.count(Reg)) 2246 continue; 2247 ++NewDenseSize; 2248 } 2249 ToVRegs.push_back(Reg); 2250 } 2251 size_t End = ToVRegs.size(); 2252 if (Begin == End) 2253 return false; 2254 // Reserving space in sets once performs better than doing so continuously 2255 // and pays easily for double look-ups (even in Dense with SparseUniverseMax 2256 // tuned all the way down) and double iteration (the second one is over a 2257 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector). 2258 Sparse.resize(NewSparseUniverse); 2259 Dense.reserve(NewDenseSize); 2260 for (unsigned I = Begin; I < End; ++I) { 2261 unsigned Reg = ToVRegs[I]; 2262 unsigned Index = Register::virtReg2Index(Reg); 2263 if (Index < SparseUniverseMax) 2264 Sparse.set(Index); 2265 else 2266 Dense.insert(Reg); 2267 } 2268 return true; 2269 } 2270 2271 private: 2272 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8; 2273 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound 2274 // are tracked by Dense. The only purpose of the threashold and the Dense set 2275 // is to have a reasonably growing memory usage in pathological cases (large 2276 // number of very sparse VRegFilter instances live at the same time). In 2277 // practice even in the worst-by-execution time cases having all elements 2278 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more 2279 // space efficient than if tracked by Dense. The threashold is set to keep the 2280 // worst-case memory usage within 2x of figures determined empirically for 2281 // "all Dense" scenario in such worst-by-execution-time cases. 2282 BitVector Sparse; 2283 DenseSet<unsigned> Dense; 2284 }; 2285 2286 // Implements both a transfer function and a (binary, in-place) join operator 2287 // for a dataflow over register sets with set union join and filtering transfer 2288 // (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time. 2289 // Maintains out_b as its state, allowing for O(n) iteration over it at any 2290 // time, where n is the size of the set (as opposed to O(U) where U is the 2291 // universe). filter_b implicitly contains all physical registers at all times. 2292 class FilteringVRegSet { 2293 VRegFilter Filter; 2294 SmallVector<unsigned, 0> VRegs; 2295 2296 public: 2297 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates. 2298 // Both virtual and physical registers are fine. 2299 template <typename RegSetT> void addToFilter(const RegSetT &RS) { 2300 Filter.add(RS); 2301 } 2302 // Passes \p RS through the filter_b (transfer function) and adds what's left 2303 // to itself (out_b). 2304 template <typename RegSetT> bool add(const RegSetT &RS) { 2305 // Double-duty the Filter: to maintain VRegs a set (and the join operation 2306 // a set union) just add everything being added here to the Filter as well. 2307 return Filter.filterAndAdd(RS, VRegs); 2308 } 2309 using const_iterator = decltype(VRegs)::const_iterator; 2310 const_iterator begin() const { return VRegs.begin(); } 2311 const_iterator end() const { return VRegs.end(); } 2312 size_t size() const { return VRegs.size(); } 2313 }; 2314 } // namespace 2315 2316 // Calculate the largest possible vregsPassed sets. These are the registers that 2317 // can pass through an MBB live, but may not be live every time. It is assumed 2318 // that all vregsPassed sets are empty before the call. 2319 void MachineVerifier::calcRegsPassed() { 2320 if (MF->empty()) 2321 // ReversePostOrderTraversal doesn't handle empty functions. 2322 return; 2323 2324 for (const MachineBasicBlock *MB : 2325 ReversePostOrderTraversal<const MachineFunction *>(MF)) { 2326 FilteringVRegSet VRegs; 2327 BBInfo &Info = MBBInfoMap[MB]; 2328 assert(Info.reachable); 2329 2330 VRegs.addToFilter(Info.regsKilled); 2331 VRegs.addToFilter(Info.regsLiveOut); 2332 for (const MachineBasicBlock *Pred : MB->predecessors()) { 2333 const BBInfo &PredInfo = MBBInfoMap[Pred]; 2334 if (!PredInfo.reachable) 2335 continue; 2336 2337 VRegs.add(PredInfo.regsLiveOut); 2338 VRegs.add(PredInfo.vregsPassed); 2339 } 2340 Info.vregsPassed.reserve(VRegs.size()); 2341 Info.vregsPassed.insert(VRegs.begin(), VRegs.end()); 2342 } 2343 } 2344 2345 // Calculate the set of virtual registers that must be passed through each basic 2346 // block in order to satisfy the requirements of successor blocks. This is very 2347 // similar to calcRegsPassed, only backwards. 2348 void MachineVerifier::calcRegsRequired() { 2349 // First push live-in regs to predecessors' vregsRequired. 2350 SmallPtrSet<const MachineBasicBlock*, 8> todo; 2351 for (const auto &MBB : *MF) { 2352 BBInfo &MInfo = MBBInfoMap[&MBB]; 2353 for (const MachineBasicBlock *Pred : MBB.predecessors()) { 2354 BBInfo &PInfo = MBBInfoMap[Pred]; 2355 if (PInfo.addRequired(MInfo.vregsLiveIn)) 2356 todo.insert(Pred); 2357 } 2358 2359 // Handle the PHI node. 2360 for (const MachineInstr &MI : MBB.phis()) { 2361 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) { 2362 // Skip those Operands which are undef regs or not regs. 2363 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg()) 2364 continue; 2365 2366 // Get register and predecessor for one PHI edge. 2367 Register Reg = MI.getOperand(i).getReg(); 2368 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB(); 2369 2370 BBInfo &PInfo = MBBInfoMap[Pred]; 2371 if (PInfo.addRequired(Reg)) 2372 todo.insert(Pred); 2373 } 2374 } 2375 } 2376 2377 // Iteratively push vregsRequired to predecessors. This will converge to the 2378 // same final state regardless of DenseSet iteration order. 2379 while (!todo.empty()) { 2380 const MachineBasicBlock *MBB = *todo.begin(); 2381 todo.erase(MBB); 2382 BBInfo &MInfo = MBBInfoMap[MBB]; 2383 for (const MachineBasicBlock *Pred : MBB->predecessors()) { 2384 if (Pred == MBB) 2385 continue; 2386 BBInfo &SInfo = MBBInfoMap[Pred]; 2387 if (SInfo.addRequired(MInfo.vregsRequired)) 2388 todo.insert(Pred); 2389 } 2390 } 2391 } 2392 2393 // Check PHI instructions at the beginning of MBB. It is assumed that 2394 // calcRegsPassed has been run so BBInfo::isLiveOut is valid. 2395 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) { 2396 BBInfo &MInfo = MBBInfoMap[&MBB]; 2397 2398 SmallPtrSet<const MachineBasicBlock*, 8> seen; 2399 for (const MachineInstr &Phi : MBB) { 2400 if (!Phi.isPHI()) 2401 break; 2402 seen.clear(); 2403 2404 const MachineOperand &MODef = Phi.getOperand(0); 2405 if (!MODef.isReg() || !MODef.isDef()) { 2406 report("Expected first PHI operand to be a register def", &MODef, 0); 2407 continue; 2408 } 2409 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() || 2410 MODef.isEarlyClobber() || MODef.isDebug()) 2411 report("Unexpected flag on PHI operand", &MODef, 0); 2412 Register DefReg = MODef.getReg(); 2413 if (!Register::isVirtualRegister(DefReg)) 2414 report("Expected first PHI operand to be a virtual register", &MODef, 0); 2415 2416 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) { 2417 const MachineOperand &MO0 = Phi.getOperand(I); 2418 if (!MO0.isReg()) { 2419 report("Expected PHI operand to be a register", &MO0, I); 2420 continue; 2421 } 2422 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() || 2423 MO0.isDebug() || MO0.isTied()) 2424 report("Unexpected flag on PHI operand", &MO0, I); 2425 2426 const MachineOperand &MO1 = Phi.getOperand(I + 1); 2427 if (!MO1.isMBB()) { 2428 report("Expected PHI operand to be a basic block", &MO1, I + 1); 2429 continue; 2430 } 2431 2432 const MachineBasicBlock &Pre = *MO1.getMBB(); 2433 if (!Pre.isSuccessor(&MBB)) { 2434 report("PHI input is not a predecessor block", &MO1, I + 1); 2435 continue; 2436 } 2437 2438 if (MInfo.reachable) { 2439 seen.insert(&Pre); 2440 BBInfo &PrInfo = MBBInfoMap[&Pre]; 2441 if (!MO0.isUndef() && PrInfo.reachable && 2442 !PrInfo.isLiveOut(MO0.getReg())) 2443 report("PHI operand is not live-out from predecessor", &MO0, I); 2444 } 2445 } 2446 2447 // Did we see all predecessors? 2448 if (MInfo.reachable) { 2449 for (MachineBasicBlock *Pred : MBB.predecessors()) { 2450 if (!seen.count(Pred)) { 2451 report("Missing PHI operand", &Phi); 2452 errs() << printMBBReference(*Pred) 2453 << " is a predecessor according to the CFG.\n"; 2454 } 2455 } 2456 } 2457 } 2458 } 2459 2460 void MachineVerifier::visitMachineFunctionAfter() { 2461 calcRegsPassed(); 2462 2463 for (const MachineBasicBlock &MBB : *MF) 2464 checkPHIOps(MBB); 2465 2466 // Now check liveness info if available 2467 calcRegsRequired(); 2468 2469 // Check for killed virtual registers that should be live out. 2470 for (const auto &MBB : *MF) { 2471 BBInfo &MInfo = MBBInfoMap[&MBB]; 2472 for (unsigned VReg : MInfo.vregsRequired) 2473 if (MInfo.regsKilled.count(VReg)) { 2474 report("Virtual register killed in block, but needed live out.", &MBB); 2475 errs() << "Virtual register " << printReg(VReg) 2476 << " is used after the block.\n"; 2477 } 2478 } 2479 2480 if (!MF->empty()) { 2481 BBInfo &MInfo = MBBInfoMap[&MF->front()]; 2482 for (unsigned VReg : MInfo.vregsRequired) { 2483 report("Virtual register defs don't dominate all uses.", MF); 2484 report_context_vreg(VReg); 2485 } 2486 } 2487 2488 if (LiveVars) 2489 verifyLiveVariables(); 2490 if (LiveInts) 2491 verifyLiveIntervals(); 2492 2493 // Check live-in list of each MBB. If a register is live into MBB, check 2494 // that the register is in regsLiveOut of each predecessor block. Since 2495 // this must come from a definition in the predecesssor or its live-in 2496 // list, this will catch a live-through case where the predecessor does not 2497 // have the register in its live-in list. This currently only checks 2498 // registers that have no aliases, are not allocatable and are not 2499 // reserved, which could mean a condition code register for instance. 2500 if (MRI->tracksLiveness()) 2501 for (const auto &MBB : *MF) 2502 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) { 2503 MCPhysReg LiveInReg = P.PhysReg; 2504 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid(); 2505 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg)) 2506 continue; 2507 for (const MachineBasicBlock *Pred : MBB.predecessors()) { 2508 BBInfo &PInfo = MBBInfoMap[Pred]; 2509 if (!PInfo.regsLiveOut.count(LiveInReg)) { 2510 report("Live in register not found to be live out from predecessor.", 2511 &MBB); 2512 errs() << TRI->getName(LiveInReg) 2513 << " not found to be live out from " 2514 << printMBBReference(*Pred) << "\n"; 2515 } 2516 } 2517 } 2518 2519 for (auto CSInfo : MF->getCallSitesInfo()) 2520 if (!CSInfo.first->isCall()) 2521 report("Call site info referencing instruction that is not call", MF); 2522 } 2523 2524 void MachineVerifier::verifyLiveVariables() { 2525 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars"); 2526 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) { 2527 unsigned Reg = Register::index2VirtReg(i); 2528 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg); 2529 for (const auto &MBB : *MF) { 2530 BBInfo &MInfo = MBBInfoMap[&MBB]; 2531 2532 // Our vregsRequired should be identical to LiveVariables' AliveBlocks 2533 if (MInfo.vregsRequired.count(Reg)) { 2534 if (!VI.AliveBlocks.test(MBB.getNumber())) { 2535 report("LiveVariables: Block missing from AliveBlocks", &MBB); 2536 errs() << "Virtual register " << printReg(Reg) 2537 << " must be live through the block.\n"; 2538 } 2539 } else { 2540 if (VI.AliveBlocks.test(MBB.getNumber())) { 2541 report("LiveVariables: Block should not be in AliveBlocks", &MBB); 2542 errs() << "Virtual register " << printReg(Reg) 2543 << " is not needed live through the block.\n"; 2544 } 2545 } 2546 } 2547 } 2548 } 2549 2550 void MachineVerifier::verifyLiveIntervals() { 2551 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts"); 2552 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) { 2553 unsigned Reg = Register::index2VirtReg(i); 2554 2555 // Spilling and splitting may leave unused registers around. Skip them. 2556 if (MRI->reg_nodbg_empty(Reg)) 2557 continue; 2558 2559 if (!LiveInts->hasInterval(Reg)) { 2560 report("Missing live interval for virtual register", MF); 2561 errs() << printReg(Reg, TRI) << " still has defs or uses\n"; 2562 continue; 2563 } 2564 2565 const LiveInterval &LI = LiveInts->getInterval(Reg); 2566 assert(Reg == LI.reg() && "Invalid reg to interval mapping"); 2567 verifyLiveInterval(LI); 2568 } 2569 2570 // Verify all the cached regunit intervals. 2571 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i) 2572 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i)) 2573 verifyLiveRange(*LR, i); 2574 } 2575 2576 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR, 2577 const VNInfo *VNI, unsigned Reg, 2578 LaneBitmask LaneMask) { 2579 if (VNI->isUnused()) 2580 return; 2581 2582 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def); 2583 2584 if (!DefVNI) { 2585 report("Value not live at VNInfo def and not marked unused", MF); 2586 report_context(LR, Reg, LaneMask); 2587 report_context(*VNI); 2588 return; 2589 } 2590 2591 if (DefVNI != VNI) { 2592 report("Live segment at def has different VNInfo", MF); 2593 report_context(LR, Reg, LaneMask); 2594 report_context(*VNI); 2595 return; 2596 } 2597 2598 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def); 2599 if (!MBB) { 2600 report("Invalid VNInfo definition index", MF); 2601 report_context(LR, Reg, LaneMask); 2602 report_context(*VNI); 2603 return; 2604 } 2605 2606 if (VNI->isPHIDef()) { 2607 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) { 2608 report("PHIDef VNInfo is not defined at MBB start", MBB); 2609 report_context(LR, Reg, LaneMask); 2610 report_context(*VNI); 2611 } 2612 return; 2613 } 2614 2615 // Non-PHI def. 2616 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def); 2617 if (!MI) { 2618 report("No instruction at VNInfo def index", MBB); 2619 report_context(LR, Reg, LaneMask); 2620 report_context(*VNI); 2621 return; 2622 } 2623 2624 if (Reg != 0) { 2625 bool hasDef = false; 2626 bool isEarlyClobber = false; 2627 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) { 2628 if (!MOI->isReg() || !MOI->isDef()) 2629 continue; 2630 if (Register::isVirtualRegister(Reg)) { 2631 if (MOI->getReg() != Reg) 2632 continue; 2633 } else { 2634 if (!Register::isPhysicalRegister(MOI->getReg()) || 2635 !TRI->hasRegUnit(MOI->getReg(), Reg)) 2636 continue; 2637 } 2638 if (LaneMask.any() && 2639 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none()) 2640 continue; 2641 hasDef = true; 2642 if (MOI->isEarlyClobber()) 2643 isEarlyClobber = true; 2644 } 2645 2646 if (!hasDef) { 2647 report("Defining instruction does not modify register", MI); 2648 report_context(LR, Reg, LaneMask); 2649 report_context(*VNI); 2650 } 2651 2652 // Early clobber defs begin at USE slots, but other defs must begin at 2653 // DEF slots. 2654 if (isEarlyClobber) { 2655 if (!VNI->def.isEarlyClobber()) { 2656 report("Early clobber def must be at an early-clobber slot", MBB); 2657 report_context(LR, Reg, LaneMask); 2658 report_context(*VNI); 2659 } 2660 } else if (!VNI->def.isRegister()) { 2661 report("Non-PHI, non-early clobber def must be at a register slot", MBB); 2662 report_context(LR, Reg, LaneMask); 2663 report_context(*VNI); 2664 } 2665 } 2666 } 2667 2668 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR, 2669 const LiveRange::const_iterator I, 2670 unsigned Reg, LaneBitmask LaneMask) 2671 { 2672 const LiveRange::Segment &S = *I; 2673 const VNInfo *VNI = S.valno; 2674 assert(VNI && "Live segment has no valno"); 2675 2676 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) { 2677 report("Foreign valno in live segment", MF); 2678 report_context(LR, Reg, LaneMask); 2679 report_context(S); 2680 report_context(*VNI); 2681 } 2682 2683 if (VNI->isUnused()) { 2684 report("Live segment valno is marked unused", MF); 2685 report_context(LR, Reg, LaneMask); 2686 report_context(S); 2687 } 2688 2689 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start); 2690 if (!MBB) { 2691 report("Bad start of live segment, no basic block", MF); 2692 report_context(LR, Reg, LaneMask); 2693 report_context(S); 2694 return; 2695 } 2696 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB); 2697 if (S.start != MBBStartIdx && S.start != VNI->def) { 2698 report("Live segment must begin at MBB entry or valno def", MBB); 2699 report_context(LR, Reg, LaneMask); 2700 report_context(S); 2701 } 2702 2703 const MachineBasicBlock *EndMBB = 2704 LiveInts->getMBBFromIndex(S.end.getPrevSlot()); 2705 if (!EndMBB) { 2706 report("Bad end of live segment, no basic block", MF); 2707 report_context(LR, Reg, LaneMask); 2708 report_context(S); 2709 return; 2710 } 2711 2712 // No more checks for live-out segments. 2713 if (S.end == LiveInts->getMBBEndIdx(EndMBB)) 2714 return; 2715 2716 // RegUnit intervals are allowed dead phis. 2717 if (!Register::isVirtualRegister(Reg) && VNI->isPHIDef() && 2718 S.start == VNI->def && S.end == VNI->def.getDeadSlot()) 2719 return; 2720 2721 // The live segment is ending inside EndMBB 2722 const MachineInstr *MI = 2723 LiveInts->getInstructionFromIndex(S.end.getPrevSlot()); 2724 if (!MI) { 2725 report("Live segment doesn't end at a valid instruction", EndMBB); 2726 report_context(LR, Reg, LaneMask); 2727 report_context(S); 2728 return; 2729 } 2730 2731 // The block slot must refer to a basic block boundary. 2732 if (S.end.isBlock()) { 2733 report("Live segment ends at B slot of an instruction", EndMBB); 2734 report_context(LR, Reg, LaneMask); 2735 report_context(S); 2736 } 2737 2738 if (S.end.isDead()) { 2739 // Segment ends on the dead slot. 2740 // That means there must be a dead def. 2741 if (!SlotIndex::isSameInstr(S.start, S.end)) { 2742 report("Live segment ending at dead slot spans instructions", EndMBB); 2743 report_context(LR, Reg, LaneMask); 2744 report_context(S); 2745 } 2746 } 2747 2748 // A live segment can only end at an early-clobber slot if it is being 2749 // redefined by an early-clobber def. 2750 if (S.end.isEarlyClobber()) { 2751 if (I+1 == LR.end() || (I+1)->start != S.end) { 2752 report("Live segment ending at early clobber slot must be " 2753 "redefined by an EC def in the same instruction", EndMBB); 2754 report_context(LR, Reg, LaneMask); 2755 report_context(S); 2756 } 2757 } 2758 2759 // The following checks only apply to virtual registers. Physreg liveness 2760 // is too weird to check. 2761 if (Register::isVirtualRegister(Reg)) { 2762 // A live segment can end with either a redefinition, a kill flag on a 2763 // use, or a dead flag on a def. 2764 bool hasRead = false; 2765 bool hasSubRegDef = false; 2766 bool hasDeadDef = false; 2767 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) { 2768 if (!MOI->isReg() || MOI->getReg() != Reg) 2769 continue; 2770 unsigned Sub = MOI->getSubReg(); 2771 LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) 2772 : LaneBitmask::getAll(); 2773 if (MOI->isDef()) { 2774 if (Sub != 0) { 2775 hasSubRegDef = true; 2776 // An operand %0:sub0 reads %0:sub1..n. Invert the lane 2777 // mask for subregister defs. Read-undef defs will be handled by 2778 // readsReg below. 2779 SLM = ~SLM; 2780 } 2781 if (MOI->isDead()) 2782 hasDeadDef = true; 2783 } 2784 if (LaneMask.any() && (LaneMask & SLM).none()) 2785 continue; 2786 if (MOI->readsReg()) 2787 hasRead = true; 2788 } 2789 if (S.end.isDead()) { 2790 // Make sure that the corresponding machine operand for a "dead" live 2791 // range has the dead flag. We cannot perform this check for subregister 2792 // liveranges as partially dead values are allowed. 2793 if (LaneMask.none() && !hasDeadDef) { 2794 report("Instruction ending live segment on dead slot has no dead flag", 2795 MI); 2796 report_context(LR, Reg, LaneMask); 2797 report_context(S); 2798 } 2799 } else { 2800 if (!hasRead) { 2801 // When tracking subregister liveness, the main range must start new 2802 // values on partial register writes, even if there is no read. 2803 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() || 2804 !hasSubRegDef) { 2805 report("Instruction ending live segment doesn't read the register", 2806 MI); 2807 report_context(LR, Reg, LaneMask); 2808 report_context(S); 2809 } 2810 } 2811 } 2812 } 2813 2814 // Now check all the basic blocks in this live segment. 2815 MachineFunction::const_iterator MFI = MBB->getIterator(); 2816 // Is this live segment the beginning of a non-PHIDef VN? 2817 if (S.start == VNI->def && !VNI->isPHIDef()) { 2818 // Not live-in to any blocks. 2819 if (MBB == EndMBB) 2820 return; 2821 // Skip this block. 2822 ++MFI; 2823 } 2824 2825 SmallVector<SlotIndex, 4> Undefs; 2826 if (LaneMask.any()) { 2827 LiveInterval &OwnerLI = LiveInts->getInterval(Reg); 2828 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes); 2829 } 2830 2831 while (true) { 2832 assert(LiveInts->isLiveInToMBB(LR, &*MFI)); 2833 // We don't know how to track physregs into a landing pad. 2834 if (!Register::isVirtualRegister(Reg) && MFI->isEHPad()) { 2835 if (&*MFI == EndMBB) 2836 break; 2837 ++MFI; 2838 continue; 2839 } 2840 2841 // Is VNI a PHI-def in the current block? 2842 bool IsPHI = VNI->isPHIDef() && 2843 VNI->def == LiveInts->getMBBStartIdx(&*MFI); 2844 2845 // Check that VNI is live-out of all predecessors. 2846 for (const MachineBasicBlock *Pred : MFI->predecessors()) { 2847 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred); 2848 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd); 2849 2850 // All predecessors must have a live-out value. However for a phi 2851 // instruction with subregister intervals 2852 // only one of the subregisters (not necessarily the current one) needs to 2853 // be defined. 2854 if (!PVNI && (LaneMask.none() || !IsPHI)) { 2855 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes)) 2856 continue; 2857 report("Register not marked live out of predecessor", Pred); 2858 report_context(LR, Reg, LaneMask); 2859 report_context(*VNI); 2860 errs() << " live into " << printMBBReference(*MFI) << '@' 2861 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " 2862 << PEnd << '\n'; 2863 continue; 2864 } 2865 2866 // Only PHI-defs can take different predecessor values. 2867 if (!IsPHI && PVNI != VNI) { 2868 report("Different value live out of predecessor", Pred); 2869 report_context(LR, Reg, LaneMask); 2870 errs() << "Valno #" << PVNI->id << " live out of " 2871 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" 2872 << VNI->id << " live into " << printMBBReference(*MFI) << '@' 2873 << LiveInts->getMBBStartIdx(&*MFI) << '\n'; 2874 } 2875 } 2876 if (&*MFI == EndMBB) 2877 break; 2878 ++MFI; 2879 } 2880 } 2881 2882 void MachineVerifier::verifyLiveRange(const LiveRange &LR, unsigned Reg, 2883 LaneBitmask LaneMask) { 2884 for (const VNInfo *VNI : LR.valnos) 2885 verifyLiveRangeValue(LR, VNI, Reg, LaneMask); 2886 2887 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I) 2888 verifyLiveRangeSegment(LR, I, Reg, LaneMask); 2889 } 2890 2891 void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) { 2892 unsigned Reg = LI.reg(); 2893 assert(Register::isVirtualRegister(Reg)); 2894 verifyLiveRange(LI, Reg); 2895 2896 LaneBitmask Mask; 2897 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg); 2898 for (const LiveInterval::SubRange &SR : LI.subranges()) { 2899 if ((Mask & SR.LaneMask).any()) { 2900 report("Lane masks of sub ranges overlap in live interval", MF); 2901 report_context(LI); 2902 } 2903 if ((SR.LaneMask & ~MaxMask).any()) { 2904 report("Subrange lanemask is invalid", MF); 2905 report_context(LI); 2906 } 2907 if (SR.empty()) { 2908 report("Subrange must not be empty", MF); 2909 report_context(SR, LI.reg(), SR.LaneMask); 2910 } 2911 Mask |= SR.LaneMask; 2912 verifyLiveRange(SR, LI.reg(), SR.LaneMask); 2913 if (!LI.covers(SR)) { 2914 report("A Subrange is not covered by the main range", MF); 2915 report_context(LI); 2916 } 2917 } 2918 2919 // Check the LI only has one connected component. 2920 ConnectedVNInfoEqClasses ConEQ(*LiveInts); 2921 unsigned NumComp = ConEQ.Classify(LI); 2922 if (NumComp > 1) { 2923 report("Multiple connected components in live interval", MF); 2924 report_context(LI); 2925 for (unsigned comp = 0; comp != NumComp; ++comp) { 2926 errs() << comp << ": valnos"; 2927 for (const VNInfo *I : LI.valnos) 2928 if (comp == ConEQ.getEqClass(I)) 2929 errs() << ' ' << I->id; 2930 errs() << '\n'; 2931 } 2932 } 2933 } 2934 2935 namespace { 2936 2937 // FrameSetup and FrameDestroy can have zero adjustment, so using a single 2938 // integer, we can't tell whether it is a FrameSetup or FrameDestroy if the 2939 // value is zero. 2940 // We use a bool plus an integer to capture the stack state. 2941 struct StackStateOfBB { 2942 StackStateOfBB() = default; 2943 StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) : 2944 EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup), 2945 ExitIsSetup(ExitSetup) {} 2946 2947 // Can be negative, which means we are setting up a frame. 2948 int EntryValue = 0; 2949 int ExitValue = 0; 2950 bool EntryIsSetup = false; 2951 bool ExitIsSetup = false; 2952 }; 2953 2954 } // end anonymous namespace 2955 2956 /// Make sure on every path through the CFG, a FrameSetup <n> is always followed 2957 /// by a FrameDestroy <n>, stack adjustments are identical on all 2958 /// CFG edges to a merge point, and frame is destroyed at end of a return block. 2959 void MachineVerifier::verifyStackFrame() { 2960 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode(); 2961 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode(); 2962 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u) 2963 return; 2964 2965 SmallVector<StackStateOfBB, 8> SPState; 2966 SPState.resize(MF->getNumBlockIDs()); 2967 df_iterator_default_set<const MachineBasicBlock*> Reachable; 2968 2969 // Visit the MBBs in DFS order. 2970 for (df_ext_iterator<const MachineFunction *, 2971 df_iterator_default_set<const MachineBasicBlock *>> 2972 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable); 2973 DFI != DFE; ++DFI) { 2974 const MachineBasicBlock *MBB = *DFI; 2975 2976 StackStateOfBB BBState; 2977 // Check the exit state of the DFS stack predecessor. 2978 if (DFI.getPathLength() >= 2) { 2979 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2); 2980 assert(Reachable.count(StackPred) && 2981 "DFS stack predecessor is already visited.\n"); 2982 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue; 2983 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup; 2984 BBState.ExitValue = BBState.EntryValue; 2985 BBState.ExitIsSetup = BBState.EntryIsSetup; 2986 } 2987 2988 // Update stack state by checking contents of MBB. 2989 for (const auto &I : *MBB) { 2990 if (I.getOpcode() == FrameSetupOpcode) { 2991 if (BBState.ExitIsSetup) 2992 report("FrameSetup is after another FrameSetup", &I); 2993 BBState.ExitValue -= TII->getFrameTotalSize(I); 2994 BBState.ExitIsSetup = true; 2995 } 2996 2997 if (I.getOpcode() == FrameDestroyOpcode) { 2998 int Size = TII->getFrameTotalSize(I); 2999 if (!BBState.ExitIsSetup) 3000 report("FrameDestroy is not after a FrameSetup", &I); 3001 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue : 3002 BBState.ExitValue; 3003 if (BBState.ExitIsSetup && AbsSPAdj != Size) { 3004 report("FrameDestroy <n> is after FrameSetup <m>", &I); 3005 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <" 3006 << AbsSPAdj << ">.\n"; 3007 } 3008 BBState.ExitValue += Size; 3009 BBState.ExitIsSetup = false; 3010 } 3011 } 3012 SPState[MBB->getNumber()] = BBState; 3013 3014 // Make sure the exit state of any predecessor is consistent with the entry 3015 // state. 3016 for (const MachineBasicBlock *Pred : MBB->predecessors()) { 3017 if (Reachable.count(Pred) && 3018 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue || 3019 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) { 3020 report("The exit stack state of a predecessor is inconsistent.", MBB); 3021 errs() << "Predecessor " << printMBBReference(*Pred) 3022 << " has exit state (" << SPState[Pred->getNumber()].ExitValue 3023 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while " 3024 << printMBBReference(*MBB) << " has entry state (" 3025 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n"; 3026 } 3027 } 3028 3029 // Make sure the entry state of any successor is consistent with the exit 3030 // state. 3031 for (const MachineBasicBlock *Succ : MBB->successors()) { 3032 if (Reachable.count(Succ) && 3033 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue || 3034 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) { 3035 report("The entry stack state of a successor is inconsistent.", MBB); 3036 errs() << "Successor " << printMBBReference(*Succ) 3037 << " has entry state (" << SPState[Succ->getNumber()].EntryValue 3038 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while " 3039 << printMBBReference(*MBB) << " has exit state (" 3040 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n"; 3041 } 3042 } 3043 3044 // Make sure a basic block with return ends with zero stack adjustment. 3045 if (!MBB->empty() && MBB->back().isReturn()) { 3046 if (BBState.ExitIsSetup) 3047 report("A return block ends with a FrameSetup.", MBB); 3048 if (BBState.ExitValue) 3049 report("A return block ends with a nonzero stack adjustment.", MBB); 3050 } 3051 } 3052 } 3053