//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalCalc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

struct MachineVerifier {
  MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

  unsigned verify(const MachineFunction &MF);

  Pass *const PASS;
  const char *Banner;
  const MachineFunction *MF;
  const TargetMachine *TM;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const MachineRegisterInfo *MRI;

  unsigned foundErrors;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected;
  bool isFunctionSelected;
  bool isFunctionTracksDebugUserValues;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  using RegMap = DenseMap<Register, const MachineInstr *>;
  using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

  const MachineInstr *FirstNonPHI;
  const MachineInstr *FirstTerminator;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }

  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that only are used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };

  // Extra register info per MBB.
  DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;

  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars;
  LiveIntervals *LiveInts;
  LiveStacks *LiveStks;
  SlotIndexes *Indexes;

  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);
  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, Register VRegUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(Register VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          Register VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          Register VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval&);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                            LaneBitmask);
  void verifyLiveRangeSegment(const LiveRange &,
                              const LiveRange::const_iterator I, Register,
                              LaneBitmask);
  void verifyLiveRange(const LiveRange &, Register,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};

struct MachineVerifierPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  const std::string Banner;

  MachineVerifierPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
    initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::FailsVerification))
      return false;

    unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
    if (FoundErrors)
      report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
    return false;
  }
};

} // end anonymous namespace

char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierPass(Banner);
}

void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
                                 const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}

bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
    const {
  MachineFunction &MF = const_cast<MachineFunction&>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
  return FoundErrors == 0;
}

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  LiveVars = nullptr;
  LiveInts = nullptr;
  LiveStks = nullptr;
  Indexes = nullptr;
  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / RemoveOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  errs() << '\n';
  if (!foundErrors++) {
    if (Banner)
      errs() << "# " << Banner << '\n';
    if (LiveInts != nullptr)
      LiveInts->print(errs());
    else
      MF->print(errs(), Indexes);
  }
  errs() << "*** Bad machine code: " << msg << " ***\n"
         << "- function: " << MF->getName() << "\n";
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  errs() << "- basic block: " << printMBBReference(*MBB) << ' '
         << MBB->getName() << " (" << (const void *)MBB << ')';
  if (Indexes)
    errs() << " [" << Indexes->getMBBStartIdx(MBB)
           << ';' << Indexes->getMBBEndIdx(MBB) << ')';
  errs() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  errs() << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    errs() << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(errs(), /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  errs() << "- operand " << MONum << ": ";
  MO->print(errs(), MOVRegType, TRI);
  errs() << "\n";
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
register: " << printReg(VReg, TRI) << '\n'; 557 } 558 559 void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const { 560 if (Register::isVirtualRegister(VRegOrUnit)) { 561 report_context_vreg(VRegOrUnit); 562 } else { 563 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n'; 564 } 565 } 566 567 void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const { 568 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n'; 569 } 570 571 void MachineVerifier::markReachable(const MachineBasicBlock *MBB) { 572 BBInfo &MInfo = MBBInfoMap[MBB]; 573 if (!MInfo.reachable) { 574 MInfo.reachable = true; 575 for (const MachineBasicBlock *Succ : MBB->successors()) 576 markReachable(Succ); 577 } 578 } 579 580 void MachineVerifier::visitMachineFunctionBefore() { 581 lastIndex = SlotIndex(); 582 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs() 583 : TRI->getReservedRegs(*MF); 584 585 if (!MF->empty()) 586 markReachable(&MF->front()); 587 588 // Build a set of the basic blocks in the function. 589 FunctionBlocks.clear(); 590 for (const auto &MBB : *MF) { 591 FunctionBlocks.insert(&MBB); 592 BBInfo &MInfo = MBBInfoMap[&MBB]; 593 594 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end()); 595 if (MInfo.Preds.size() != MBB.pred_size()) 596 report("MBB has duplicate entries in its predecessor list.", &MBB); 597 598 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end()); 599 if (MInfo.Succs.size() != MBB.succ_size()) 600 report("MBB has duplicate entries in its successor list.", &MBB); 601 } 602 603 // Check that the register use lists are sane. 604 MRI->verifyUseLists(); 605 606 if (!MF->empty()) 607 verifyStackFrame(); 608 } 609 610 void 611 MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { 612 FirstTerminator = nullptr; 613 FirstNonPHI = nullptr; 614 615 if (!MF->getProperties().hasProperty( 616 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) { 617 // If this block has allocatable physical registers live-in, check that 618 // it is an entry block or landing pad. 619 for (const auto &LI : MBB->liveins()) { 620 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() && 621 MBB->getIterator() != MBB->getParent()->begin()) { 622 report("MBB has allocatable live-in, but isn't entry or landing-pad.", MBB); 623 report_context(LI.PhysReg); 624 } 625 } 626 } 627 628 // Count the number of landing pad successors. 629 SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs; 630 for (const auto *succ : MBB->successors()) { 631 if (succ->isEHPad()) 632 LandingPadSuccs.insert(succ); 633 if (!FunctionBlocks.count(succ)) 634 report("MBB has successor that isn't part of the function.", MBB); 635 if (!MBBInfoMap[succ].Preds.count(MBB)) { 636 report("Inconsistent CFG", MBB); 637 errs() << "MBB is not in the predecessor list of the successor " 638 << printMBBReference(*succ) << ".\n"; 639 } 640 } 641 642 // Check the predecessor list. 
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
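// For example, a finalized bundle appears in MIR as a BUNDLE header followed
// by instructions carrying the BundledPred/BundledSucc flags (illustrative
// pseudo-MIR, opcode and register names assumed, not from any real target):
//
//   BUNDLE implicit-def $r0, implicit $r1 {
//     $r0 = OP_A $r1
//     OP_B implicit $r0
//   }
//
// Only the BUNDLE header reaches this hook; the bundled instructions after it
// are visited via visitMachineInstrBefore().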
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      errs() << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    report("Non-terminator instruction after the first terminator", MI);
    errs() << "First terminator was:\t" << *FirstTerminator;
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
  // and Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }
}

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message, let's
    // avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.OpInfo[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && Register::isPhysicalRegister(MO->getReg()))
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
"G_ASSERT_ZEXT" : "G_ASSERT_SEXT"; 984 if (!MI->getOperand(2).isImm()) { 985 report(Twine(OpcName, " expects an immediate operand #2"), MI); 986 break; 987 } 988 989 Register Dst = MI->getOperand(0).getReg(); 990 Register Src = MI->getOperand(1).getReg(); 991 LLT SrcTy = MRI->getType(Src); 992 int64_t Imm = MI->getOperand(2).getImm(); 993 if (Imm <= 0) { 994 report(Twine(OpcName, " size must be >= 1"), MI); 995 break; 996 } 997 998 if (Imm >= SrcTy.getScalarSizeInBits()) { 999 report(Twine(OpcName, " size must be less than source bit width"), MI); 1000 break; 1001 } 1002 1003 if (MRI->getRegBankOrNull(Src) != MRI->getRegBankOrNull(Dst)) { 1004 report( 1005 Twine(OpcName, " source and destination register banks must match"), 1006 MI); 1007 break; 1008 } 1009 1010 if (MRI->getRegClassOrNull(Src) != MRI->getRegClassOrNull(Dst)) 1011 report( 1012 Twine(OpcName, " source and destination register classes must match"), 1013 MI); 1014 1015 break; 1016 } 1017 1018 case TargetOpcode::G_CONSTANT: 1019 case TargetOpcode::G_FCONSTANT: { 1020 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1021 if (DstTy.isVector()) 1022 report("Instruction cannot use a vector result type", MI); 1023 1024 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) { 1025 if (!MI->getOperand(1).isCImm()) { 1026 report("G_CONSTANT operand must be cimm", MI); 1027 break; 1028 } 1029 1030 const ConstantInt *CI = MI->getOperand(1).getCImm(); 1031 if (CI->getBitWidth() != DstTy.getSizeInBits()) 1032 report("inconsistent constant size", MI); 1033 } else { 1034 if (!MI->getOperand(1).isFPImm()) { 1035 report("G_FCONSTANT operand must be fpimm", MI); 1036 break; 1037 } 1038 const ConstantFP *CF = MI->getOperand(1).getFPImm(); 1039 1040 if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) != 1041 DstTy.getSizeInBits()) { 1042 report("inconsistent constant size", MI); 1043 } 1044 } 1045 1046 break; 1047 } 1048 case TargetOpcode::G_LOAD: 1049 case TargetOpcode::G_STORE: 1050 case TargetOpcode::G_ZEXTLOAD: 1051 case TargetOpcode::G_SEXTLOAD: { 1052 LLT ValTy = MRI->getType(MI->getOperand(0).getReg()); 1053 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg()); 1054 if (!PtrTy.isPointer()) 1055 report("Generic memory instruction must access a pointer", MI); 1056 1057 // Generic loads and stores must have a single MachineMemOperand 1058 // describing that access. 
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > ValTy.getSizeInBytes())
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (ValTy.getSizeInBytes() < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
          if (!MO.isReg())
            return true;
          LLT Ty = MRI->getType(MO.getReg());
          if (!Ty.isValid() || (Ty != DstTy))
            return false;
          return true;
        }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
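  // A well-formed G_PTR_ADD looks roughly like this (illustrative MIR, value
  // names assumed): pointer base, integer offset, pointer result.
  //   %sum:_(p0) = G_PTR_ADD %base:_(p0), %off:_(s64)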
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.getScalarType().isPointer())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.getScalarType().isPointer())
      report("gep offset operand must not be a pointer", MI);

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.getScalarType().isPointer())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    LLT DstElTy = DstTy.getScalarType();
    LLT SrcElTy = SrcTy.getScalarType();
    if (DstElTy.isPointer() || SrcElTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstElTy.getSizeInBits();
    unsigned SrcSize = SrcElTy.getSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
    // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed, should use
    // G_BUILD_VECTOR for that.
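    // Concretely (illustrative MIR, value names assumed):
    //   %wide:_(s64) = G_MERGE_VALUES %lo:_(s32), %hi:_(s32)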
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands()-1).getReg());
    // For now G_UNMERGE can split vectors.
    for (unsigned i = 0; i < MI->getNumOperands()-1; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy)
        report("G_UNMERGE_VALUES destination types do not match", MI);
    }
    if (SrcTy.getSizeInBits() !=
        (DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) {
      report("G_UNMERGE_VALUES source operand does not cover dest operands",
             MI);
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector())
      report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
             MI);
    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
               MI);
    if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
      report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
             "dest elt type",
             MI);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    // Source types should be vectors, and total size should match the dest
    // vector size.
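    // For instance (illustrative MIR, value names assumed):
    //   %d:_(<4 x s32>) = G_CONCAT_VECTORS %a:_(<2 x s32>), %b:_(<2 x s32>)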
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || !SrcTy.isVector())
      report("G_CONCAT_VECTOR requires vector source and destination operands",
             MI);

    if (MI->getNumOperands() < 3)
      report("G_CONCAT_VECTOR requires at least 2 source operands", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
    if (DstTy.getNumElements() !=
        SrcTy.getNumElements() * (MI->getNumOperands() - 1))
      report("G_CONCAT_VECTOR num dest and source elements should match", MI);
    break;
  }
  case TargetOpcode::G_ICMP:
  case TargetOpcode::G_FCMP: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());

    if ((DstTy.isVector() != SrcTy.isVector()) ||
        (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
      report("Generic vector icmp/fcmp must preserve number of lanes", MI);

    break;
  }
  case TargetOpcode::G_EXTRACT: {
    const MachineOperand &SrcOp = MI->getOperand(1);
    if (!SrcOp.isReg()) {
      report("extract source must be a register", MI);
      break;
    }

    const MachineOperand &OffsetOp = MI->getOperand(2);
    if (!OffsetOp.isImm()) {
      report("extract offset must be a constant", MI);
      break;
    }

    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
    if (SrcSize == DstSize)
      report("extract source must be larger than result", MI);

    if (DstSize + OffsetOp.getImm() > SrcSize)
      report("extract reads past end of register", MI);
    break;
  }
  case TargetOpcode::G_INSERT: {
    const MachineOperand &SrcOp = MI->getOperand(2);
    if (!SrcOp.isReg()) {
      report("insert source must be a register", MI);
      break;
    }

    const MachineOperand &OffsetOp = MI->getOperand(3);
    if (!OffsetOp.isImm()) {
      report("insert offset must be a constant", MI);
      break;
    }

    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();

    if (DstSize <= SrcSize)
      report("inserted size must be smaller than total register", MI);

    if (SrcSize + OffsetOp.getImm() > DstSize)
      report("insert writes past end of register", MI);

    break;
  }
  case TargetOpcode::G_JUMP_TABLE: {
    if (!MI->getOperand(1).isJTI())
      report("G_JUMP_TABLE source operand must be a jump table index", MI);
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isPointer())
      report("G_JUMP_TABLE dest operand must have a pointer type", MI);
    break;
  }
  case TargetOpcode::G_BRJT: {
    if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
      report("G_BRJT src operand 0 must be a pointer type", MI);

    if (!MI->getOperand(1).isJTI())
      report("G_BRJT src operand 1 must be a jump table index", MI);

    const auto &IdxOp = MI->getOperand(2);
    if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
      report("G_BRJT src operand 2 must be a scalar reg type", MI);
    break;
  }
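  // A generic intrinsic call prints roughly like this in MIR (illustrative,
  // intrinsic name assumed): the intrinsic ID operand follows the defs.
  //   %r:_(s32) = G_INTRINSIC intrinsic(@llvm.some.readnone.intrinsic), %a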
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
    // TODO: Should verify number of def and use operands, but the current
    // interface requires passing in IR types for mangling.
    const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
    if (!IntrIDOp.isIntrinsicID()) {
      report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
      break;
    }

    bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
    unsigned IntrID = IntrIDOp.getIntrinsicID();
    if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
      AttributeList Attrs
        = Intrinsic::getAttributes(MF->getFunction().getContext(),
                                   static_cast<Intrinsic::ID>(IntrID));
      bool DeclHasSideEffects = !Attrs.hasFnAttr(Attribute::ReadNone);
      if (NoSideEffects && DeclHasSideEffects) {
        report("G_INTRINSIC used with intrinsic that accesses memory", MI);
        break;
      }
      if (!NoSideEffects && !DeclHasSideEffects) {
        report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
        break;
      }
    }

    break;
  }
  case TargetOpcode::G_SEXT_INREG: {
    if (!MI->getOperand(2).isImm()) {
      report("G_SEXT_INREG expects an immediate operand #2", MI);
      break;
    }

    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0)
      report("G_SEXT_INREG size must be >= 1", MI);
    if (Imm >= SrcTy.getScalarSizeInBits())
      report("G_SEXT_INREG size must be less than source bit width", MI);
    break;
  }
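  // Example of the shuffle mask format checked below (illustrative MIR, value
  // names assumed; indices 0..3 select from %a, 4..7 from %b, -1 is undef):
  //   %s:_(<4 x s32>) = G_SHUFFLE_VECTOR %a:_(<4 x s32>), %b:_(<4 x s32>),
  //                         shufflemask(0, 4, 1, 5)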
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    const MachineOperand &MaskOp = MI->getOperand(3);
    if (!MaskOp.isShuffleMask()) {
      report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
      break;
    }

    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());

    if (Src0Ty != Src1Ty)
      report("Source operands must be the same type", MI);

    if (Src0Ty.getScalarType() != DstTy.getScalarType())
      report("G_SHUFFLE_VECTOR cannot change element type", MI);

    // Don't check that all operands are vector because scalars are used in
    // place of 1 element vectors.
    int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
    int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;

    ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();

    if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
      report("Wrong result type for shufflemask", MI);

    for (int Idx : MaskIdxes) {
      if (Idx < 0)
        continue;

      if (Idx >= 2 * SrcNumElts)
        report("Out of bounds shuffle index", MI);
    }

    break;
  }
  case TargetOpcode::G_DYN_STACKALLOC: {
    const MachineOperand &DstOp = MI->getOperand(0);
    const MachineOperand &AllocOp = MI->getOperand(1);
    const MachineOperand &AlignOp = MI->getOperand(2);

    if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
      report("dst operand 0 must be a pointer type", MI);
      break;
    }

    if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
      report("src operand 1 must be a scalar reg type", MI);
      break;
    }

    if (!AlignOp.isImm()) {
      report("src operand 2 must be an immediate type", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_MEMCPY_INLINE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE: {
    ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
    if (MMOs.size() != 2) {
      report("memcpy/memmove must have 2 memory operands", MI);
      break;
    }

    if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
        (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
      report("wrong memory operand types", MI);
      break;
    }

    if (MMOs[0]->getSize() != MMOs[1]->getSize())
      report("inconsistent memory operand sizes", MI);

    LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());

    if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
      report("memory instruction operand must be a pointer", MI);
      break;
    }

    if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
      report("inconsistent store address space", MI);
    if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
      report("inconsistent load address space", MI);

    if (Opc != TargetOpcode::G_MEMCPY_INLINE)
      if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
        report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);

    break;
  }
"memset" : "bzero"; 1545 if (MMOs.size() != 1) { 1546 report(Twine(Name, " must have 1 memory operand"), MI); 1547 break; 1548 } 1549 1550 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) { 1551 report(Twine(Name, " memory operand must be a store"), MI); 1552 break; 1553 } 1554 1555 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg()); 1556 if (!DstPtrTy.isPointer()) { 1557 report(Twine(Name, " operand must be a pointer"), MI); 1558 break; 1559 } 1560 1561 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace()) 1562 report("inconsistent " + Twine(Name, " address space"), MI); 1563 1564 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() || 1565 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL)) 1566 report("'tail' flag (last operand) must be an immediate 0 or 1", MI); 1567 1568 break; 1569 } 1570 case TargetOpcode::G_VECREDUCE_SEQ_FADD: 1571 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: { 1572 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1573 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg()); 1574 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg()); 1575 if (!DstTy.isScalar()) 1576 report("Vector reduction requires a scalar destination type", MI); 1577 if (!Src1Ty.isScalar()) 1578 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI); 1579 if (!Src2Ty.isVector()) 1580 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI); 1581 break; 1582 } 1583 case TargetOpcode::G_VECREDUCE_FADD: 1584 case TargetOpcode::G_VECREDUCE_FMUL: 1585 case TargetOpcode::G_VECREDUCE_FMAX: 1586 case TargetOpcode::G_VECREDUCE_FMIN: 1587 case TargetOpcode::G_VECREDUCE_ADD: 1588 case TargetOpcode::G_VECREDUCE_MUL: 1589 case TargetOpcode::G_VECREDUCE_AND: 1590 case TargetOpcode::G_VECREDUCE_OR: 1591 case TargetOpcode::G_VECREDUCE_XOR: 1592 case TargetOpcode::G_VECREDUCE_SMAX: 1593 case TargetOpcode::G_VECREDUCE_SMIN: 1594 case TargetOpcode::G_VECREDUCE_UMAX: 1595 case TargetOpcode::G_VECREDUCE_UMIN: { 1596 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1597 if (!DstTy.isScalar()) 1598 report("Vector reduction requires a scalar destination type", MI); 1599 break; 1600 } 1601 1602 case TargetOpcode::G_SBFX: 1603 case TargetOpcode::G_UBFX: { 1604 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1605 if (DstTy.isVector()) { 1606 report("Bitfield extraction is not supported on vectors", MI); 1607 break; 1608 } 1609 break; 1610 } 1611 case TargetOpcode::G_ROTR: 1612 case TargetOpcode::G_ROTL: { 1613 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg()); 1614 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg()); 1615 if (Src1Ty.isVector() != Src2Ty.isVector()) { 1616 report("Rotate requires operands to be either all scalars or all vectors", 1617 MI); 1618 break; 1619 } 1620 break; 1621 } 1622 case TargetOpcode::G_LLROUND: 1623 case TargetOpcode::G_LROUND: { 1624 verifyAllRegOpsScalar(*MI, *MRI); 1625 break; 1626 } 1627 default: 1628 break; 1629 } 1630 } 1631 1632 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) { 1633 const MCInstrDesc &MCID = MI->getDesc(); 1634 if (MI->getNumOperands() < MCID.getNumOperands()) { 1635 report("Too few operands", MI); 1636 errs() << MCID.getNumOperands() << " operands expected, but " 1637 << MI->getNumOperands() << " given.\n"; 1638 } 1639 1640 if (MI->isPHI()) { 1641 if (MF->getProperties().hasProperty( 1642 MachineFunctionProperties::Property::NoPHIs)) 1643 report("Found PHI instruction with NoPHIs property set", MI); 1644 1645 if (FirstNonPHI) 1646 report("Found PHI 
instruction after non-PHI", MI); 1647 } else if (FirstNonPHI == nullptr) 1648 FirstNonPHI = MI; 1649 1650 // Check the tied operands. 1651 if (MI->isInlineAsm()) 1652 verifyInlineAsm(MI); 1653 1654 // Check that unspillable terminators define a reg and have at most one use. 1655 if (TII->isUnspillableTerminator(MI)) { 1656 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef()) 1657 report("Unspillable Terminator does not define a reg", MI); 1658 Register Def = MI->getOperand(0).getReg(); 1659 if (Def.isVirtual() && 1660 !MF->getProperties().hasProperty( 1661 MachineFunctionProperties::Property::NoPHIs) && 1662 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1) 1663 report("Unspillable Terminator expected to have at most one use!", MI); 1664 } 1665 1666 // A fully-formed DBG_VALUE must have a location. Ignore partially formed 1667 // DBG_VALUEs: these are convenient to use in tests, but should never get 1668 // generated. 1669 if (MI->isDebugValue() && MI->getNumOperands() == 4) 1670 if (!MI->getDebugLoc()) 1671 report("Missing DebugLoc for debug instruction", MI); 1672 1673 // Meta instructions should never be the subject of debug value tracking, 1674 // they don't create a value in the output program at all. 1675 if (MI->isMetaInstruction() && MI->peekDebugInstrNum()) 1676 report("Metadata instruction should not have a value tracking number", MI); 1677 1678 // Check the MachineMemOperands for basic consistency. 1679 for (MachineMemOperand *Op : MI->memoperands()) { 1680 if (Op->isLoad() && !MI->mayLoad()) 1681 report("Missing mayLoad flag", MI); 1682 if (Op->isStore() && !MI->mayStore()) 1683 report("Missing mayStore flag", MI); 1684 } 1685 1686 // Debug values must not have a slot index. 1687 // Other instructions must have one, unless they are inside a bundle. 1688 if (LiveInts) { 1689 bool mapped = !LiveInts->isNotInMIMap(*MI); 1690 if (MI->isDebugOrPseudoInstr()) { 1691 if (mapped) 1692 report("Debug instruction has a slot index", MI); 1693 } else if (MI->isInsideBundle()) { 1694 if (mapped) 1695 report("Instruction inside bundle has a slot index", MI); 1696 } else { 1697 if (!mapped) 1698 report("Missing slot index", MI); 1699 } 1700 } 1701 1702 unsigned Opc = MCID.getOpcode(); 1703 if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) { 1704 verifyPreISelGenericInstruction(MI); 1705 return; 1706 } 1707 1708 StringRef ErrorInfo; 1709 if (!TII->verifyInstruction(*MI, ErrorInfo)) 1710 report(ErrorInfo.data(), MI); 1711 1712 // Verify properties of various specific instruction types 1713 switch (MI->getOpcode()) { 1714 case TargetOpcode::COPY: { 1715 const MachineOperand &DstOp = MI->getOperand(0); 1716 const MachineOperand &SrcOp = MI->getOperand(1); 1717 const Register SrcReg = SrcOp.getReg(); 1718 const Register DstReg = DstOp.getReg(); 1719 1720 LLT DstTy = MRI->getType(DstReg); 1721 LLT SrcTy = MRI->getType(SrcReg); 1722 if (SrcTy.isValid() && DstTy.isValid()) { 1723 // If both types are valid, check that the types are the same. 1724 if (SrcTy != DstTy) { 1725 report("Copy Instruction is illegal with mismatching types", MI); 1726 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n"; 1727 } 1728 1729 break; 1730 } 1731 1732 if (!SrcTy.isValid() && !DstTy.isValid()) 1733 break; 1734 1735 // If we have only one valid type, this is likely a copy between a virtual 1736 // and physical register. 
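// We can't compare LLTs directly in that case, so fall back to comparing // register sizes in bits below; e.g. (illustrative MIR) '%1:_(s64) = COPY $x0' // is fine because both sides are 64 bits wide.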
1737 unsigned SrcSize = 0; 1738 unsigned DstSize = 0; 1739 if (SrcReg.isPhysical() && DstTy.isValid()) { 1740 const TargetRegisterClass *SrcRC = 1741 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy); 1742 if (SrcRC) 1743 SrcSize = TRI->getRegSizeInBits(*SrcRC); 1744 } 1745 1746 if (SrcSize == 0) 1747 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI); 1748 1749 if (DstReg.isPhysical() && SrcTy.isValid()) { 1750 const TargetRegisterClass *DstRC = 1751 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy); 1752 if (DstRC) 1753 DstSize = TRI->getRegSizeInBits(*DstRC); 1754 } 1755 1756 if (DstSize == 0) 1757 DstSize = TRI->getRegSizeInBits(DstReg, *MRI); 1758 1759 if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) { 1760 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) { 1761 report("Copy Instruction is illegal with mismatching sizes", MI); 1762 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize 1763 << "\n"; 1764 } 1765 } 1766 break; 1767 } 1768 case TargetOpcode::STATEPOINT: { 1769 StatepointOpers SO(MI); 1770 if (!MI->getOperand(SO.getIDPos()).isImm() || 1771 !MI->getOperand(SO.getNBytesPos()).isImm() || 1772 !MI->getOperand(SO.getNCallArgsPos()).isImm()) { 1773 report("meta operands to STATEPOINT not constant!", MI); 1774 break; 1775 } 1776 1777 auto VerifyStackMapConstant = [&](unsigned Offset) { 1778 if (Offset >= MI->getNumOperands()) { 1779 report("stack map constant to STATEPOINT is out of range!", MI); 1780 return; 1781 } 1782 if (!MI->getOperand(Offset - 1).isImm() || 1783 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp || 1784 !MI->getOperand(Offset).isImm()) 1785 report("stack map constant to STATEPOINT not well formed!", MI); 1786 }; 1787 VerifyStackMapConstant(SO.getCCIdx()); 1788 VerifyStackMapConstant(SO.getFlagsIdx()); 1789 VerifyStackMapConstant(SO.getNumDeoptArgsIdx()); 1790 VerifyStackMapConstant(SO.getNumGCPtrIdx()); 1791 VerifyStackMapConstant(SO.getNumAllocaIdx()); 1792 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx()); 1793 1794 // Verify that all explicit statepoint defs are tied to gc operands as 1795 // they are expected to be a relocation of gc operands. 
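// Concretely, each def below must be tied to a use whose operand index falls // inside the gc-pointer section of the statepoint's operand list, delimited // by FirstGCPtrIdx and LastGCPtrIdx.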
1796 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx(); 1797 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2; 1798 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) { 1799 unsigned UseOpIdx; 1800 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) { 1801 report("STATEPOINT defs expected to be tied", MI); 1802 break; 1803 } 1804 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) { 1805 report("STATEPOINT def tied to non-gc operand", MI); 1806 break; 1807 } 1808 } 1809 1810 // TODO: verify we have properly encoded deopt arguments 1811 } break; 1812 case TargetOpcode::INSERT_SUBREG: { 1813 unsigned InsertedSize; 1814 if (unsigned SubIdx = MI->getOperand(2).getSubReg()) 1815 InsertedSize = TRI->getSubRegIdxSize(SubIdx); 1816 else 1817 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI); 1818 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm()); 1819 if (SubRegSize < InsertedSize) { 1820 report("INSERT_SUBREG expected inserted value to have equal or lesser " 1821 "size than the subreg it was inserted into", MI); 1822 break; 1823 } 1824 } break; 1825 } 1826 } 1827 1828 void 1829 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) { 1830 const MachineInstr *MI = MO->getParent(); 1831 const MCInstrDesc &MCID = MI->getDesc(); 1832 unsigned NumDefs = MCID.getNumDefs(); 1833 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT) 1834 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0; 1835 1836 // The first MCID.NumDefs operands must be explicit register defines 1837 if (MONum < NumDefs) { 1838 const MCOperandInfo &MCOI = MCID.OpInfo[MONum]; 1839 if (!MO->isReg()) 1840 report("Explicit definition must be a register", MO, MONum); 1841 else if (!MO->isDef() && !MCOI.isOptionalDef()) 1842 report("Explicit definition marked as use", MO, MONum); 1843 else if (MO->isImplicit()) 1844 report("Explicit definition marked as implicit", MO, MONum); 1845 } else if (MONum < MCID.getNumOperands()) { 1846 const MCOperandInfo &MCOI = MCID.OpInfo[MONum]; 1847 // Don't check if it's the last operand in a variadic instruction. See, 1848 // e.g., LDM_RET in the arm back end. Check non-variadic operands only. 1849 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1; 1850 if (!IsOptional) { 1851 if (MO->isReg()) { 1852 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs()) 1853 report("Explicit operand marked as def", MO, MONum); 1854 if (MO->isImplicit()) 1855 report("Explicit operand marked as implicit", MO, MONum); 1856 } 1857 1858 // Check that an instruction has register operands only as expected. 
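// That is, an OPERAND_REGISTER slot must hold a register (or a frame index // that will later be resolved to one), while immediate and PC-relative slots // must not hold a register unless the target explicitly permits it.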
1859 if (MCOI.OperandType == MCOI::OPERAND_REGISTER && 1860 !MO->isReg() && !MO->isFI()) 1861 report("Expected a register operand.", MO, MONum); 1862 if (MO->isReg()) { 1863 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE || 1864 (MCOI.OperandType == MCOI::OPERAND_PCREL && 1865 !TII->isPCRelRegisterOperandLegal(*MO))) 1866 report("Expected a non-register operand.", MO, MONum); 1867 } 1868 } 1869 1870 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO); 1871 if (TiedTo != -1) { 1872 if (!MO->isReg()) 1873 report("Tied use must be a register", MO, MONum); 1874 else if (!MO->isTied()) 1875 report("Operand should be tied", MO, MONum); 1876 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum)) 1877 report("Tied def doesn't match MCInstrDesc", MO, MONum); 1878 else if (Register::isPhysicalRegister(MO->getReg())) { 1879 const MachineOperand &MOTied = MI->getOperand(TiedTo); 1880 if (!MOTied.isReg()) 1881 report("Tied counterpart must be a register", &MOTied, TiedTo); 1882 else if (Register::isPhysicalRegister(MOTied.getReg()) && 1883 MO->getReg() != MOTied.getReg()) 1884 report("Tied physical registers must match.", &MOTied, TiedTo); 1885 } 1886 } else if (MO->isReg() && MO->isTied()) 1887 report("Explicit operand should not be tied", MO, MONum); 1888 } else { 1889 // ARM adds %reg0 operands to indicate predicates. We'll allow that. 1890 if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg()) 1891 report("Extra explicit operand on non-variadic instruction", MO, MONum); 1892 } 1893 1894 switch (MO->getType()) { 1895 case MachineOperand::MO_Register: { 1896 // Verify debug flag on debug instructions. Check this first because reg0 1897 // indicates an undefined debug value. 1898 if (MI->isDebugInstr() && MO->isUse()) { 1899 if (!MO->isDebug()) 1900 report("Register operand must be marked debug", MO, MONum); 1901 } else if (MO->isDebug()) { 1902 report("Register operand must not be marked debug", MO, MONum); 1903 } 1904 1905 const Register Reg = MO->getReg(); 1906 if (!Reg) 1907 return; 1908 if (MRI->tracksLiveness() && !MI->isDebugValue()) 1909 checkLiveness(MO, MONum); 1910 1911 // Verify the consistency of tied operands. 1912 if (MO->isTied()) { 1913 unsigned OtherIdx = MI->findTiedOperandIdx(MONum); 1914 const MachineOperand &OtherMO = MI->getOperand(OtherIdx); 1915 if (!OtherMO.isReg()) 1916 report("Must be tied to a register", MO, MONum); 1917 if (!OtherMO.isTied()) 1918 report("Missing tie flags on tied operand", MO, MONum); 1919 if (MI->findTiedOperandIdx(OtherIdx) != MONum) 1920 report("Inconsistent tie links", MO, MONum); 1921 if (MONum < MCID.getNumDefs()) { 1922 if (OtherIdx < MCID.getNumOperands()) { 1923 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO)) 1924 report("Explicit def tied to explicit use without tie constraint", 1925 MO, MONum); 1926 } else { 1927 if (!OtherMO.isImplicit()) 1928 report("Explicit def should be tied to implicit use", MO, MONum); 1929 } 1930 } 1931 } 1932 1933 // Verify two-address constraints after the twoaddressinstruction pass. 1934 // Both the twoaddressinstruction pass and the phi-node-elimination pass 1935 // call MRI->leaveSSA() to mark the MF as NoSSA, but this check belongs 1936 // after the twoaddressinstruction pass, not after phi-node-elimination. 1937 // So instead of keying on NoSSA, we verify the two-address constraints 1938 // based on the TiedOpsRewritten property, which is set by the 1939 // twoaddressinstruction pass.
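// After rewriting we expect forms like '%2 = ADD %2, %3' (illustrative MIR); // a tied use naming a different register than its def violates the // two-address constraint and is reported below.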
1940 unsigned DefIdx; 1941 if (MF->getProperties().hasProperty( 1942 MachineFunctionProperties::Property::TiedOpsRewritten) && 1943 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) && 1944 Reg != MI->getOperand(DefIdx).getReg()) 1945 report("Two-address instruction operands must be identical", MO, MONum); 1946 1947 // Check register classes. 1948 unsigned SubIdx = MO->getSubReg(); 1949 1950 if (Register::isPhysicalRegister(Reg)) { 1951 if (SubIdx) { 1952 report("Illegal subregister index for physical register", MO, MONum); 1953 return; 1954 } 1955 if (MONum < MCID.getNumOperands()) { 1956 if (const TargetRegisterClass *DRC = 1957 TII->getRegClass(MCID, MONum, TRI, *MF)) { 1958 if (!DRC->contains(Reg)) { 1959 report("Illegal physical register for instruction", MO, MONum); 1960 errs() << printReg(Reg, TRI) << " is not a " 1961 << TRI->getRegClassName(DRC) << " register.\n"; 1962 } 1963 } 1964 } 1965 if (MO->isRenamable()) { 1966 if (MRI->isReserved(Reg)) { 1967 report("isRenamable set on reserved register", MO, MONum); 1968 return; 1969 } 1970 } 1971 } else { 1972 // Virtual register. 1973 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg); 1974 if (!RC) { 1975 // This is a generic virtual register. 1976 1977 // Do not allow undef uses for generic virtual registers. This ensures 1978 // getVRegDef can never fail and return null on a generic register. 1979 // 1980 // FIXME: This restriction should probably be broadened to all SSA 1981 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still 1982 // run on the SSA function just before phi elimination. 1983 if (MO->isUndef()) 1984 report("Generic virtual register use cannot be undef", MO, MONum); 1985 1986 // Debug value instruction is permitted to use undefined vregs. 1987 // This is a performance measure to skip the overhead of immediately 1988 // pruning unused debug operands. The final undef substitution occurs 1989 // when debug values are allocated in LDVImpl::handleDebugValue, so 1990 // these verifications always apply after this pass. 1991 if (isFunctionTracksDebugUserValues || !MO->isUse() || 1992 !MI->isDebugValue() || !MRI->def_empty(Reg)) { 1993 // If we're post-Select, we can't have gvregs anymore. 1994 if (isFunctionSelected) { 1995 report("Generic virtual register invalid in a Selected function", 1996 MO, MONum); 1997 return; 1998 } 1999 2000 // The gvreg must have a type and it must not have a SubIdx. 2001 LLT Ty = MRI->getType(Reg); 2002 if (!Ty.isValid()) { 2003 report("Generic virtual register must have a valid type", MO, 2004 MONum); 2005 return; 2006 } 2007 2008 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg); 2009 2010 // If we're post-RegBankSelect, the gvreg must have a bank. 2011 if (!RegBank && isFunctionRegBankSelected) { 2012 report("Generic virtual register must have a bank in a " 2013 "RegBankSelected function", 2014 MO, MONum); 2015 return; 2016 } 2017 2018 // Make sure the register fits into its register bank if any. 
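// For example, a 64-bit scalar cannot be assigned to a 32-bit-wide bank. // (Bank names and sizes are target-specific; this is only an illustration.)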
2019 if (RegBank && Ty.isValid() && 2020 RegBank->getSize() < Ty.getSizeInBits()) { 2021 report("Register bank is too small for virtual register", MO, 2022 MONum); 2023 errs() << "Register bank " << RegBank->getName() << " too small (" 2024 << RegBank->getSize() << ") to fit " << Ty.getSizeInBits() 2025 << "-bits\n"; 2026 return; 2027 } 2028 } 2029 2030 if (SubIdx) { 2031 report("Generic virtual register does not allow subregister index", MO, 2032 MONum); 2033 return; 2034 } 2035 2036 // If this is a target-specific instruction and this operand 2037 // has a register class constraint, the virtual register must 2038 // comply with it. 2039 if (!isPreISelGenericOpcode(MCID.getOpcode()) && 2040 MONum < MCID.getNumOperands() && 2041 TII->getRegClass(MCID, MONum, TRI, *MF)) { 2042 report("Virtual register does not match instruction constraint", MO, 2043 MONum); 2044 errs() << "Expected register class " 2045 << TRI->getRegClassName( 2046 TII->getRegClass(MCID, MONum, TRI, *MF)) 2047 << " but got nothing\n"; 2048 return; 2049 } 2050 2051 break; 2052 } 2053 if (SubIdx) { 2054 const TargetRegisterClass *SRC = 2055 TRI->getSubClassWithSubReg(RC, SubIdx); 2056 if (!SRC) { 2057 report("Invalid subregister index for virtual register", MO, MONum); 2058 errs() << "Register class " << TRI->getRegClassName(RC) 2059 << " does not support subreg index " << SubIdx << "\n"; 2060 return; 2061 } 2062 if (RC != SRC) { 2063 report("Invalid register class for subregister index", MO, MONum); 2064 errs() << "Register class " << TRI->getRegClassName(RC) 2065 << " does not fully support subreg index " << SubIdx << "\n"; 2066 return; 2067 } 2068 } 2069 if (MONum < MCID.getNumOperands()) { 2070 if (const TargetRegisterClass *DRC = 2071 TII->getRegClass(MCID, MONum, TRI, *MF)) { 2072 if (SubIdx) { 2073 const TargetRegisterClass *SuperRC = 2074 TRI->getLargestLegalSuperClass(RC, *MF); 2075 if (!SuperRC) { 2076 report("No largest legal super class exists.", MO, MONum); 2077 return; 2078 } 2079 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx); 2080 if (!DRC) { 2081 report("No matching super-reg register class.", MO, MONum); 2082 return; 2083 } 2084 } 2085 if (!RC->hasSuperClassEq(DRC)) { 2086 report("Illegal virtual register for instruction", MO, MONum); 2087 errs() << "Expected a " << TRI->getRegClassName(DRC) 2088 << " register, but got a " << TRI->getRegClassName(RC) 2089 << " register\n"; 2090 } 2091 } 2092 } 2093 } 2094 break; 2095 } 2096 2097 case MachineOperand::MO_RegisterMask: 2098 regMasks.push_back(MO->getRegMask()); 2099 break; 2100 2101 case MachineOperand::MO_MachineBasicBlock: 2102 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent())) 2103 report("PHI operand is not in the CFG", MO, MONum); 2104 break; 2105 2106 case MachineOperand::MO_FrameIndex: 2107 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) && 2108 LiveInts && !LiveInts->isNotInMIMap(*MI)) { 2109 int FI = MO->getIndex(); 2110 LiveInterval &LI = LiveStks->getInterval(FI); 2111 SlotIndex Idx = LiveInts->getInstructionIndex(*MI); 2112 2113 bool stores = MI->mayStore(); 2114 bool loads = MI->mayLoad(); 2115 // For a memory-to-memory move, we need to check if the frame 2116 // index is used for storing or loading, by inspecting the 2117 // memory operands.
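// A memory-to-memory move typically carries one MMO per side; the MMO that // refers to this frame index tells us whether the slot is read or written, // which selects the liveness check applied below.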
2118 if (stores && loads) { 2119 for (auto *MMO : MI->memoperands()) { 2120 const PseudoSourceValue *PSV = MMO->getPseudoValue(); 2121 if (PSV == nullptr) continue; 2122 const FixedStackPseudoSourceValue *Value = 2123 dyn_cast<FixedStackPseudoSourceValue>(PSV); 2124 if (Value == nullptr) continue; 2125 if (Value->getFrameIndex() != FI) continue; 2126 2127 if (MMO->isStore()) 2128 loads = false; 2129 else 2130 stores = false; 2131 break; 2132 } 2133 if (loads == stores) 2134 report("Missing fixed stack memoperand.", MI); 2135 } 2136 if (loads && !LI.liveAt(Idx.getRegSlot(true))) { 2137 report("Instruction loads from dead spill slot", MO, MONum); 2138 errs() << "Live stack: " << LI << '\n'; 2139 } 2140 if (stores && !LI.liveAt(Idx.getRegSlot())) { 2141 report("Instruction stores to dead spill slot", MO, MONum); 2142 errs() << "Live stack: " << LI << '\n'; 2143 } 2144 } 2145 break; 2146 2147 default: 2148 break; 2149 } 2150 } 2151 2152 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO, 2153 unsigned MONum, SlotIndex UseIdx, 2154 const LiveRange &LR, 2155 Register VRegOrUnit, 2156 LaneBitmask LaneMask) { 2157 LiveQueryResult LRQ = LR.Query(UseIdx); 2158 // Check that we have a segment at the use; note, however, that we only need 2159 // one live subregister range, the others may be dead. 2160 if (!LRQ.valueIn() && LaneMask.none()) { 2161 report("No live segment at use", MO, MONum); 2162 report_context_liverange(LR); 2163 report_context_vreg_regunit(VRegOrUnit); 2164 report_context(UseIdx); 2165 } 2166 if (MO->isKill() && !LRQ.isKill()) { 2167 report("Live range continues after kill flag", MO, MONum); 2168 report_context_liverange(LR); 2169 report_context_vreg_regunit(VRegOrUnit); 2170 if (LaneMask.any()) 2171 report_context_lanemask(LaneMask); 2172 report_context(UseIdx); 2173 } 2174 } 2175 2176 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO, 2177 unsigned MONum, SlotIndex DefIdx, 2178 const LiveRange &LR, 2179 Register VRegOrUnit, 2180 bool SubRangeCheck, 2181 LaneBitmask LaneMask) { 2182 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) { 2183 assert(VNI && "NULL valno is not allowed"); 2184 if (VNI->def != DefIdx) { 2185 report("Inconsistent valno->def", MO, MONum); 2186 report_context_liverange(LR); 2187 report_context_vreg_regunit(VRegOrUnit); 2188 if (LaneMask.any()) 2189 report_context_lanemask(LaneMask); 2190 report_context(*VNI); 2191 report_context(DefIdx); 2192 } 2193 } else { 2194 report("No live segment at def", MO, MONum); 2195 report_context_liverange(LR); 2196 report_context_vreg_regunit(VRegOrUnit); 2197 if (LaneMask.any()) 2198 report_context_lanemask(LaneMask); 2199 report_context(DefIdx); 2200 } 2201 // Check that, if the dead def flag is present, LiveInts agrees. 2202 if (MO->isDead()) { 2203 LiveQueryResult LRQ = LR.Query(DefIdx); 2204 if (!LRQ.isDeadDef()) { 2205 assert(Register::isVirtualRegister(VRegOrUnit) && 2206 "Expecting a virtual register."); 2207 // A dead subreg def only tells us that the specific subreg is dead. There 2208 // could be other non-dead defs of other subregs, or we could have other 2209 // parts of the register being live through the instruction. So unless we 2210 // are checking liveness for a subrange it is ok for the live range to 2211 // continue, given that we have a dead def of a subregister.
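// E.g. (illustrative MIR) 'dead %0.sub0 = ...' may leave %0.sub1 live past // the instruction, so only a subrange check or a full-register def may // insist that the live range ends here.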
2212 if (SubRangeCheck || MO->getSubReg() == 0) { 2213 report("Live range continues after dead def flag", MO, MONum); 2214 report_context_liverange(LR); 2215 report_context_vreg_regunit(VRegOrUnit); 2216 if (LaneMask.any()) 2217 report_context_lanemask(LaneMask); 2218 } 2219 } 2220 } 2221 } 2222 2223 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) { 2224 const MachineInstr *MI = MO->getParent(); 2225 const Register Reg = MO->getReg(); 2226 const unsigned SubRegIdx = MO->getSubReg(); 2227 2228 const LiveInterval *LI = nullptr; 2229 if (LiveInts && Reg.isVirtual()) { 2230 if (LiveInts->hasInterval(Reg)) { 2231 LI = &LiveInts->getInterval(Reg); 2232 if (SubRegIdx != 0 && !LI->empty() && !LI->hasSubRanges() && 2233 MRI->shouldTrackSubRegLiveness(Reg)) 2234 report("Live interval for subreg operand has no subranges", MO, MONum); 2235 } else { 2236 report("Virtual register has no live interval", MO, MONum); 2237 } 2238 } 2239 2240 // Both use and def operands can read a register. 2241 if (MO->readsReg()) { 2242 if (MO->isKill()) 2243 addRegWithSubRegs(regsKilled, Reg); 2244 2245 // Check that LiveVars knows this kill (unless we are inside a bundle, in 2246 // which case we have already checked that LiveVars knows any kills on the 2247 // bundle header instead). 2248 if (LiveVars && Reg.isVirtual() && MO->isKill() && 2249 !MI->isBundledWithPred()) { 2250 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg); 2251 if (!is_contained(VI.Kills, MI)) 2252 report("Kill missing from LiveVariables", MO, MONum); 2253 } 2254 2255 // Check LiveInts liveness and kill. 2256 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) { 2257 SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI); 2258 // Check the cached regunit intervals. 2259 if (Reg.isPhysical() && !isReserved(Reg)) { 2260 for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid(); 2261 ++Units) { 2262 if (MRI->isReservedRegUnit(*Units)) 2263 continue; 2264 if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units)) 2265 checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units); 2266 } 2267 } 2268 2269 if (Reg.isVirtual()) { 2270 // This is a virtual register interval. 2271 checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg); 2272 2273 if (LI->hasSubRanges() && !MO->isDef()) { 2274 LaneBitmask MOMask = SubRegIdx != 0 2275 ? TRI->getSubRegIndexLaneMask(SubRegIdx) 2276 : MRI->getMaxLaneMaskForVReg(Reg); 2277 LaneBitmask LiveInMask; 2278 for (const LiveInterval::SubRange &SR : LI->subranges()) { 2279 if ((MOMask & SR.LaneMask).none()) 2280 continue; 2281 checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask); 2282 LiveQueryResult LRQ = SR.Query(UseIdx); 2283 if (LRQ.valueIn()) 2284 LiveInMask |= SR.LaneMask; 2285 } 2286 // At least part of the register has to be live at the use. 2287 if ((LiveInMask & MOMask).none()) { 2288 report("No live subrange at use", MO, MONum); 2289 report_context(*LI); 2290 report_context(UseIdx); 2291 } 2292 } 2293 } 2294 } 2295 2296 // Use of a dead register. 2297 if (!regsLive.count(Reg)) { 2298 if (Reg.isPhysical()) { 2299 // Reserved registers may be used even when 'dead'. 2300 bool Bad = !isReserved(Reg); 2301 // We are fine if any subregister has a defined value. 2302 if (Bad) { 2303 2304 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) { 2305 if (regsLive.count(SubReg)) { 2306 Bad = false; 2307 break; 2308 } 2309 } 2310 } 2311 // If there is an additional implicit-use of a super register we stop 2312 // here. By definition we are fine if the super register is not 2313 // (completely) dead; if the complete super register is dead, we will 2314 // get a report for its operand. 2315 if (Bad) { 2316 for (const MachineOperand &MOP : MI->uses()) { 2317 if (!MOP.isReg() || !MOP.isImplicit()) 2318 continue; 2319 2320 if (!MOP.getReg().isPhysical()) 2321 continue; 2322 2323 if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg)) 2324 Bad = false; 2325 } 2326 } 2327 if (Bad) 2328 report("Using an undefined physical register", MO, MONum); 2329 } else if (MRI->def_empty(Reg)) { 2330 report("Reading virtual register without a def", MO, MONum); 2331 } else { 2332 BBInfo &MInfo = MBBInfoMap[MI->getParent()]; 2333 // We don't know which virtual registers are live in, so only complain 2334 // if vreg was killed in this MBB. Otherwise keep track of vregs that 2335 // must be live in. PHI instructions are handled separately. 2336 if (MInfo.regsKilled.count(Reg)) 2337 report("Using a killed virtual register", MO, MONum); 2338 else if (!MI->isPHI()) 2339 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI)); 2340 } 2341 } 2342 } 2343 2344 if (MO->isDef()) { 2345 // Register defined. 2346 // TODO: verify that earlyclobber ops are not used. 2347 if (MO->isDead()) 2348 addRegWithSubRegs(regsDead, Reg); 2349 else 2350 addRegWithSubRegs(regsDefined, Reg); 2351 2352 // Verify SSA form. 2353 if (MRI->isSSA() && Reg.isVirtual() && 2354 std::next(MRI->def_begin(Reg)) != MRI->def_end()) 2355 report("Multiple virtual register defs in SSA form", MO, MONum); 2356 2357 // Check LiveInts for a live segment, but only for virtual registers. 2358 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) { 2359 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI); 2360 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber()); 2361 2362 if (Reg.isVirtual()) { 2363 checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg); 2364 2365 if (LI->hasSubRanges()) { 2366 LaneBitmask MOMask = SubRegIdx != 0 2367 ? TRI->getSubRegIndexLaneMask(SubRegIdx) 2368 : MRI->getMaxLaneMaskForVReg(Reg); 2369 for (const LiveInterval::SubRange &SR : LI->subranges()) { 2370 if ((SR.LaneMask & MOMask).none()) 2371 continue; 2372 checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask); 2373 } 2374 } 2375 } 2376 } 2377 } 2378 } 2379 2380 // This function gets called after visiting all instructions in a bundle. The 2381 // argument points to the bundle header. 2382 // Normal stand-alone instructions are also considered 'bundles', and this 2383 // function is called for all of them. 2384 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) { 2385 BBInfo &MInfo = MBBInfoMap[MI->getParent()]; 2386 set_union(MInfo.regsKilled, regsKilled); 2387 set_subtract(regsLive, regsKilled); regsKilled.clear(); 2388 // Kill any masked registers.
2389 while (!regMasks.empty()) { 2390 const uint32_t *Mask = regMasks.pop_back_val(); 2391 for (Register Reg : regsLive) 2392 if (Reg.isPhysical() && 2393 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg())) 2394 regsDead.push_back(Reg); 2395 } 2396 set_subtract(regsLive, regsDead); regsDead.clear(); 2397 set_union(regsLive, regsDefined); regsDefined.clear(); 2398 } 2399 2400 void 2401 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) { 2402 MBBInfoMap[MBB].regsLiveOut = regsLive; 2403 regsLive.clear(); 2404 2405 if (Indexes) { 2406 SlotIndex stop = Indexes->getMBBEndIdx(MBB); 2407 if (!(stop > lastIndex)) { 2408 report("Block ends before last instruction index", MBB); 2409 errs() << "Block ends at " << stop 2410 << " last instruction was at " << lastIndex << '\n'; 2411 } 2412 lastIndex = stop; 2413 } 2414 } 2415 2416 namespace { 2417 // This implements a set of registers that serves as a filter: it can filter 2418 // other sets by passing through elements not in the filter and blocking those 2419 // that are. Any filter implicitly includes the full set of physical registers 2420 // upon creation, thus filtering them all out. The filter itself as a set only 2421 // grows, and needs to be as efficient as possible. 2422 struct VRegFilter { 2423 // Add elements to the filter itself. \pre Input set \p FromRegSet must have 2424 // no duplicates. Both virtual and physical registers are fine. 2425 template <typename RegSetT> void add(const RegSetT &FromRegSet) { 2426 SmallVector<Register, 0> VRegsBuffer; 2427 filterAndAdd(FromRegSet, VRegsBuffer); 2428 } 2429 // Filter \p FromRegSet through the filter and append passed elements into \p 2430 // ToVRegs. All elements appended are then added to the filter itself. 2431 // \returns true if anything changed. 2432 template <typename RegSetT> 2433 bool filterAndAdd(const RegSetT &FromRegSet, 2434 SmallVectorImpl<Register> &ToVRegs) { 2435 unsigned SparseUniverse = Sparse.size(); 2436 unsigned NewSparseUniverse = SparseUniverse; 2437 unsigned NewDenseSize = Dense.size(); 2438 size_t Begin = ToVRegs.size(); 2439 for (Register Reg : FromRegSet) { 2440 if (!Reg.isVirtual()) 2441 continue; 2442 unsigned Index = Register::virtReg2Index(Reg); 2443 if (Index < SparseUniverseMax) { 2444 if (Index < SparseUniverse && Sparse.test(Index)) 2445 continue; 2446 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1); 2447 } else { 2448 if (Dense.count(Reg)) 2449 continue; 2450 ++NewDenseSize; 2451 } 2452 ToVRegs.push_back(Reg); 2453 } 2454 size_t End = ToVRegs.size(); 2455 if (Begin == End) 2456 return false; 2457 // Reserving space in sets once performs better than doing so continuously 2458 // and pays easily for double look-ups (even in Dense with SparseUniverseMax 2459 // tuned all the way down) and double iteration (the second one is over a 2460 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector). 2461 Sparse.resize(NewSparseUniverse); 2462 Dense.reserve(NewDenseSize); 2463 for (unsigned I = Begin; I < End; ++I) { 2464 Register Reg = ToVRegs[I]; 2465 unsigned Index = Register::virtReg2Index(Reg); 2466 if (Index < SparseUniverseMax) 2467 Sparse.set(Index); 2468 else 2469 Dense.insert(Reg); 2470 } 2471 return true; 2472 } 2473 2474 private: 2475 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8; 2476 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond 2477 // are tracked by Dense. The only purpose of the threshold and the Dense set 2478 // is to have a reasonably growing memory usage in pathological cases (large 2479 // number of very sparse VRegFilter instances live at the same time). In 2480 // practice even in the worst-by-execution-time cases having all elements 2481 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more 2482 // space efficient than if tracked by Dense. The threshold is set to keep the 2483 // worst-case memory usage within 2x of figures determined empirically for 2484 // "all Dense" scenario in such worst-by-execution-time cases. 2485 BitVector Sparse; 2486 DenseSet<unsigned> Dense; 2487 }; 2488 2489 // Implements both a transfer function and a (binary, in-place) join operator 2490 // for a dataflow over register sets with set union join and filtering transfer 2491 // (out_b = in_b \ filter_b). filter_b is expected to be set up ahead of time. 2492 // Maintains out_b as its state, allowing for O(n) iteration over it at any 2493 // time, where n is the size of the set (as opposed to O(U) where U is the 2494 // universe). filter_b implicitly contains all physical registers at all times. 2495 class FilteringVRegSet { 2496 VRegFilter Filter; 2497 SmallVector<Register, 0> VRegs; 2498 2499 public: 2500 // Set up the filter_b. \pre Input register set \p RS must have no duplicates. 2501 // Both virtual and physical registers are fine. 2502 template <typename RegSetT> void addToFilter(const RegSetT &RS) { 2503 Filter.add(RS); 2504 } 2505 // Passes \p RS through the filter_b (transfer function) and adds what's left 2506 // to itself (out_b). 2507 template <typename RegSetT> bool add(const RegSetT &RS) { 2508 // Double-duty the Filter: to maintain VRegs a set (and the join operation 2509 // a set union) just add everything being added here to the Filter as well. 2510 return Filter.filterAndAdd(RS, VRegs); 2511 } 2512 using const_iterator = decltype(VRegs)::const_iterator; 2513 const_iterator begin() const { return VRegs.begin(); } 2514 const_iterator end() const { return VRegs.end(); } 2515 size_t size() const { return VRegs.size(); } 2516 }; 2517 } // namespace 2518 2519 // Calculate the largest possible vregsPassed sets. These are the registers that 2520 // can pass through an MBB live, but may not be live every time. It is assumed 2521 // that all vregsPassed sets are empty before the call. 2522 void MachineVerifier::calcRegsPassed() { 2523 if (MF->empty()) 2524 // ReversePostOrderTraversal doesn't handle empty functions. 2525 return; 2526 2527 for (const MachineBasicBlock *MB : 2528 ReversePostOrderTraversal<const MachineFunction *>(MF)) { 2529 FilteringVRegSet VRegs; 2530 BBInfo &Info = MBBInfoMap[MB]; 2531 assert(Info.reachable); 2532 2533 VRegs.addToFilter(Info.regsKilled); 2534 VRegs.addToFilter(Info.regsLiveOut); 2535 for (const MachineBasicBlock *Pred : MB->predecessors()) { 2536 const BBInfo &PredInfo = MBBInfoMap[Pred]; 2537 if (!PredInfo.reachable) 2538 continue; 2539 2540 VRegs.add(PredInfo.regsLiveOut); 2541 VRegs.add(PredInfo.vregsPassed); 2542 } 2543 Info.vregsPassed.reserve(VRegs.size()); 2544 Info.vregsPassed.insert(VRegs.begin(), VRegs.end()); 2545 } 2546 } 2547 2548 // Calculate the set of virtual registers that must be passed through each basic 2549 // block in order to satisfy the requirements of successor blocks. This is very 2550 // similar to calcRegsPassed, only backwards. 2551 void MachineVerifier::calcRegsRequired() { 2552 // First push live-in regs to predecessors' vregsRequired.
2553 SmallPtrSet<const MachineBasicBlock*, 8> todo; 2554 for (const auto &MBB : *MF) { 2555 BBInfo &MInfo = MBBInfoMap[&MBB]; 2556 for (const MachineBasicBlock *Pred : MBB.predecessors()) { 2557 BBInfo &PInfo = MBBInfoMap[Pred]; 2558 if (PInfo.addRequired(MInfo.vregsLiveIn)) 2559 todo.insert(Pred); 2560 } 2561 2562 // Handle the PHI nodes. 2563 for (const MachineInstr &MI : MBB.phis()) { 2564 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) { 2565 // Skip operands that are not registers or that don't read the register. 2566 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg()) 2567 continue; 2568 2569 // Get register and predecessor for one PHI edge. 2570 Register Reg = MI.getOperand(i).getReg(); 2571 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB(); 2572 2573 BBInfo &PInfo = MBBInfoMap[Pred]; 2574 if (PInfo.addRequired(Reg)) 2575 todo.insert(Pred); 2576 } 2577 } 2578 } 2579 2580 // Iteratively push vregsRequired to predecessors. This will converge to the 2581 // same final state regardless of DenseSet iteration order. 2582 while (!todo.empty()) { 2583 const MachineBasicBlock *MBB = *todo.begin(); 2584 todo.erase(MBB); 2585 BBInfo &MInfo = MBBInfoMap[MBB]; 2586 for (const MachineBasicBlock *Pred : MBB->predecessors()) { 2587 if (Pred == MBB) 2588 continue; 2589 BBInfo &SInfo = MBBInfoMap[Pred]; 2590 if (SInfo.addRequired(MInfo.vregsRequired)) 2591 todo.insert(Pred); 2592 } 2593 } 2594 } 2595 2596 // Check PHI instructions at the beginning of MBB. It is assumed that 2597 // calcRegsPassed has been run so BBInfo::isLiveOut is valid. 2598 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) { 2599 BBInfo &MInfo = MBBInfoMap[&MBB]; 2600 2601 SmallPtrSet<const MachineBasicBlock*, 8> seen; 2602 for (const MachineInstr &Phi : MBB) { 2603 if (!Phi.isPHI()) 2604 break; 2605 seen.clear(); 2606 2607 const MachineOperand &MODef = Phi.getOperand(0); 2608 if (!MODef.isReg() || !MODef.isDef()) { 2609 report("Expected first PHI operand to be a register def", &MODef, 0); 2610 continue; 2611 } 2612 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() || 2613 MODef.isEarlyClobber() || MODef.isDebug()) 2614 report("Unexpected flag on PHI operand", &MODef, 0); 2615 Register DefReg = MODef.getReg(); 2616 if (!Register::isVirtualRegister(DefReg)) 2617 report("Expected first PHI operand to be a virtual register", &MODef, 0); 2618 2619 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) { 2620 const MachineOperand &MO0 = Phi.getOperand(I); 2621 if (!MO0.isReg()) { 2622 report("Expected PHI operand to be a register", &MO0, I); 2623 continue; 2624 } 2625 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() || 2626 MO0.isDebug() || MO0.isTied()) 2627 report("Unexpected flag on PHI operand", &MO0, I); 2628 2629 const MachineOperand &MO1 = Phi.getOperand(I + 1); 2630 if (!MO1.isMBB()) { 2631 report("Expected PHI operand to be a basic block", &MO1, I + 1); 2632 continue; 2633 } 2634 2635 const MachineBasicBlock &Pre = *MO1.getMBB(); 2636 if (!Pre.isSuccessor(&MBB)) { 2637 report("PHI input is not a predecessor block", &MO1, I + 1); 2638 continue; 2639 } 2640 2641 if (MInfo.reachable) { 2642 seen.insert(&Pre); 2643 BBInfo &PrInfo = MBBInfoMap[&Pre]; 2644 if (!MO0.isUndef() && PrInfo.reachable && 2645 !PrInfo.isLiveOut(MO0.getReg())) 2646 report("PHI operand is not live-out from predecessor", &MO0, I); 2647 } 2648 } 2649 2650 // Did we see all predecessors?
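// A well-formed PHI such as '%2:gpr32 = PHI %0, %bb.0, %1, %bb.1' // (illustrative MIR) must list an incoming value for every CFG predecessor.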
2651 if (MInfo.reachable) { 2652 for (MachineBasicBlock *Pred : MBB.predecessors()) { 2653 if (!seen.count(Pred)) { 2654 report("Missing PHI operand", &Phi); 2655 errs() << printMBBReference(*Pred) 2656 << " is a predecessor according to the CFG.\n"; 2657 } 2658 } 2659 } 2660 } 2661 } 2662 2663 void MachineVerifier::visitMachineFunctionAfter() { 2664 calcRegsPassed(); 2665 2666 for (const MachineBasicBlock &MBB : *MF) 2667 checkPHIOps(MBB); 2668 2669 // Now check liveness info if available 2670 calcRegsRequired(); 2671 2672 // Check for killed virtual registers that should be live out. 2673 for (const auto &MBB : *MF) { 2674 BBInfo &MInfo = MBBInfoMap[&MBB]; 2675 for (Register VReg : MInfo.vregsRequired) 2676 if (MInfo.regsKilled.count(VReg)) { 2677 report("Virtual register killed in block, but needed live out.", &MBB); 2678 errs() << "Virtual register " << printReg(VReg) 2679 << " is used after the block.\n"; 2680 } 2681 } 2682 2683 if (!MF->empty()) { 2684 BBInfo &MInfo = MBBInfoMap[&MF->front()]; 2685 for (Register VReg : MInfo.vregsRequired) { 2686 report("Virtual register defs don't dominate all uses.", MF); 2687 report_context_vreg(VReg); 2688 } 2689 } 2690 2691 if (LiveVars) 2692 verifyLiveVariables(); 2693 if (LiveInts) 2694 verifyLiveIntervals(); 2695 2696 // Check live-in list of each MBB. If a register is live into MBB, check 2697 // that the register is in regsLiveOut of each predecessor block. Since 2698 // this must come from a definition in the predecessor or its live-in 2699 // list, this will catch a live-through case where the predecessor does not 2700 // have the register in its live-in list. This currently only checks 2701 // registers that have no aliases, are not allocatable, and are not 2702 // reserved, which could mean a condition code register for instance. 2703 if (MRI->tracksLiveness()) 2704 for (const auto &MBB : *MF) 2705 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) { 2706 MCPhysReg LiveInReg = P.PhysReg; 2707 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid(); 2708 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg)) 2709 continue; 2710 for (const MachineBasicBlock *Pred : MBB.predecessors()) { 2711 BBInfo &PInfo = MBBInfoMap[Pred]; 2712 if (!PInfo.regsLiveOut.count(LiveInReg)) { 2713 report("Live in register not found to be live out from predecessor.", 2714 &MBB); 2715 errs() << TRI->getName(LiveInReg) 2716 << " not found to be live out from " 2717 << printMBBReference(*Pred) << "\n"; 2718 } 2719 } 2720 } 2721 2722 for (auto CSInfo : MF->getCallSitesInfo()) 2723 if (!CSInfo.first->isCall()) 2724 report("Call site info referencing instruction that is not call", MF); 2725 2726 // If there's debug-info, check that we don't have any duplicate value 2727 // tracking numbers.
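// Debug instruction references identify defining instructions by these // numbers, so each number may be attached to at most one instruction.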
2728 if (MF->getFunction().getSubprogram()) { 2729 DenseSet<unsigned> SeenNumbers; 2730 for (auto &MBB : *MF) { 2731 for (auto &MI : MBB) { 2732 if (auto Num = MI.peekDebugInstrNum()) { 2733 auto Result = SeenNumbers.insert((unsigned)Num); 2734 if (!Result.second) 2735 report("Instruction has a duplicated value tracking number", &MI); 2736 } 2737 } 2738 } 2739 } 2740 } 2741 2742 void MachineVerifier::verifyLiveVariables() { 2743 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars"); 2744 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) { 2745 Register Reg = Register::index2VirtReg(I); 2746 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg); 2747 for (const auto &MBB : *MF) { 2748 BBInfo &MInfo = MBBInfoMap[&MBB]; 2749 2750 // Our vregsRequired should be identical to LiveVariables' AliveBlocks 2751 if (MInfo.vregsRequired.count(Reg)) { 2752 if (!VI.AliveBlocks.test(MBB.getNumber())) { 2753 report("LiveVariables: Block missing from AliveBlocks", &MBB); 2754 errs() << "Virtual register " << printReg(Reg) 2755 << " must be live through the block.\n"; 2756 } 2757 } else { 2758 if (VI.AliveBlocks.test(MBB.getNumber())) { 2759 report("LiveVariables: Block should not be in AliveBlocks", &MBB); 2760 errs() << "Virtual register " << printReg(Reg) 2761 << " is not needed live through the block.\n"; 2762 } 2763 } 2764 } 2765 } 2766 } 2767 2768 void MachineVerifier::verifyLiveIntervals() { 2769 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts"); 2770 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) { 2771 Register Reg = Register::index2VirtReg(I); 2772 2773 // Spilling and splitting may leave unused registers around. Skip them. 2774 if (MRI->reg_nodbg_empty(Reg)) 2775 continue; 2776 2777 if (!LiveInts->hasInterval(Reg)) { 2778 report("Missing live interval for virtual register", MF); 2779 errs() << printReg(Reg, TRI) << " still has defs or uses\n"; 2780 continue; 2781 } 2782 2783 const LiveInterval &LI = LiveInts->getInterval(Reg); 2784 assert(Reg == LI.reg() && "Invalid reg to interval mapping"); 2785 verifyLiveInterval(LI); 2786 } 2787 2788 // Verify all the cached regunit intervals. 2789 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i) 2790 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i)) 2791 verifyLiveRange(*LR, i); 2792 } 2793 2794 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR, 2795 const VNInfo *VNI, Register Reg, 2796 LaneBitmask LaneMask) { 2797 if (VNI->isUnused()) 2798 return; 2799 2800 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def); 2801 2802 if (!DefVNI) { 2803 report("Value not live at VNInfo def and not marked unused", MF); 2804 report_context(LR, Reg, LaneMask); 2805 report_context(*VNI); 2806 return; 2807 } 2808 2809 if (DefVNI != VNI) { 2810 report("Live segment at def has different VNInfo", MF); 2811 report_context(LR, Reg, LaneMask); 2812 report_context(*VNI); 2813 return; 2814 } 2815 2816 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def); 2817 if (!MBB) { 2818 report("Invalid VNInfo definition index", MF); 2819 report_context(LR, Reg, LaneMask); 2820 report_context(*VNI); 2821 return; 2822 } 2823 2824 if (VNI->isPHIDef()) { 2825 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) { 2826 report("PHIDef VNInfo is not defined at MBB start", MBB); 2827 report_context(LR, Reg, LaneMask); 2828 report_context(*VNI); 2829 } 2830 return; 2831 } 2832 2833 // Non-PHI def. 
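// A non-PHI value must be defined by an actual instruction, and that // instruction must really (perhaps partially) define the register; both // properties are checked below.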
2834 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def); 2835 if (!MI) { 2836 report("No instruction at VNInfo def index", MBB); 2837 report_context(LR, Reg, LaneMask); 2838 report_context(*VNI); 2839 return; 2840 } 2841 2842 if (Reg != 0) { 2843 bool hasDef = false; 2844 bool isEarlyClobber = false; 2845 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) { 2846 if (!MOI->isReg() || !MOI->isDef()) 2847 continue; 2848 if (Register::isVirtualRegister(Reg)) { 2849 if (MOI->getReg() != Reg) 2850 continue; 2851 } else { 2852 if (!Register::isPhysicalRegister(MOI->getReg()) || 2853 !TRI->hasRegUnit(MOI->getReg(), Reg)) 2854 continue; 2855 } 2856 if (LaneMask.any() && 2857 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none()) 2858 continue; 2859 hasDef = true; 2860 if (MOI->isEarlyClobber()) 2861 isEarlyClobber = true; 2862 } 2863 2864 if (!hasDef) { 2865 report("Defining instruction does not modify register", MI); 2866 report_context(LR, Reg, LaneMask); 2867 report_context(*VNI); 2868 } 2869 2870 // Early clobber defs begin at USE slots, but other defs must begin at 2871 // DEF slots. 2872 if (isEarlyClobber) { 2873 if (!VNI->def.isEarlyClobber()) { 2874 report("Early clobber def must be at an early-clobber slot", MBB); 2875 report_context(LR, Reg, LaneMask); 2876 report_context(*VNI); 2877 } 2878 } else if (!VNI->def.isRegister()) { 2879 report("Non-PHI, non-early clobber def must be at a register slot", MBB); 2880 report_context(LR, Reg, LaneMask); 2881 report_context(*VNI); 2882 } 2883 } 2884 } 2885 2886 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR, 2887 const LiveRange::const_iterator I, 2888 Register Reg, 2889 LaneBitmask LaneMask) { 2890 const LiveRange::Segment &S = *I; 2891 const VNInfo *VNI = S.valno; 2892 assert(VNI && "Live segment has no valno"); 2893 2894 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) { 2895 report("Foreign valno in live segment", MF); 2896 report_context(LR, Reg, LaneMask); 2897 report_context(S); 2898 report_context(*VNI); 2899 } 2900 2901 if (VNI->isUnused()) { 2902 report("Live segment valno is marked unused", MF); 2903 report_context(LR, Reg, LaneMask); 2904 report_context(S); 2905 } 2906 2907 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start); 2908 if (!MBB) { 2909 report("Bad start of live segment, no basic block", MF); 2910 report_context(LR, Reg, LaneMask); 2911 report_context(S); 2912 return; 2913 } 2914 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB); 2915 if (S.start != MBBStartIdx && S.start != VNI->def) { 2916 report("Live segment must begin at MBB entry or valno def", MBB); 2917 report_context(LR, Reg, LaneMask); 2918 report_context(S); 2919 } 2920 2921 const MachineBasicBlock *EndMBB = 2922 LiveInts->getMBBFromIndex(S.end.getPrevSlot()); 2923 if (!EndMBB) { 2924 report("Bad end of live segment, no basic block", MF); 2925 report_context(LR, Reg, LaneMask); 2926 report_context(S); 2927 return; 2928 } 2929 2930 // No more checks for live-out segments. 2931 if (S.end == LiveInts->getMBBEndIdx(EndMBB)) 2932 return; 2933 2934 // RegUnit intervals are allowed dead phis. 
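// Such a value starts at the block boundary and dies immediately at its // dead slot, so the remaining segment checks do not apply to it.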
2935 if (!Register::isVirtualRegister(Reg) && VNI->isPHIDef() && 2936 S.start == VNI->def && S.end == VNI->def.getDeadSlot()) 2937 return; 2938 2939 // The live segment is ending inside EndMBB 2940 const MachineInstr *MI = 2941 LiveInts->getInstructionFromIndex(S.end.getPrevSlot()); 2942 if (!MI) { 2943 report("Live segment doesn't end at a valid instruction", EndMBB); 2944 report_context(LR, Reg, LaneMask); 2945 report_context(S); 2946 return; 2947 } 2948 2949 // The block slot must refer to a basic block boundary. 2950 if (S.end.isBlock()) { 2951 report("Live segment ends at B slot of an instruction", EndMBB); 2952 report_context(LR, Reg, LaneMask); 2953 report_context(S); 2954 } 2955 2956 if (S.end.isDead()) { 2957 // Segment ends on the dead slot. 2958 // That means there must be a dead def. 2959 if (!SlotIndex::isSameInstr(S.start, S.end)) { 2960 report("Live segment ending at dead slot spans instructions", EndMBB); 2961 report_context(LR, Reg, LaneMask); 2962 report_context(S); 2963 } 2964 } 2965 2966 // After tied operands are rewritten, a live segment can only end at an 2967 // early-clobber slot if it is being redefined by an early-clobber def. 2968 // TODO: Before tied operands are rewritten, a live segment can only end at an 2969 // early-clobber slot if the last use is tied to an early-clobber def. 2970 if (MF->getProperties().hasProperty( 2971 MachineFunctionProperties::Property::TiedOpsRewritten) && 2972 S.end.isEarlyClobber()) { 2973 if (I+1 == LR.end() || (I+1)->start != S.end) { 2974 report("Live segment ending at early clobber slot must be " 2975 "redefined by an EC def in the same instruction", EndMBB); 2976 report_context(LR, Reg, LaneMask); 2977 report_context(S); 2978 } 2979 } 2980 2981 // The following checks only apply to virtual registers. Physreg liveness 2982 // is too weird to check. 2983 if (Register::isVirtualRegister(Reg)) { 2984 // A live segment can end with either a redefinition, a kill flag on a 2985 // use, or a dead flag on a def. 2986 bool hasRead = false; 2987 bool hasSubRegDef = false; 2988 bool hasDeadDef = false; 2989 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) { 2990 if (!MOI->isReg() || MOI->getReg() != Reg) 2991 continue; 2992 unsigned Sub = MOI->getSubReg(); 2993 LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) 2994 : LaneBitmask::getAll(); 2995 if (MOI->isDef()) { 2996 if (Sub != 0) { 2997 hasSubRegDef = true; 2998 // An operand %0:sub0 reads %0:sub1..n. Invert the lane 2999 // mask for subregister defs. Read-undef defs will be handled by 3000 // readsReg below. 3001 SLM = ~SLM; 3002 } 3003 if (MOI->isDead()) 3004 hasDeadDef = true; 3005 } 3006 if (LaneMask.any() && (LaneMask & SLM).none()) 3007 continue; 3008 if (MOI->readsReg()) 3009 hasRead = true; 3010 } 3011 if (S.end.isDead()) { 3012 // Make sure that the corresponding machine operand for a "dead" live 3013 // range has the dead flag. We cannot perform this check for subregister 3014 // liveranges as partially dead values are allowed. 3015 if (LaneMask.none() && !hasDeadDef) { 3016 report("Instruction ending live segment on dead slot has no dead flag", 3017 MI); 3018 report_context(LR, Reg, LaneMask); 3019 report_context(S); 3020 } 3021 } else { 3022 if (!hasRead) { 3023 // When tracking subregister liveness, the main range must start new 3024 // values on partial register writes, even if there is no read. 
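// E.g. (illustrative MIR) 'undef %0.sub1 = ...' starts a new value in %0's // main range without reading %0, so the missing read is expected there.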
3025 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() || 3026 !hasSubRegDef) { 3027 report("Instruction ending live segment doesn't read the register", 3028 MI); 3029 report_context(LR, Reg, LaneMask); 3030 report_context(S); 3031 } 3032 } 3033 } 3034 } 3035 3036 // Now check all the basic blocks in this live segment. 3037 MachineFunction::const_iterator MFI = MBB->getIterator(); 3038 // Is this live segment the beginning of a non-PHIDef VN? 3039 if (S.start == VNI->def && !VNI->isPHIDef()) { 3040 // Not live-in to any blocks. 3041 if (MBB == EndMBB) 3042 return; 3043 // Skip this block. 3044 ++MFI; 3045 } 3046 3047 SmallVector<SlotIndex, 4> Undefs; 3048 if (LaneMask.any()) { 3049 LiveInterval &OwnerLI = LiveInts->getInterval(Reg); 3050 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes); 3051 } 3052 3053 while (true) { 3054 assert(LiveInts->isLiveInToMBB(LR, &*MFI)); 3055 // We don't know how to track physregs into a landing pad. 3056 if (!Register::isVirtualRegister(Reg) && MFI->isEHPad()) { 3057 if (&*MFI == EndMBB) 3058 break; 3059 ++MFI; 3060 continue; 3061 } 3062 3063 // Is VNI a PHI-def in the current block? 3064 bool IsPHI = VNI->isPHIDef() && 3065 VNI->def == LiveInts->getMBBStartIdx(&*MFI); 3066 3067 // Check that VNI is live-out of all predecessors. 3068 for (const MachineBasicBlock *Pred : MFI->predecessors()) { 3069 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred); 3070 // A landing pad's predecessor is live-out at its last call. 3071 if (MFI->isEHPad()) { 3072 for (const MachineInstr &MI : llvm::reverse(*Pred)) { 3073 if (MI.isCall()) { 3074 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex(); 3075 break; 3076 } 3077 } 3078 } 3079 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd); 3080 3081 // All predecessors must have a live-out value. However, for a PHI 3082 // instruction with subregister intervals, only one of the subregisters 3083 // (not necessarily the current one) needs to 3084 // be defined. 3085 if (!PVNI && (LaneMask.none() || !IsPHI)) { 3086 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes)) 3087 continue; 3088 report("Register not marked live out of predecessor", Pred); 3089 report_context(LR, Reg, LaneMask); 3090 report_context(*VNI); 3091 errs() << " live into " << printMBBReference(*MFI) << '@' 3092 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " 3093 << PEnd << '\n'; 3094 continue; 3095 } 3096 3097 // Only PHI-defs can take different predecessor values.
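// For a non-PHI value, every predecessor must flow the very same value // number into this block; a mismatch indicates a broken interval.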
3098 if (!IsPHI && PVNI != VNI) { 3099 report("Different value live out of predecessor", Pred); 3100 report_context(LR, Reg, LaneMask); 3101 errs() << "Valno #" << PVNI->id << " live out of " 3102 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" 3103 << VNI->id << " live into " << printMBBReference(*MFI) << '@' 3104 << LiveInts->getMBBStartIdx(&*MFI) << '\n'; 3105 } 3106 } 3107 if (&*MFI == EndMBB) 3108 break; 3109 ++MFI; 3110 } 3111 } 3112 3113 void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg, 3114 LaneBitmask LaneMask) { 3115 for (const VNInfo *VNI : LR.valnos) 3116 verifyLiveRangeValue(LR, VNI, Reg, LaneMask); 3117 3118 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I) 3119 verifyLiveRangeSegment(LR, I, Reg, LaneMask); 3120 } 3121 3122 void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) { 3123 Register Reg = LI.reg(); 3124 assert(Register::isVirtualRegister(Reg)); 3125 verifyLiveRange(LI, Reg); 3126 3127 LaneBitmask Mask; 3128 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg); 3129 for (const LiveInterval::SubRange &SR : LI.subranges()) { 3130 if ((Mask & SR.LaneMask).any()) { 3131 report("Lane masks of sub ranges overlap in live interval", MF); 3132 report_context(LI); 3133 } 3134 if ((SR.LaneMask & ~MaxMask).any()) { 3135 report("Subrange lanemask is invalid", MF); 3136 report_context(LI); 3137 } 3138 if (SR.empty()) { 3139 report("Subrange must not be empty", MF); 3140 report_context(SR, LI.reg(), SR.LaneMask); 3141 } 3142 Mask |= SR.LaneMask; 3143 verifyLiveRange(SR, LI.reg(), SR.LaneMask); 3144 if (!LI.covers(SR)) { 3145 report("A Subrange is not covered by the main range", MF); 3146 report_context(LI); 3147 } 3148 } 3149 3150 // Check that the LI has only one connected component. 3151 ConnectedVNInfoEqClasses ConEQ(*LiveInts); 3152 unsigned NumComp = ConEQ.Classify(LI); 3153 if (NumComp > 1) { 3154 report("Multiple connected components in live interval", MF); 3155 report_context(LI); 3156 for (unsigned comp = 0; comp != NumComp; ++comp) { 3157 errs() << comp << ": valnos"; 3158 for (const VNInfo *I : LI.valnos) 3159 if (comp == ConEQ.getEqClass(I)) 3160 errs() << ' ' << I->id; 3161 errs() << '\n'; 3162 } 3163 } 3164 } 3165 3166 namespace { 3167 3168 // FrameSetup and FrameDestroy can have zero adjustment, so with a single 3169 // integer we can't tell whether a zero value marks a FrameSetup or a 3170 // FrameDestroy. 3171 // We therefore use a bool plus an integer to capture the stack state. 3172 struct StackStateOfBB { 3173 StackStateOfBB() = default; 3174 StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) : 3175 EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup), 3176 ExitIsSetup(ExitSetup) {} 3177 3178 // Can be negative, which means we are setting up a frame. 3179 int EntryValue = 0; 3180 int ExitValue = 0; 3181 bool EntryIsSetup = false; 3182 bool ExitIsSetup = false; 3183 }; 3184 3185 } // end anonymous namespace 3186 3187 /// Make sure that, on every path through the CFG, a FrameSetup <n> is always 3188 /// followed by a FrameDestroy <n>, that stack adjustments are identical on all 3189 /// CFG edges to a merge point, and that the frame is destroyed at the end of a return block.
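/// The walk below simulates the stack-pointer adjustment along a DFS of the /// CFG and cross-checks the resulting entry/exit states on every edge between /// visited blocks.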
3190 void MachineVerifier::verifyStackFrame() { 3191 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode(); 3192 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode(); 3193 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u) 3194 return; 3195 3196 SmallVector<StackStateOfBB, 8> SPState; 3197 SPState.resize(MF->getNumBlockIDs()); 3198 df_iterator_default_set<const MachineBasicBlock*> Reachable; 3199 3200 // Visit the MBBs in DFS order. 3201 for (df_ext_iterator<const MachineFunction *, 3202 df_iterator_default_set<const MachineBasicBlock *>> 3203 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable); 3204 DFI != DFE; ++DFI) { 3205 const MachineBasicBlock *MBB = *DFI; 3206 3207 StackStateOfBB BBState; 3208 // Check the exit state of the DFS stack predecessor. 3209 if (DFI.getPathLength() >= 2) { 3210 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2); 3211 assert(Reachable.count(StackPred) && 3212 "DFS stack predecessor is already visited.\n"); 3213 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue; 3214 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup; 3215 BBState.ExitValue = BBState.EntryValue; 3216 BBState.ExitIsSetup = BBState.EntryIsSetup; 3217 } 3218 3219 // Update stack state by checking contents of MBB. 3220 for (const auto &I : *MBB) { 3221 if (I.getOpcode() == FrameSetupOpcode) { 3222 if (BBState.ExitIsSetup) 3223 report("FrameSetup is after another FrameSetup", &I); 3224 BBState.ExitValue -= TII->getFrameTotalSize(I); 3225 BBState.ExitIsSetup = true; 3226 } 3227 3228 if (I.getOpcode() == FrameDestroyOpcode) { 3229 int Size = TII->getFrameTotalSize(I); 3230 if (!BBState.ExitIsSetup) 3231 report("FrameDestroy is not after a FrameSetup", &I); 3232 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue : 3233 BBState.ExitValue; 3234 if (BBState.ExitIsSetup && AbsSPAdj != Size) { 3235 report("FrameDestroy <n> is after FrameSetup <m>", &I); 3236 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <" 3237 << AbsSPAdj << ">.\n"; 3238 } 3239 BBState.ExitValue += Size; 3240 BBState.ExitIsSetup = false; 3241 } 3242 } 3243 SPState[MBB->getNumber()] = BBState; 3244 3245 // Make sure the exit state of any predecessor is consistent with the entry 3246 // state. 3247 for (const MachineBasicBlock *Pred : MBB->predecessors()) { 3248 if (Reachable.count(Pred) && 3249 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue || 3250 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) { 3251 report("The exit stack state of a predecessor is inconsistent.", MBB); 3252 errs() << "Predecessor " << printMBBReference(*Pred) 3253 << " has exit state (" << SPState[Pred->getNumber()].ExitValue 3254 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while " 3255 << printMBBReference(*MBB) << " has entry state (" 3256 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n"; 3257 } 3258 } 3259 3260 // Make sure the entry state of any successor is consistent with the exit 3261 // state. 
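// Successors already visited by the DFS received their entry state from a // possibly different DFS stack predecessor, so this also catches mismatches // across back and cross edges.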
3262 for (const MachineBasicBlock *Succ : MBB->successors()) { 3263 if (Reachable.count(Succ) && 3264 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue || 3265 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) { 3266 report("The entry stack state of a successor is inconsistent.", MBB); 3267 errs() << "Successor " << printMBBReference(*Succ) 3268 << " has entry state (" << SPState[Succ->getNumber()].EntryValue 3269 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while " 3270 << printMBBReference(*MBB) << " has exit state (" 3271 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n"; 3272 } 3273 } 3274 3275 // Make sure a basic block with return ends with zero stack adjustment. 3276 if (!MBB->empty() && MBB->back().isReturn()) { 3277 if (BBState.ExitIsSetup) 3278 report("A return block ends with a FrameSetup.", MBB); 3279 if (BBState.ExitValue) 3280 report("A return block ends with a nonzero stack adjustment.", MBB); 3281 } 3282 } 3283 } 3284