//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the function verifier interface, which can be used for
// some sanity checking of input to the system.
//
// Note that this does not provide full `Java style' security and verifications,
// instead it just tries to ensure that code is well-formed.
//
//  * Both of a binary operator's parameters are of the same type
//  * Verify that the indices of mem access instructions match other operands
//  * Verify that arithmetic and other things are only performed on first-class
//    types.  Verify that shifts & logicals only happen on integrals, e.g.
//  * All of the constants in a switch statement are of the correct type
//  * The code is in valid SSA form
//  * It should be illegal to put a label into any other type (like a structure)
//    or to return one. [except constant arrays!]
//  * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
//  * PHI nodes must have an entry for each predecessor, with no extras.
//  * PHI nodes must be the first thing in a basic block, all grouped together
//  * PHI nodes must have at least one entry
//  * All basic blocks should only end with terminator insts, not contain them
//  * The entry node to a function must not have predecessors
//  * All Instructions must be embedded into a basic block
//  * Functions cannot take a void-typed parameter
//  * Verify that a function's argument list agrees with its declared type.
//  * It is illegal to specify a name for a void value.
//  * It is illegal to have an internal global value with no initializer
//  * It is illegal to have a ret instruction that returns a value that does not
//    agree with the function return value type.
//  * Function call argument types match the function prototype
//  * A landing pad is defined by a landingpad instruction, and can be jumped to
//    only by the unwind edge of an invoke instruction.
//  * A landingpad instruction must be the first non-PHI instruction in the
//    block.
//  * Landingpad instructions must be in a function with a personality function.
//  * All other things that are tested by asserts spread about the code...
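//
//  For example (illustrative only, not an exhaustive statement of the rules
//  below): an instruction such as 'add i32 %x, i64 %y' violates the first
//  rule above and is rejected, as is a PHI node that is missing an entry for
//  one of its block's predecessors.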
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Verifier.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>

using namespace llvm;

static cl::opt<bool> VerifyDebugInfo("verify-debug-info", cl::init(true));

namespace llvm {

struct VerifierSupport {
  raw_ostream *OS;
  const Module &M;
  ModuleSlotTracker MST;
  const DataLayout &DL;
  LLVMContext &Context;

  /// Track the brokenness of the module while recursively visiting.
  bool Broken = false;
  /// Broken debug info can be "recovered" from by stripping the debug info.
  bool BrokenDebugInfo = false;
  /// Whether to treat broken debug info as an error.
  bool TreatBrokenDebugInfoAsError = true;

  explicit VerifierSupport(raw_ostream *OS, const Module &M)
      : OS(OS), M(M), MST(&M), DL(M.getDataLayout()), Context(M.getContext()) {}

private:
  void Write(const Module *M) {
    *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
  }

  void Write(const Value *V) {
    if (!V)
      return;
    if (isa<Instruction>(V)) {
      V->print(*OS, MST);
      *OS << '\n';
    } else {
      V->printAsOperand(*OS, true, MST);
      *OS << '\n';
    }
  }

  void Write(ImmutableCallSite CS) {
    Write(CS.getInstruction());
  }

  void Write(const Metadata *MD) {
    if (!MD)
      return;
    MD->print(*OS, MST, &M);
    *OS << '\n';
  }

  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }

  void Write(const NamedMDNode *NMD) {
    if (!NMD)
      return;
    NMD->print(*OS, MST);
    *OS << '\n';
  }

  void Write(Type *T) {
    if (!T)
      return;
    *OS << ' ' << *T;
  }

  void Write(const Comdat *C) {
    if (!C)
      return;
    *OS << *C;
  }

  void Write(const APInt *AI) {
    if (!AI)
      return;
    *OS << *AI << '\n';
  }

  void Write(const unsigned i) { *OS << i << '\n'; }

  template <typename T> void Write(ArrayRef<T> Vs) {
    for (const T &V : Vs)
      Write(V);
  }

  template <typename T1, typename... Ts>
  void WriteTs(const T1 &V1, const Ts &... Vs) {
    Write(V1);
    WriteTs(Vs...);
  }

  template <typename... Ts> void WriteTs() {}

public:
  /// \brief A check failed, so print out the condition and the message.
  ///
  /// This provides a nice place to put a breakpoint if you want to see why
  /// something is not correct.
  void CheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    Broken = true;
  }

  /// \brief A check failed (with values to print).
  ///
  /// This calls the Message-only version so that the above is easier to set a
  /// breakpoint on.
  template <typename T1, typename... Ts>
  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
    CheckFailed(Message);
    if (OS)
      WriteTs(V1, Vs...);
  }

  /// A debug info check failed.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    Broken |= TreatBrokenDebugInfoAsError;
    BrokenDebugInfo = true;
  }

  /// A debug info check failed (with values to print).
  template <typename T1, typename... Ts>
  void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
                            const Ts &... Vs) {
    DebugInfoCheckFailed(Message);
    if (OS)
      WriteTs(V1, Vs...);
  }
};

} // namespace llvm

namespace {

class Verifier : public InstVisitor<Verifier>, VerifierSupport {
  friend class InstVisitor<Verifier>;

  DominatorTree DT;

  /// \brief When verifying a basic block, keep track of all of the
  /// instructions we have seen so far.
  ///
  /// This allows us to do efficient dominance checks for the case when an
  /// instruction has an operand that is an instruction in the same block.
  SmallPtrSet<Instruction *, 16> InstsInThisBlock;

  /// \brief Keep track of the metadata nodes that have been checked already.
  SmallPtrSet<const Metadata *, 32> MDNodes;

  /// Track all DICompileUnits visited.
  SmallPtrSet<const Metadata *, 2> CUVisited;

  /// \brief The result type for a landingpad.
  Type *LandingPadResultTy;

  /// \brief Whether we've seen a call to @llvm.localescape in this function
  /// already.
  bool SawFrameEscape;

  /// Stores the count of how many objects were passed to llvm.localescape for
  /// a given function and the largest index passed to llvm.localrecover.
  DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;

  // Maps catchswitches and cleanuppads that unwind to siblings to the
  // terminators that indicate the unwind, used to detect cycles therein.
  MapVector<Instruction *, TerminatorInst *> SiblingFuncletInfo;

  /// Cache of constants visited in search of ConstantExprs.
  SmallPtrSet<const Constant *, 32> ConstantExprVisited;

  /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
  SmallVector<const Function *, 4> DeoptimizeDeclarations;

  // Verify that this GlobalValue is only used in this module.
  // This map is used to avoid visiting uses twice. We can arrive at a user
  // twice, if they have multiple operands. In particular for very large
  // constant expressions, we can arrive at a particular user many times.
  SmallPtrSet<const Value *, 32> GlobalValueVisited;

  TBAAVerifier TBAAVerifyHelper;

  void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);

public:
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }

  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }

  bool verify(const Function &F) {
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to
    // run this code outside of a pass manager.
    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();
    InstsInThisBlock.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();

    return !Broken;
  }

  /// Verify the module that this instance of \c Verifier was initialized with.
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
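    // Illustrative example (not an additional check): if a function passes
    // two allocas to llvm.localescape, then every llvm.localrecover referring
    // to that function may only use index 0 or 1; any larger index is
    // reported by verifyFrameRecoverIndices below.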
    verifyFrameRecoverIndices();
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    visitModuleFlags(M);
    visitModuleIdents(M);

    verifyCompileUnits();

    verifyDeoptimizeCallingConvs();

    return !Broken;
  }

private:
  // Verification methods...
  void visitGlobalValue(const GlobalValue &GV);
  void visitGlobalVariable(const GlobalVariable &GV);
  void visitGlobalAlias(const GlobalAlias &GA);
  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
  void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
                           const GlobalAlias &A, const Constant &C);
  void visitNamedMDNode(const NamedMDNode &NMD);
  void visitMDNode(const MDNode &MD);
  void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
  void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
  void visitComdat(const Comdat &C);
  void visitModuleIdents(const Module &M);
  void visitModuleFlags(const Module &M);
  void visitModuleFlag(const MDNode *Op,
                       DenseMap<const MDString *, const MDNode *> &SeenIDs,
                       SmallVectorImpl<const MDNode *> &Requirements);
  void visitFunction(const Function &F);
  void visitBasicBlock(BasicBlock &BB);
  void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
  void visitDereferenceableMetadata(Instruction &I, MDNode *MD);

  template <class Ty> bool isValidMetadataArray(const MDTuple &N);
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
#include "llvm/IR/Metadata.def"
  void visitDIScope(const DIScope &N);
  void visitDIVariable(const DIVariable &N);
  void visitDILexicalBlockBase(const DILexicalBlockBase &N);
  void visitDITemplateParameter(const DITemplateParameter &N);

  void visitTemplateParams(const MDNode &N, const Metadata &RawParams);

  // InstVisitor overrides...
  using InstVisitor<Verifier>::visit;
  void visit(Instruction &I);

  void visitTruncInst(TruncInst &I);
  void visitZExtInst(ZExtInst &I);
  void visitSExtInst(SExtInst &I);
  void visitFPTruncInst(FPTruncInst &I);
  void visitFPExtInst(FPExtInst &I);
  void visitFPToUIInst(FPToUIInst &I);
  void visitFPToSIInst(FPToSIInst &I);
  void visitUIToFPInst(UIToFPInst &I);
  void visitSIToFPInst(SIToFPInst &I);
  void visitIntToPtrInst(IntToPtrInst &I);
  void visitPtrToIntInst(PtrToIntInst &I);
  void visitBitCastInst(BitCastInst &I);
  void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
  void visitPHINode(PHINode &PN);
  void visitBinaryOperator(BinaryOperator &B);
  void visitICmpInst(ICmpInst &IC);
  void visitFCmpInst(FCmpInst &FC);
  void visitExtractElementInst(ExtractElementInst &EI);
  void visitInsertElementInst(InsertElementInst &EI);
  void visitShuffleVectorInst(ShuffleVectorInst &EI);
  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
  void visitCallInst(CallInst &CI);
  void visitInvokeInst(InvokeInst &II);
  void visitGetElementPtrInst(GetElementPtrInst &GEP);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void verifyDominatesUse(Instruction &I, unsigned i);
  void visitInstruction(Instruction &I);
  void visitTerminatorInst(TerminatorInst &I);
  void visitBranchInst(BranchInst &BI);
  void visitReturnInst(ReturnInst &RI);
  void visitSwitchInst(SwitchInst &SI);
  void visitIndirectBrInst(IndirectBrInst &BI);
  void visitSelectInst(SelectInst &SI);
  void visitUserOp1(Instruction &I);
  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
  void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
  void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
  template <class DbgIntrinsicTy>
  void visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
  void visitAtomicRMWInst(AtomicRMWInst &RMWI);
  void visitFenceInst(FenceInst &FI);
  void visitAllocaInst(AllocaInst &AI);
  void visitExtractValueInst(ExtractValueInst &EVI);
  void visitInsertValueInst(InsertValueInst &IVI);
  void visitEHPadPredecessors(Instruction &I);
  void visitLandingPadInst(LandingPadInst &LPI);
  void visitResumeInst(ResumeInst &RI);
  void visitCatchPadInst(CatchPadInst &CPI);
  void visitCatchReturnInst(CatchReturnInst &CatchReturn);
  void visitCleanupPadInst(CleanupPadInst &CPI);
  void visitFuncletPadInst(FuncletPadInst &FPI);
  void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
  void visitCleanupReturnInst(CleanupReturnInst &CRI);

  void verifyCallSite(CallSite CS);
  void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal);
  void verifySwiftErrorValue(const Value *SwiftErrorVal);
  void verifyMustTailCall(CallInst &CI);
  bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
                        unsigned ArgNo, std::string &Suffix);
  bool verifyAttributeCount(AttributeSet Attrs, unsigned Params);
  void verifyAttributeTypes(AttributeSet Attrs, unsigned Idx, bool isFunction,
                            const Value *V);
  void verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
                            bool isReturnValue, const Value *V);
  void verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
                           const Value *V);
  void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);

  void visitConstantExprsRecursively(const Constant *EntryC);
  void visitConstantExpr(const ConstantExpr *CE);
  void verifyStatepoint(ImmutableCallSite CS);
  void verifyFrameRecoverIndices();
  void verifySiblingFuncletUnwinds();

  void verifyFragmentExpression(const DbgInfoIntrinsic &I);

  /// Module-level debug info verification...
  void verifyCompileUnits();

  /// Module-level verification that all @llvm.experimental.deoptimize
  /// declarations share the same calling convention.
  void verifyDeoptimizeCallingConvs();
};

} // end anonymous namespace

/// We know that cond should be true, if not print an error message.
#define Assert(C, ...) \
  do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)

/// We know that a debug info condition should be true, if not print
/// an error message.
#define AssertDI(C, ...) \
  do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (false)

void Verifier::visit(Instruction &I) {
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
    Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
  InstVisitor<Verifier>::visit(I);
}

// Helper to recursively iterate over indirect users. By
// returning false, the callback can ask to stop recursing
// further.
static void forEachUser(const Value *User,
                        SmallPtrSet<const Value *, 32> &Visited,
                        llvm::function_ref<bool(const Value *)> Callback) {
  if (!Visited.insert(User).second)
    return;
  for (const Value *TheNextUser : User->materialized_users())
    if (Callback(TheNextUser))
      forEachUser(TheNextUser, Visited, Callback);
}

void Verifier::visitGlobalValue(const GlobalValue &GV) {
  Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
         "Global is external, but doesn't have external or weak linkage!", &GV);

  Assert(GV.getAlignment() <= Value::MaximumAlignment,
         "huge alignment values are unsupported", &GV);
  Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
         "Only global variables can have appending linkage!", &GV);

  if (GV.hasAppendingLinkage()) {
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
    Assert(GVar && GVar->getValueType()->isArrayTy(),
           "Only global arrays can have appending linkage!", GVar);
  }

  if (GV.isDeclarationForLinker())
    Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);

  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
                    I);
      else if (I->getParent()->getParent()->getParent() != &M)
        CheckFailed("Global is referenced in a different module!", &GV, &M, I,
                    I->getParent()->getParent(),
                    I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(V)) {
      if (F->getParent() != &M)
        CheckFailed("Global is used by function in a different module", &GV, &M,
                    F, F->getParent());
      return false;
    }
    return true;
  });
}
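
// Illustrative example of what the appending-linkage checks above accept
// (for reference only, not part of the checks themselves): an appending
// global must be a global variable of array type, e.g.
//   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
//       [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]
// whereas 'appending' on a function or on a non-array global is rejected.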

void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
  if (GV.hasInitializer()) {
    Assert(GV.getInitializer()->getType() == GV.getValueType(),
           "Global variable initializer type does not match global "
           "variable type!",
           &GV);
    // If the global has common linkage, it must have a zero initializer and
    // cannot be constant.
    if (GV.hasCommonLinkage()) {
      Assert(GV.getInitializer()->isNullValue(),
             "'common' global must have a zero initializer!", &GV);
      Assert(!GV.isConstant(), "'common' global may not be marked constant!",
             &GV);
      Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
                       GV.getName() == "llvm.global_dtors")) {
    Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
           "invalid linkage for intrinsic global variable", &GV);
    // Don't worry about emitting an error for it not being an array,
    // visitGlobalValue will complain on appending non-array.
    if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      PointerType *FuncPtrTy =
          FunctionType::get(Type::getVoidTy(Context), false)->getPointerTo();
      // FIXME: Reject the 2-field form in LLVM 4.0.
      Assert(STy &&
                 (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
                 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
                 STy->getTypeAtIndex(1) == FuncPtrTy,
             "wrong type for intrinsic global variable", &GV);
      if (STy->getNumElements() == 3) {
        Type *ETy = STy->getTypeAtIndex(2);
        Assert(ETy->isPointerTy() &&
                   cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
               "wrong type for intrinsic global variable", &GV);
      }
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.used" ||
                       GV.getName() == "llvm.compiler.used")) {
    Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
           "invalid linkage for intrinsic global variable", &GV);
    Type *GVType = GV.getValueType();
    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
      Assert(PTy, "wrong type for intrinsic global variable", &GV);
      if (GV.hasInitializer()) {
        const Constant *Init = GV.getInitializer();
        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        Assert(InitArray, "wrong initializer for intrinsic global variable",
               Init);
        for (Value *Op : InitArray->operands()) {
          Value *V = Op->stripPointerCastsNoFollowAliases();
          Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
                     isa<GlobalAlias>(V),
                 "invalid llvm.used member", V);
          Assert(V->hasName(), "members of llvm.used must be named", V);
        }
      }
    }
  }

  Assert(!GV.hasDLLImportStorageClass() ||
             (GV.isDeclaration() && GV.hasExternalLinkage()) ||
             GV.hasAvailableExternallyLinkage(),
         "Global is marked as dllimport, but not external", &GV);
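
  // Illustrative example (for reference only): a well-formed llvm.used array
  // that satisfies the checks above would look like
  //   @llvm.used = appending global [1 x i8*]
  //       [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"
  // i.e. every member strips (through pointer casts) to a named global
  // variable, function, or alias.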

  // Visit any debug info attachments.
  SmallVector<MDNode *, 1> MDs;
  GV.getMetadata(LLVMContext::MD_dbg, MDs);
  for (auto *MD : MDs) {
    if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
      visitDIGlobalVariableExpression(*GVE);
    else
      AssertDI(false, "!dbg attachment of global variable must be a "
                      "DIGlobalVariableExpression");
  }

  if (!GV.hasInitializer()) {
    visitGlobalValue(GV);
    return;
  }

  // Walk any aggregate initializers looking for bitcasts between address spaces.
  visitConstantExprsRecursively(GV.getInitializer());

  visitGlobalValue(GV);
}

void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
  SmallPtrSet<const GlobalAlias *, 4> Visited;
  Visited.insert(&GA);
  visitAliaseeSubExpr(Visited, GA, C);
}

void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
    Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
           &GA);

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Assert(!GA2->isInterposable(),
             "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}

void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
  Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
         "Alias should have private, internal, linkonce, weak, linkonce_odr, "
         "weak_odr, or external linkage!",
         &GA);
  const Constant *Aliasee = GA.getAliasee();
  Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
  Assert(GA.getType() == Aliasee->getType(),
         "Alias and aliasee types should match!", &GA);

  Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
         "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  visitAliaseeSubExpr(GA, *Aliasee);

  visitGlobalValue(GA);
}

void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
  // There used to be various other llvm.dbg.* nodes, but we don't support
  // upgrading them and we want to reserve the namespace for future uses.
  if (NMD.getName().startswith("llvm.dbg."))
    AssertDI(NMD.getName() == "llvm.dbg.cu",
             "unrecognized named metadata node in the llvm.dbg namespace",
             &NMD);
  for (const MDNode *MD : NMD.operands()) {
    if (NMD.getName() == "llvm.dbg.cu")
      AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);

    if (!MD)
      continue;

    visitMDNode(*MD);
  }
}
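
// For reference (illustrative, not an additional requirement): the only
// named metadata accepted in the llvm.dbg namespace is the compile unit
// list, e.g.
//   !llvm.dbg.cu = !{!0}
//   !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, ...)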

void Verifier::visitMDNode(const MDNode &MD) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
  case Metadata::CLASS##Kind: \
    visit##CLASS(cast<CLASS>(MD)); \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
           &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check these last, so we diagnose problems in operands first.
  Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
}

void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
  Assert(MD.getValue(), "Expected valid value", &MD);
  Assert(!MD.getValue()->getType()->isMetadataTy(),
         "Unexpected metadata round-trip through values", &MD, MD.getValue());

  auto *L = dyn_cast<LocalAsMetadata>(&MD);
  if (!L)
    return;

  Assert(F, "function-local metadata used outside a function", L);

  // If this was an instruction, bb, or argument, verify that it is in the
  // function that we expect.
  Function *ActualF = nullptr;
  if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
    Assert(I->getParent(), "function-local metadata not in basic block", L, I);
    ActualF = I->getParent()->getParent();
  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
    ActualF = BB->getParent();
  else if (Argument *A = dyn_cast<Argument>(L->getValue()))
    ActualF = A->getParent();
  assert(ActualF && "Unimplemented function local metadata case!");

  Assert(ActualF == F, "function-local metadata used in wrong function", L);
}
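
// Illustrative note on the rule enforced above (not an extra check): metadata
// that wraps an Instruction, Argument, or BasicBlock is "function-local" and
// may only be referenced from within that same function, e.g. as the first
// operand of a call to llvm.dbg.value.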

void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
  Metadata *MD = MDV.getMetadata();
  if (auto *N = dyn_cast<MDNode>(MD)) {
    visitMDNode(*N);
    return;
  }

  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(MD).second)
    return;

  if (auto *V = dyn_cast<ValueAsMetadata>(MD))
    visitValueAsMetadata(*V, F);
}

static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }

template <class Ty>
static bool isValidMetadataArrayImpl(const MDTuple &N, bool AllowNull) {
  for (Metadata *MD : N.operands()) {
    if (MD) {
      if (!isa<Ty>(MD))
        return false;
    } else {
      if (!AllowNull)
        return false;
    }
  }
  return true;
}

template <class Ty> static bool isValidMetadataArray(const MDTuple &N) {
  return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ false);
}

template <class Ty> static bool isValidMetadataNullArray(const MDTuple &N) {
  return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ true);
}

void Verifier::visitDILocation(const DILocation &N) {
  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
           "location requires a valid scope", &N, N.getRawScope());
  if (auto *IA = N.getRawInlinedAt())
    AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
}

void Verifier::visitGenericDINode(const GenericDINode &N) {
  AssertDI(N.getTag(), "invalid tag", &N);
}

void Verifier::visitDIScope(const DIScope &N) {
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
}

void Verifier::visitDISubrange(const DISubrange &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  AssertDI(N.getCount() >= -1, "invalid subrange count", &N);
}

void Verifier::visitDIEnumerator(const DIEnumerator &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
}

void Verifier::visitDIBasicType(const DIBasicType &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
               N.getTag() == dwarf::DW_TAG_unspecified_type,
           "invalid tag", &N);
}
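
// Illustrative example of a node the check above accepts (for reference):
//   !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
// which carries the DW_TAG_base_type tag.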

void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_typedef ||
               N.getTag() == dwarf::DW_TAG_pointer_type ||
               N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
               N.getTag() == dwarf::DW_TAG_reference_type ||
               N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
               N.getTag() == dwarf::DW_TAG_const_type ||
               N.getTag() == dwarf::DW_TAG_volatile_type ||
               N.getTag() == dwarf::DW_TAG_restrict_type ||
               N.getTag() == dwarf::DW_TAG_atomic_type ||
               N.getTag() == dwarf::DW_TAG_member ||
               N.getTag() == dwarf::DW_TAG_inheritance ||
               N.getTag() == dwarf::DW_TAG_friend,
           "invalid tag", &N);
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
             N.getRawExtraData());
  }

  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
           N.getRawBaseType());
}

static bool hasConflictingReferenceFlags(unsigned Flags) {
  return (Flags & DINode::FlagLValueReference) &&
         (Flags & DINode::FlagRValueReference);
}

void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
  auto *Params = dyn_cast<MDTuple>(&RawParams);
  AssertDI(Params, "invalid template params", &N, &RawParams);
  for (Metadata *Op : Params->operands()) {
    AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
             &N, Params, Op);
  }
}

void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
               N.getTag() == dwarf::DW_TAG_structure_type ||
               N.getTag() == dwarf::DW_TAG_union_type ||
               N.getTag() == dwarf::DW_TAG_enumeration_type ||
               N.getTag() == dwarf::DW_TAG_class_type,
           "invalid tag", &N);

  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
           N.getRawBaseType());

  AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
           "invalid composite elements", &N, N.getRawElements());
  AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
           N.getRawVTableHolder());
  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
           "invalid reference flags", &N);
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (N.getTag() == dwarf::DW_TAG_class_type ||
      N.getTag() == dwarf::DW_TAG_union_type) {
    AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
             "class/union requires a filename", &N, N.getFile());
  }
}

void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
           "invalid reference flags", &N);
}

void Verifier::visitDIFile(const DIFile &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
  AssertDI((N.getChecksumKind() != DIFile::CSK_None ||
            N.getChecksum().empty()),
           "invalid checksum kind", &N);
}
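
// Illustrative DIFile node satisfying the checks above (for reference):
//   !DIFile(filename: "a.c", directory: "/tmp",
//           checksumkind: CSK_MD5, checksum: "...")
// A non-empty checksum string is only valid together with a checksum kind.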

void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  AssertDI(N.isDistinct(), "compile units must be distinct", &N);
  AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
           N.getRawFile());
  AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
           N.getFile());

  AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
           "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
      AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
               "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      AssertDI(Op && (isa<DIType>(Op) ||
                      (isa<DISubprogram>(Op) &&
                       !cast<DISubprogram>(Op)->isDefinition())),
               "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
      AssertDI(Op && (isa<DIGlobalVariableExpression>(Op)),
               "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
               &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  CUVisited.insert(&N);
}

void Verifier::visitDISubprogram(const DISubprogram &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
  if (auto *T = N.getRawType())
    AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
           N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  if (auto *S = N.getRawDeclaration())
    AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
             "invalid subprogram declaration", &N, S);
  if (auto *RawVars = N.getRawVariables()) {
    auto *Vars = dyn_cast<MDTuple>(RawVars);
    AssertDI(Vars, "invalid variable list", &N, RawVars);
    for (Metadata *Op : Vars->operands()) {
      AssertDI(Op && isa<DILocalVariable>(Op), "invalid local variable", &N,
               Vars, Op);
    }
  }
  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
           "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
    AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
  }
}

void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
           "invalid local scope", &N, N.getRawScope());
}

void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
  visitDILexicalBlockBase(N);

  AssertDI(N.getLine() || !N.getColumn(),
           "cannot have column info without line info", &N);
}

void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}

void Verifier::visitDINamespace(const DINamespace &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
  if (auto *S = N.getRawScope())
    AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
}

void Verifier::visitDIMacro(const DIMacro &N) {
  AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
               N.getMacinfoType() == dwarf::DW_MACINFO_undef,
           "invalid macinfo type", &N);
  AssertDI(!N.getName().empty(), "anonymous macro", &N);
  if (!N.getValue().empty()) {
    assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
  }
}

void Verifier::visitDIMacroFile(const DIMacroFile &N) {
  AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
           "invalid macinfo type", &N);
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);

  if (auto *Array = N.getRawElements()) {
    AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getElements()->operands()) {
      AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
}

void Verifier::visitDIModule(const DIModule &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  AssertDI(!N.getName().empty(), "anonymous module", &N);
}

void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}

void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
  visitDITemplateParameter(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
           &N);
}

void Verifier::visitDITemplateValueParameter(
    const DITemplateValueParameter &N) {
  visitDITemplateParameter(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
               N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
               N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
           "invalid tag", &N);
}

void Verifier::visitDIVariable(const DIVariable &N) {
  if (auto *S = N.getRawScope())
    AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
}
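
// For reference (illustrative only): both DIGlobalVariable and
// DILocalVariable funnel through visitDIVariable above; a local variable
// additionally requires a DILocalScope, e.g.
//   !DILocalVariable(name: "x", scope: !7, file: !1, line: 3, type: !8)
// where !7 must be a DISubprogram or a lexical block.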

void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  AssertDI(!N.getName().empty(), "missing global variable name", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
    AssertDI(isa<DIDerivedType>(Member),
             "invalid static data member declaration", &N, Member);
  }
}

void Verifier::visitDILocalVariable(const DILocalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
           "local variable requires a valid scope", &N, N.getRawScope());
}

void Verifier::visitDIExpression(const DIExpression &N) {
  AssertDI(N.isValid(), "invalid expression", &N);
}

void Verifier::visitDIGlobalVariableExpression(
    const DIGlobalVariableExpression &GVE) {
  AssertDI(GVE.getVariable(), "missing variable");
  if (auto *Var = GVE.getVariable())
    visitDIGlobalVariable(*Var);
  if (auto *Expr = GVE.getExpression())
    visitDIExpression(*Expr);
}

void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
  if (auto *T = N.getRawType())
    AssertDI(isType(T), "invalid type ref", &N, T);
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
}

void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_imported_module ||
               N.getTag() == dwarf::DW_TAG_imported_declaration,
           "invalid tag", &N);
  if (auto *S = N.getRawScope())
    AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
  AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
           N.getRawEntity());
}

void Verifier::visitComdat(const Comdat &C) {
  // The Module is invalid if the GlobalValue has private linkage. Entities
  // with private linkage don't have entries in the symbol table.
  if (const GlobalValue *GV = M.getNamedValue(C.getName()))
    Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
           GV);
}

void Verifier::visitModuleIdents(const Module &M) {
  const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
  if (!Idents)
    return;

  // llvm.ident takes a list of metadata entries. Each entry has only one
  // string. Scan each llvm.ident entry and make sure this requirement is met.
  for (const MDNode *N : Idents->operands()) {
    Assert(N->getNumOperands() == 1,
           "incorrect number of operands in llvm.ident metadata", N);
    Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
           ("invalid value for llvm.ident metadata entry operand "
            "(the operand should be a string)"),
           N->getOperand(0));
  }
}

void Verifier::visitModuleFlags(const Module &M) {
  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
  if (!Flags) return;

  // Scan each flag, and track the flags and requirements.
  DenseMap<const MDString *, const MDNode *> SeenIDs;
  SmallVector<const MDNode *, 16> Requirements;
  for (const MDNode *MDN : Flags->operands())
    visitModuleFlag(MDN, SeenIDs, Requirements);
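
  // Illustrative example of module flags that the loop above records
  // (for reference only):
  //   !llvm.module.flags = !{!0, !1}
  //   !0 = !{i32 1, !"wchar_size", i32 4}
  //   !1 = !{i32 3, !"some-flag", !{!"wchar_size", i32 4}}
  // !0 is an 'error'-behavior flag; !1 is a 'require' flag whose pair is
  // checked against the recorded flags below.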

  // Validate that the requirements in the module are valid.
  for (const MDNode *Requirement : Requirements) {
    const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
    const Metadata *ReqValue = Requirement->getOperand(1);

    const MDNode *Op = SeenIDs.lookup(Flag);
    if (!Op) {
      CheckFailed("invalid requirement on flag, flag is not present in module",
                  Flag);
      continue;
    }

    if (Op->getOperand(2) != ReqValue) {
      CheckFailed(("invalid requirement on flag, "
                   "flag does not have the required value"),
                  Flag);
      continue;
    }
  }
}

void
Verifier::visitModuleFlag(const MDNode *Op,
                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments, the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Assert(Op->getNumOperands() == 3,
         "incorrect number of operands in module flag", Op);
  Module::ModFlagBehavior MFB;
  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
    Assert(
        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
        "invalid behavior operand in module flag (expected constant integer)",
        Op->getOperand(0));
    Assert(false,
           "invalid behavior operand in module flag (unexpected constant)",
           Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
  Assert(ID, "invalid ID operand in module flag (expected metadata string)",
         Op->getOperand(1));

  // Sanity check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
    Assert(Value && Value->getNumOperands() == 2,
           "invalid value for 'require' module flag (expected metadata pair)",
           Op->getOperand(2));
    Assert(isa<MDString>(Value->getOperand(0)),
           ("invalid value for 'require' module flag "
            "(first value operand should be a string)"),
           Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags
    // are scanned.
    Requirements.push_back(Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Assert(isa<MDNode>(Op->getOperand(2)),
           "invalid value for 'append'-type module flag "
           "(expected a metadata node)",
           Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
    Assert(Inserted,
           "module flag identifiers must be unique (or of 'require' type)", ID);
  }
}

void Verifier::verifyAttributeTypes(AttributeSet Attrs, unsigned Idx,
                                    bool isFunction, const Value *V) {
  unsigned Slot = ~0U;
  for (unsigned I = 0, E = Attrs.getNumSlots(); I != E; ++I)
    if (Attrs.getSlotIndex(I) == Idx) {
      Slot = I;
      break;
    }

  assert(Slot != ~0U && "Attribute set inconsistency!");

  for (AttributeSet::iterator I = Attrs.begin(Slot), E = Attrs.end(Slot);
       I != E; ++I) {
    if (I->isStringAttribute())
      continue;

    if (I->getKindAsEnum() == Attribute::NoReturn ||
        I->getKindAsEnum() == Attribute::NoUnwind ||
        I->getKindAsEnum() == Attribute::NoInline ||
        I->getKindAsEnum() == Attribute::AlwaysInline ||
        I->getKindAsEnum() == Attribute::OptimizeForSize ||
        I->getKindAsEnum() == Attribute::StackProtect ||
        I->getKindAsEnum() == Attribute::StackProtectReq ||
        I->getKindAsEnum() == Attribute::StackProtectStrong ||
        I->getKindAsEnum() == Attribute::SafeStack ||
        I->getKindAsEnum() == Attribute::NoRedZone ||
        I->getKindAsEnum() == Attribute::NoImplicitFloat ||
        I->getKindAsEnum() == Attribute::Naked ||
        I->getKindAsEnum() == Attribute::InlineHint ||
        I->getKindAsEnum() == Attribute::StackAlignment ||
        I->getKindAsEnum() == Attribute::UWTable ||
        I->getKindAsEnum() == Attribute::NonLazyBind ||
        I->getKindAsEnum() == Attribute::ReturnsTwice ||
        I->getKindAsEnum() == Attribute::SanitizeAddress ||
        I->getKindAsEnum() == Attribute::SanitizeThread ||
        I->getKindAsEnum() == Attribute::SanitizeMemory ||
        I->getKindAsEnum() == Attribute::MinSize ||
        I->getKindAsEnum() == Attribute::NoDuplicate ||
        I->getKindAsEnum() == Attribute::Builtin ||
        I->getKindAsEnum() == Attribute::NoBuiltin ||
        I->getKindAsEnum() == Attribute::Cold ||
        I->getKindAsEnum() == Attribute::OptimizeNone ||
        I->getKindAsEnum() == Attribute::JumpTable ||
        I->getKindAsEnum() == Attribute::Convergent ||
        I->getKindAsEnum() == Attribute::ArgMemOnly ||
        I->getKindAsEnum() == Attribute::NoRecurse ||
        I->getKindAsEnum() == Attribute::InaccessibleMemOnly ||
        I->getKindAsEnum() == Attribute::InaccessibleMemOrArgMemOnly ||
        I->getKindAsEnum() == Attribute::AllocSize) {
      if (!isFunction) {
        CheckFailed("Attribute '" + I->getAsString() +
                        "' only applies to functions!",
                    V);
        return;
      }
    } else if (I->getKindAsEnum() == Attribute::ReadOnly ||
               I->getKindAsEnum() == Attribute::WriteOnly ||
               I->getKindAsEnum() == Attribute::ReadNone) {
      if (Idx == 0) {
        CheckFailed("Attribute '" + I->getAsString() +
                    "' does not apply to function returns");
        return;
      }
    } else if (isFunction) {
      CheckFailed("Attribute '" + I->getAsString() +
                      "' does not apply to functions!",
                  V);
      return;
    }
  }
}
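
// For reference (descriptive note, not a change in behavior): in this
// AttributeSet scheme, index 0 refers to the return value, indices 1..N
// refer to the corresponding parameters, and AttributeSet::FunctionIndex
// refers to attributes on the function itself; verifyAttributeTypes above
// rejects attributes placed on an index where they cannot apply.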

// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
void Verifier::verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
                                    bool isReturnValue, const Value *V) {
  if (!Attrs.hasAttributes(Idx))
    return;

  verifyAttributeTypes(Attrs, Idx, false, V);

  if (isReturnValue)
    Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
               !Attrs.hasAttribute(Idx, Attribute::Nest) &&
               !Attrs.hasAttribute(Idx, Attribute::StructRet) &&
               !Attrs.hasAttribute(Idx, Attribute::NoCapture) &&
               !Attrs.hasAttribute(Idx, Attribute::Returned) &&
               !Attrs.hasAttribute(Idx, Attribute::InAlloca) &&
               !Attrs.hasAttribute(Idx, Attribute::SwiftSelf) &&
               !Attrs.hasAttribute(Idx, Attribute::SwiftError),
           "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
           "'returned', 'swiftself', and 'swifterror' do not apply to return "
           "values!",
           V);

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Idx, Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Idx, Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Idx, Attribute::StructRet) ||
               Attrs.hasAttribute(Idx, Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Idx, Attribute::Nest);
  Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
                         "and 'sret' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::InAlloca) &&
           Attrs.hasAttribute(Idx, Attribute::ReadOnly)),
         "Attributes "
         "'inalloca and readonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::StructRet) &&
           Attrs.hasAttribute(Idx, Attribute::Returned)),
         "Attributes "
         "'sret and returned' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::ZExt) &&
           Attrs.hasAttribute(Idx, Attribute::SExt)),
         "Attributes "
         "'zeroext and signext' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadNone) &&
           Attrs.hasAttribute(Idx, Attribute::ReadOnly)),
         "Attributes "
         "'readnone and readonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadNone) &&
           Attrs.hasAttribute(Idx, Attribute::WriteOnly)),
         "Attributes "
         "'readnone and writeonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadOnly) &&
           Attrs.hasAttribute(Idx, Attribute::WriteOnly)),
         "Attributes "
         "'readonly and writeonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::NoInline) &&
           Attrs.hasAttribute(Idx, Attribute::AlwaysInline)),
         "Attributes "
         "'noinline and alwaysinline' are incompatible!",
         V);

  Assert(
      !AttrBuilder(Attrs, Idx).overlaps(AttributeFuncs::typeIncompatible(Ty)),
      "Wrong types for attribute: " +
          AttributeSet::get(Context, Idx, AttributeFuncs::typeIncompatible(Ty))
              .getAsString(Idx),
      V);

  if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
    SmallPtrSet<Type *, 4> Visited;
    if (!PTy->getElementType()->isSized(&Visited)) {
      Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
                 !Attrs.hasAttribute(Idx, Attribute::InAlloca),
             "Attributes 'byval' and 'inalloca' do not support unsized types!",
             V);
    }
    if (!isa<PointerType>(PTy->getElementType()))
      Assert(!Attrs.hasAttribute(Idx, Attribute::SwiftError),
             "Attribute 'swifterror' only applies to parameters "
             "with pointer to pointer type!",
             V);
" 1481 "with pointer to pointer type!", 1482 V); 1483 } else { 1484 Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal), 1485 "Attribute 'byval' only applies to parameters with pointer type!", 1486 V); 1487 Assert(!Attrs.hasAttribute(Idx, Attribute::SwiftError), 1488 "Attribute 'swifterror' only applies to parameters " 1489 "with pointer type!", 1490 V); 1491 } 1492 } 1493 1494 // Check parameter attributes against a function type. 1495 // The value V is printed in error messages. 1496 void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs, 1497 const Value *V) { 1498 if (Attrs.isEmpty()) 1499 return; 1500 1501 bool SawNest = false; 1502 bool SawReturned = false; 1503 bool SawSRet = false; 1504 bool SawSwiftSelf = false; 1505 bool SawSwiftError = false; 1506 1507 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { 1508 unsigned Idx = Attrs.getSlotIndex(i); 1509 1510 Type *Ty; 1511 if (Idx == 0) 1512 Ty = FT->getReturnType(); 1513 else if (Idx-1 < FT->getNumParams()) 1514 Ty = FT->getParamType(Idx-1); 1515 else 1516 break; // VarArgs attributes, verified elsewhere. 1517 1518 verifyParameterAttrs(Attrs, Idx, Ty, Idx == 0, V); 1519 1520 if (Idx == 0) 1521 continue; 1522 1523 if (Attrs.hasAttribute(Idx, Attribute::Nest)) { 1524 Assert(!SawNest, "More than one parameter has attribute nest!", V); 1525 SawNest = true; 1526 } 1527 1528 if (Attrs.hasAttribute(Idx, Attribute::Returned)) { 1529 Assert(!SawReturned, "More than one parameter has attribute returned!", 1530 V); 1531 Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()), 1532 "Incompatible " 1533 "argument and return types for 'returned' attribute", 1534 V); 1535 SawReturned = true; 1536 } 1537 1538 if (Attrs.hasAttribute(Idx, Attribute::StructRet)) { 1539 Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V); 1540 Assert(Idx == 1 || Idx == 2, 1541 "Attribute 'sret' is not on first or second parameter!", V); 1542 SawSRet = true; 1543 } 1544 1545 if (Attrs.hasAttribute(Idx, Attribute::SwiftSelf)) { 1546 Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V); 1547 SawSwiftSelf = true; 1548 } 1549 1550 if (Attrs.hasAttribute(Idx, Attribute::SwiftError)) { 1551 Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", 1552 V); 1553 SawSwiftError = true; 1554 } 1555 1556 if (Attrs.hasAttribute(Idx, Attribute::InAlloca)) { 1557 Assert(Idx == FT->getNumParams(), "inalloca isn't on the last parameter!", 1558 V); 1559 } 1560 } 1561 1562 if (!Attrs.hasAttributes(AttributeSet::FunctionIndex)) 1563 return; 1564 1565 verifyAttributeTypes(Attrs, AttributeSet::FunctionIndex, true, V); 1566 1567 Assert( 1568 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) && 1569 Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly)), 1570 "Attributes 'readnone and readonly' are incompatible!", V); 1571 1572 Assert( 1573 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) && 1574 Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::WriteOnly)), 1575 "Attributes 'readnone and writeonly' are incompatible!", V); 1576 1577 Assert( 1578 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly) && 1579 Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::WriteOnly)), 1580 "Attributes 'readonly and writeonly' are incompatible!", V); 1581 1582 Assert( 1583 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) && 1584 Attrs.hasAttribute(AttributeSet::FunctionIndex, 1585 Attribute::InaccessibleMemOrArgMemOnly)), 1586 
"Attributes 'readnone and inaccessiblemem_or_argmemonly' are incompatible!", V); 1587 1588 Assert( 1589 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) && 1590 Attrs.hasAttribute(AttributeSet::FunctionIndex, 1591 Attribute::InaccessibleMemOnly)), 1592 "Attributes 'readnone and inaccessiblememonly' are incompatible!", V); 1593 1594 Assert( 1595 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline) && 1596 Attrs.hasAttribute(AttributeSet::FunctionIndex, 1597 Attribute::AlwaysInline)), 1598 "Attributes 'noinline and alwaysinline' are incompatible!", V); 1599 1600 if (Attrs.hasAttribute(AttributeSet::FunctionIndex, 1601 Attribute::OptimizeNone)) { 1602 Assert(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline), 1603 "Attribute 'optnone' requires 'noinline'!", V); 1604 1605 Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, 1606 Attribute::OptimizeForSize), 1607 "Attributes 'optsize and optnone' are incompatible!", V); 1608 1609 Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize), 1610 "Attributes 'minsize and optnone' are incompatible!", V); 1611 } 1612 1613 if (Attrs.hasAttribute(AttributeSet::FunctionIndex, 1614 Attribute::JumpTable)) { 1615 const GlobalValue *GV = cast<GlobalValue>(V); 1616 Assert(GV->hasGlobalUnnamedAddr(), 1617 "Attribute 'jumptable' requires 'unnamed_addr'", V); 1618 } 1619 1620 if (Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::AllocSize)) { 1621 std::pair<unsigned, Optional<unsigned>> Args = 1622 Attrs.getAllocSizeArgs(AttributeSet::FunctionIndex); 1623 1624 auto CheckParam = [&](StringRef Name, unsigned ParamNo) { 1625 if (ParamNo >= FT->getNumParams()) { 1626 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V); 1627 return false; 1628 } 1629 1630 if (!FT->getParamType(ParamNo)->isIntegerTy()) { 1631 CheckFailed("'allocsize' " + Name + 1632 " argument must refer to an integer parameter", 1633 V); 1634 return false; 1635 } 1636 1637 return true; 1638 }; 1639 1640 if (!CheckParam("element size", Args.first)) 1641 return; 1642 1643 if (Args.second && !CheckParam("number of elements", *Args.second)) 1644 return; 1645 } 1646 } 1647 1648 void Verifier::verifyFunctionMetadata( 1649 ArrayRef<std::pair<unsigned, MDNode *>> MDs) { 1650 for (const auto &Pair : MDs) { 1651 if (Pair.first == LLVMContext::MD_prof) { 1652 MDNode *MD = Pair.second; 1653 Assert(MD->getNumOperands() == 2, 1654 "!prof annotations should have exactly 2 operands", MD); 1655 1656 // Check first operand. 1657 Assert(MD->getOperand(0) != nullptr, "first operand should not be null", 1658 MD); 1659 Assert(isa<MDString>(MD->getOperand(0)), 1660 "expected string with name of the !prof annotation", MD); 1661 MDString *MDS = cast<MDString>(MD->getOperand(0)); 1662 StringRef ProfName = MDS->getString(); 1663 Assert(ProfName.equals("function_entry_count"), 1664 "first operand should be 'function_entry_count'", MD); 1665 1666 // Check second operand. 
1667 Assert(MD->getOperand(1) != nullptr, "second operand should not be null", 1668 MD); 1669 Assert(isa<ConstantAsMetadata>(MD->getOperand(1)), 1670 "expected integer argument to function_entry_count", MD); 1671 } 1672 } 1673 } 1674 1675 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) { 1676 if (!ConstantExprVisited.insert(EntryC).second) 1677 return; 1678 1679 SmallVector<const Constant *, 16> Stack; 1680 Stack.push_back(EntryC); 1681 1682 while (!Stack.empty()) { 1683 const Constant *C = Stack.pop_back_val(); 1684 1685 // Check this constant expression. 1686 if (const auto *CE = dyn_cast<ConstantExpr>(C)) 1687 visitConstantExpr(CE); 1688 1689 if (const auto *GV = dyn_cast<GlobalValue>(C)) { 1690 // Global Values get visited separately, but we do need to make sure 1691 // that the global value is in the correct module 1692 Assert(GV->getParent() == &M, "Referencing global in another module!", 1693 EntryC, &M, GV, GV->getParent()); 1694 continue; 1695 } 1696 1697 // Visit all sub-expressions. 1698 for (const Use &U : C->operands()) { 1699 const auto *OpC = dyn_cast<Constant>(U); 1700 if (!OpC) 1701 continue; 1702 if (!ConstantExprVisited.insert(OpC).second) 1703 continue; 1704 Stack.push_back(OpC); 1705 } 1706 } 1707 } 1708 1709 void Verifier::visitConstantExpr(const ConstantExpr *CE) { 1710 if (CE->getOpcode() == Instruction::BitCast) 1711 Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0), 1712 CE->getType()), 1713 "Invalid bitcast", CE); 1714 1715 if (CE->getOpcode() == Instruction::IntToPtr || 1716 CE->getOpcode() == Instruction::PtrToInt) { 1717 auto *PtrTy = CE->getOpcode() == Instruction::IntToPtr 1718 ? CE->getType() 1719 : CE->getOperand(0)->getType(); 1720 StringRef Msg = CE->getOpcode() == Instruction::IntToPtr 1721 ? "inttoptr not supported for non-integral pointers" 1722 : "ptrtoint not supported for non-integral pointers"; 1723 Assert( 1724 !DL.isNonIntegralPointerType(cast<PointerType>(PtrTy->getScalarType())), 1725 Msg); 1726 } 1727 } 1728 1729 bool Verifier::verifyAttributeCount(AttributeSet Attrs, unsigned Params) { 1730 if (Attrs.getNumSlots() == 0) 1731 return true; 1732 1733 unsigned LastSlot = Attrs.getNumSlots() - 1; 1734 unsigned LastIndex = Attrs.getSlotIndex(LastSlot); 1735 if (LastIndex <= Params 1736 || (LastIndex == AttributeSet::FunctionIndex 1737 && (LastSlot == 0 || Attrs.getSlotIndex(LastSlot - 1) <= Params))) 1738 return true; 1739 1740 return false; 1741 } 1742 1743 /// Verify that statepoint intrinsic is well formed. 
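/// For orientation, here is an illustrative sketch (the callee @foo and the mangled overload suffix of the intrinsic name are hypothetical) of a minimal well-formed gc.statepoint wrapping a void callee with no call, transition, or deoptimization arguments:
///
///   %tok = call token (i64, i32, void ()*, i32, i32, ...)
///          @llvm.experimental.gc.statepoint.p0f_isVoidf(
///              i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 0)
///
/// i.e. statepoint ID, number of patch bytes, callee, number of call
/// arguments, flags, then the (empty) call, transition, and deoptimization
/// argument sections: seven operands in total, which is exactly the minimum
/// implied by the length-field checks below.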
1744 void Verifier::verifyStatepoint(ImmutableCallSite CS) { 1745 assert(CS.getCalledFunction() && 1746 CS.getCalledFunction()->getIntrinsicID() == 1747 Intrinsic::experimental_gc_statepoint); 1748 1749 const Instruction &CI = *CS.getInstruction(); 1750 1751 Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() && 1752 !CS.onlyAccessesArgMemory(), 1753 "gc.statepoint must read and write all memory to preserve " 1754 "reordering restrictions required by safepoint semantics", 1755 &CI); 1756 1757 const Value *IDV = CS.getArgument(0); 1758 Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer", 1759 &CI); 1760 1761 const Value *NumPatchBytesV = CS.getArgument(1); 1762 Assert(isa<ConstantInt>(NumPatchBytesV), 1763 "gc.statepoint number of patchable bytes must be a constant integer", 1764 &CI); 1765 const int64_t NumPatchBytes = 1766 cast<ConstantInt>(NumPatchBytesV)->getSExtValue(); 1767 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!"); 1768 Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be " 1769 "positive", 1770 &CI); 1771 1772 const Value *Target = CS.getArgument(2); 1773 auto *PT = dyn_cast<PointerType>(Target->getType()); 1774 Assert(PT && PT->getElementType()->isFunctionTy(), 1775 "gc.statepoint callee must be of function pointer type", &CI, Target); 1776 FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType()); 1777 1778 const Value *NumCallArgsV = CS.getArgument(3); 1779 Assert(isa<ConstantInt>(NumCallArgsV), 1780 "gc.statepoint number of arguments to underlying call " 1781 "must be constant integer", 1782 &CI); 1783 const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue(); 1784 Assert(NumCallArgs >= 0, 1785 "gc.statepoint number of arguments to underlying call " 1786 "must be positive", 1787 &CI); 1788 const int NumParams = (int)TargetFuncType->getNumParams(); 1789 if (TargetFuncType->isVarArg()) { 1790 Assert(NumCallArgs >= NumParams, 1791 "gc.statepoint mismatch in number of vararg call args", &CI); 1792 1793 // TODO: Remove this limitation 1794 Assert(TargetFuncType->getReturnType()->isVoidTy(), 1795 "gc.statepoint doesn't support wrapping non-void " 1796 "vararg functions yet", 1797 &CI); 1798 } else 1799 Assert(NumCallArgs == NumParams, 1800 "gc.statepoint mismatch in number of call args", &CI); 1801 1802 const Value *FlagsV = CS.getArgument(4); 1803 Assert(isa<ConstantInt>(FlagsV), 1804 "gc.statepoint flags must be constant integer", &CI); 1805 const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue(); 1806 Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0, 1807 "unknown flag used in gc.statepoint flags argument", &CI); 1808 1809 // Verify that the types of the call parameter arguments match 1810 // the type of the wrapped callee. 
1811 for (int i = 0; i < NumParams; i++) { 1812 Type *ParamType = TargetFuncType->getParamType(i); 1813 Type *ArgType = CS.getArgument(5 + i)->getType(); 1814 Assert(ArgType == ParamType, 1815 "gc.statepoint call argument does not match wrapped " 1816 "function type", 1817 &CI); 1818 } 1819 1820 const int EndCallArgsInx = 4 + NumCallArgs; 1821 1822 const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1); 1823 Assert(isa<ConstantInt>(NumTransitionArgsV), 1824 "gc.statepoint number of transition arguments " 1825 "must be constant integer", 1826 &CI); 1827 const int NumTransitionArgs = 1828 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue(); 1829 Assert(NumTransitionArgs >= 0, 1830 "gc.statepoint number of transition arguments must be positive", &CI); 1831 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs; 1832 1833 const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1); 1834 Assert(isa<ConstantInt>(NumDeoptArgsV), 1835 "gc.statepoint number of deoptimization arguments " 1836 "must be constant integer", 1837 &CI); 1838 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue(); 1839 Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments " 1840 "must be positive", 1841 &CI); 1842 1843 const int ExpectedNumArgs = 1844 7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs; 1845 Assert(ExpectedNumArgs <= (int)CS.arg_size(), 1846 "gc.statepoint too few arguments according to length fields", &CI); 1847 1848 // Check that the only uses of this gc.statepoint are gc.result or 1849 // gc.relocate calls which are tied to this statepoint and thus part 1850 // of the same statepoint sequence 1851 for (const User *U : CI.users()) { 1852 const CallInst *Call = dyn_cast<const CallInst>(U); 1853 Assert(Call, "illegal use of statepoint token", &CI, U); 1854 if (!Call) continue; 1855 Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call), 1856 "gc.result or gc.relocate are the only value uses " 1857 "of a gc.statepoint", 1858 &CI, U); 1859 if (isa<GCResultInst>(Call)) { 1860 Assert(Call->getArgOperand(0) == &CI, 1861 "gc.result connected to wrong gc.statepoint", &CI, Call); 1862 } else if (isa<GCRelocateInst>(Call)) { 1863 Assert(Call->getArgOperand(0) == &CI, 1864 "gc.relocate connected to wrong gc.statepoint", &CI, Call); 1865 } 1866 } 1867 1868 // Note: It is legal for a single derived pointer to be listed multiple 1869 // times. It's non-optimal, but it is legal. It can also happen after 1870 // insertion if we strip a bitcast away. 1871 // Note: It is really tempting to check that each base is relocated and 1872 // that a derived pointer is never reused as a base pointer. This turns 1873 // out to be problematic since optimizations run after safepoint insertion 1874 // can recognize equality properties that the insertion logic doesn't know 1875 // about. 
See example statepoint.ll in the verifier subdirectory 1876 } 1877 1878 void Verifier::verifyFrameRecoverIndices() { 1879 for (auto &Counts : FrameEscapeInfo) { 1880 Function *F = Counts.first; 1881 unsigned EscapedObjectCount = Counts.second.first; 1882 unsigned MaxRecoveredIndex = Counts.second.second; 1883 Assert(MaxRecoveredIndex <= EscapedObjectCount, 1884 "all indices passed to llvm.localrecover must be less than the " 1885 "number of arguments passed to llvm.localescape in the parent " 1886 "function", 1887 F); 1888 } 1889 } 1890 1891 static Instruction *getSuccPad(TerminatorInst *Terminator) { 1892 BasicBlock *UnwindDest; 1893 if (auto *II = dyn_cast<InvokeInst>(Terminator)) 1894 UnwindDest = II->getUnwindDest(); 1895 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator)) 1896 UnwindDest = CSI->getUnwindDest(); 1897 else 1898 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest(); 1899 return UnwindDest->getFirstNonPHI(); 1900 } 1901 1902 void Verifier::verifySiblingFuncletUnwinds() { 1903 SmallPtrSet<Instruction *, 8> Visited; 1904 SmallPtrSet<Instruction *, 8> Active; 1905 for (const auto &Pair : SiblingFuncletInfo) { 1906 Instruction *PredPad = Pair.first; 1907 if (Visited.count(PredPad)) 1908 continue; 1909 Active.insert(PredPad); 1910 TerminatorInst *Terminator = Pair.second; 1911 do { 1912 Instruction *SuccPad = getSuccPad(Terminator); 1913 if (Active.count(SuccPad)) { 1914 // Found a cycle; report error 1915 Instruction *CyclePad = SuccPad; 1916 SmallVector<Instruction *, 8> CycleNodes; 1917 do { 1918 CycleNodes.push_back(CyclePad); 1919 TerminatorInst *CycleTerminator = SiblingFuncletInfo[CyclePad]; 1920 if (CycleTerminator != CyclePad) 1921 CycleNodes.push_back(CycleTerminator); 1922 CyclePad = getSuccPad(CycleTerminator); 1923 } while (CyclePad != SuccPad); 1924 Assert(false, "EH pads can't handle each other's exceptions", 1925 ArrayRef<Instruction *>(CycleNodes)); 1926 } 1927 // Don't re-walk a node we've already checked 1928 if (!Visited.insert(SuccPad).second) 1929 break; 1930 // Walk to this successor if it has a map entry. 1931 PredPad = SuccPad; 1932 auto TermI = SiblingFuncletInfo.find(PredPad); 1933 if (TermI == SiblingFuncletInfo.end()) 1934 break; 1935 Terminator = TermI->second; 1936 Active.insert(PredPad); 1937 } while (true); 1938 // Each node only has one successor, so we've walked all the active 1939 // nodes' successors. 1940 Active.clear(); 1941 } 1942 } 1943 1944 // visitFunction - Verify that a function is ok. 1945 // 1946 void Verifier::visitFunction(const Function &F) { 1947 visitGlobalValue(F); 1948 1949 // Check function arguments. 1950 FunctionType *FT = F.getFunctionType(); 1951 unsigned NumArgs = F.arg_size(); 1952 1953 Assert(&Context == &F.getContext(), 1954 "Function context does not match Module context!", &F); 1955 1956 Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F); 1957 Assert(FT->getNumParams() == NumArgs, 1958 "# formal arguments must match # of arguments for function type!", &F, 1959 FT); 1960 Assert(F.getReturnType()->isFirstClassType() || 1961 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(), 1962 "Functions cannot return aggregate values!", &F); 1963 1964 Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(), 1965 "Invalid struct return type!", &F); 1966 1967 AttributeSet Attrs = F.getAttributes(); 1968 1969 Assert(verifyAttributeCount(Attrs, FT->getNumParams()), 1970 "Attribute after last parameter!", &F); 1971 1972 // Check function attributes.
1973 verifyFunctionAttrs(FT, Attrs, &F); 1974 1975 // On function declarations/definitions, we do not support the builtin 1976 // attribute. We do not check this in verifyFunctionAttrs since that is 1977 // checking for Attributes that can or cannot ever be on functions. 1978 Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::Builtin), 1979 "Attribute 'builtin' can only be applied to a callsite.", &F); 1980 1981 // Check that this function meets the restrictions on this calling convention. 1982 // Sometimes varargs is used for perfectly forwarding thunks, so some of these 1983 // restrictions can be lifted. 1984 switch (F.getCallingConv()) { 1985 default: 1986 case CallingConv::C: 1987 break; 1988 case CallingConv::Fast: 1989 case CallingConv::Cold: 1990 case CallingConv::Intel_OCL_BI: 1991 case CallingConv::PTX_Kernel: 1992 case CallingConv::PTX_Device: 1993 Assert(!F.isVarArg(), "Calling convention does not support varargs or " 1994 "perfect forwarding!", 1995 &F); 1996 break; 1997 } 1998 1999 bool isLLVMdotName = F.getName().size() >= 5 && 2000 F.getName().substr(0, 5) == "llvm."; 2001 2002 // Check that the argument values match the function type for this function... 2003 unsigned i = 0; 2004 for (const Argument &Arg : F.args()) { 2005 Assert(Arg.getType() == FT->getParamType(i), 2006 "Argument value does not match function argument type!", &Arg, 2007 FT->getParamType(i)); 2008 Assert(Arg.getType()->isFirstClassType(), 2009 "Function arguments must have first-class types!", &Arg); 2010 if (!isLLVMdotName) { 2011 Assert(!Arg.getType()->isMetadataTy(), 2012 "Function takes metadata but isn't an intrinsic", &Arg, &F); 2013 Assert(!Arg.getType()->isTokenTy(), 2014 "Function takes token but isn't an intrinsic", &Arg, &F); 2015 } 2016 2017 // Check that swifterror argument is only used by loads and stores. 2018 if (Attrs.hasAttribute(i+1, Attribute::SwiftError)) { 2019 verifySwiftErrorValue(&Arg); 2020 } 2021 ++i; 2022 } 2023 2024 if (!isLLVMdotName) 2025 Assert(!F.getReturnType()->isTokenTy(), 2026 "Function returns a token but isn't an intrinsic", &F); 2027 2028 // Get the function metadata attachments. 2029 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs; 2030 F.getAllMetadata(MDs); 2031 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync"); 2032 verifyFunctionMetadata(MDs); 2033 2034 // Check validity of the personality function 2035 if (F.hasPersonalityFn()) { 2036 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts()); 2037 if (Per) 2038 Assert(Per->getParent() == F.getParent(), 2039 "Referencing personality function in another module!", 2040 &F, F.getParent(), Per, Per->getParent()); 2041 } 2042 2043 if (F.isMaterializable()) { 2044 // Function has a body somewhere we can't see. 2045 Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F, 2046 MDs.empty() ? nullptr : MDs.front().second); 2047 } else if (F.isDeclaration()) { 2048 for (const auto &I : MDs) { 2049 AssertDI(I.first != LLVMContext::MD_dbg, 2050 "function declaration may not have a !dbg attachment", &F); 2051 Assert(I.first != LLVMContext::MD_prof, 2052 "function declaration may not have a !prof attachment", &F); 2053 2054 // Verify the metadata itself. 2055 visitMDNode(*I.second); 2056 } 2057 Assert(!F.hasPersonalityFn(), 2058 "Function declaration shouldn't have a personality routine", &F); 2059 } else { 2060 // Verify that this function (which has a body) is not named "llvm.*". It 2061 // is not legal to define intrinsics.
2062 Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F); 2063 2064 // Check the entry node 2065 const BasicBlock *Entry = &F.getEntryBlock(); 2066 Assert(pred_empty(Entry), 2067 "Entry block to function must not have predecessors!", Entry); 2068 2069 // The address of the entry block cannot be taken, unless it is dead. 2070 if (Entry->hasAddressTaken()) { 2071 Assert(!BlockAddress::lookup(Entry)->isConstantUsed(), 2072 "blockaddress may not be used with the entry block!", Entry); 2073 } 2074 2075 unsigned NumDebugAttachments = 0, NumProfAttachments = 0; 2076 // Visit metadata attachments. 2077 for (const auto &I : MDs) { 2078 // Verify that the attachment is legal. 2079 switch (I.first) { 2080 default: 2081 break; 2082 case LLVMContext::MD_dbg: 2083 ++NumDebugAttachments; 2084 AssertDI(NumDebugAttachments == 1, 2085 "function must have a single !dbg attachment", &F, I.second); 2086 AssertDI(isa<DISubprogram>(I.second), 2087 "function !dbg attachment must be a subprogram", &F, I.second); 2088 break; 2089 case LLVMContext::MD_prof: 2090 ++NumProfAttachments; 2091 Assert(NumProfAttachments == 1, 2092 "function must have a single !prof attachment", &F, I.second); 2093 break; 2094 } 2095 2096 // Verify the metadata itself. 2097 visitMDNode(*I.second); 2098 } 2099 } 2100 2101 // If this function is actually an intrinsic, verify that it is only used in 2102 // direct call/invokes, never having its "address taken". 2103 // Only do this if the module is materialized, otherwise we don't have all the 2104 // uses. 2105 if (F.getIntrinsicID() && F.getParent()->isMaterialized()) { 2106 const User *U; 2107 if (F.hasAddressTaken(&U)) 2108 Assert(false, "Invalid user of intrinsic instruction!", U); 2109 } 2110 2111 Assert(!F.hasDLLImportStorageClass() || 2112 (F.isDeclaration() && F.hasExternalLinkage()) || 2113 F.hasAvailableExternallyLinkage(), 2114 "Function is marked as dllimport, but not external.", &F); 2115 2116 auto *N = F.getSubprogram(); 2117 if (!N) 2118 return; 2119 2120 visitDISubprogram(*N); 2121 2122 // Check that all !dbg attachments lead back to N (or, at least, another 2123 // subprogram that describes the same function). 2124 // 2125 // FIXME: Check this incrementally while visiting !dbg attachments. 2126 // FIXME: Only check when N is the canonical subprogram for F. 2127 SmallPtrSet<const MDNode *, 32> Seen; 2128 for (auto &BB : F) 2129 for (auto &I : BB) { 2130 // Be careful about using DILocation here since we might be dealing with 2131 // broken code (this is the Verifier after all). 2132 DILocation *DL = 2133 dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode()); 2134 if (!DL) 2135 continue; 2136 if (!Seen.insert(DL).second) 2137 continue; 2138 2139 DILocalScope *Scope = DL->getInlinedAtScope(); 2140 if (Scope && !Seen.insert(Scope).second) 2141 continue; 2142 2143 DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr; 2144 2145 // Scope and SP could be the same MDNode and we don't want to skip 2146 // validation in that case. 2147 if (SP && ((Scope != SP) && !Seen.insert(SP).second)) 2148 continue; 2149 2150 // FIXME: Once N is canonical, check "SP == &N". 2151 AssertDI(SP->describes(&F), 2152 "!dbg attachment points at wrong subprogram for function", N, &F, 2153 &I, DL, Scope, SP); 2154 } 2155 } 2156 2157 // visitBasicBlock - Verify that a basic block is well formed... 2158 // 2159 void Verifier::visitBasicBlock(BasicBlock &BB) { 2160 InstsInThisBlock.clear(); 2161 2162 // Ensure that basic blocks have terminators!
2163 Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB); 2164 2165 // Check constraints that this basic block imposes on all of the PHI nodes in 2166 // it. 2167 if (isa<PHINode>(BB.front())) { 2168 SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB)); 2169 SmallVector<std::pair<BasicBlock*, Value*>, 8> Values; 2170 std::sort(Preds.begin(), Preds.end()); 2171 PHINode *PN; 2172 for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I));++I) { 2173 // Ensure that PHI nodes have at least one entry! 2174 Assert(PN->getNumIncomingValues() != 0, 2175 "PHI nodes must have at least one entry. If the block is dead, " 2176 "the PHI should be removed!", 2177 PN); 2178 Assert(PN->getNumIncomingValues() == Preds.size(), 2179 "PHINode should have one entry for each predecessor of its " 2180 "parent basic block!", 2181 PN); 2182 2183 // Get and sort all incoming values in the PHI node... 2184 Values.clear(); 2185 Values.reserve(PN->getNumIncomingValues()); 2186 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 2187 Values.push_back(std::make_pair(PN->getIncomingBlock(i), 2188 PN->getIncomingValue(i))); 2189 std::sort(Values.begin(), Values.end()); 2190 2191 for (unsigned i = 0, e = Values.size(); i != e; ++i) { 2192 // Check to make sure that if there is more than one entry for a 2193 // particular basic block in this PHI node, that the incoming values are 2194 // all identical. 2195 // 2196 Assert(i == 0 || Values[i].first != Values[i - 1].first || 2197 Values[i].second == Values[i - 1].second, 2198 "PHI node has multiple entries for the same basic block with " 2199 "different incoming values!", 2200 PN, Values[i].first, Values[i].second, Values[i - 1].second); 2201 2202 // Check to make sure that the predecessors and PHI node entries are 2203 // matched up. 2204 Assert(Values[i].first == Preds[i], 2205 "PHI node entries do not match predecessors!", PN, 2206 Values[i].first, Preds[i]); 2207 } 2208 } 2209 } 2210 2211 // Check that all instructions have their parent pointers set up correctly. 2212 for (auto &I : BB) 2213 { 2214 Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!"); 2215 } 2216 } 2217 2218 void Verifier::visitTerminatorInst(TerminatorInst &I) { 2219 // Ensure that terminators only exist at the end of the basic block. 2220 Assert(&I == I.getParent()->getTerminator(), 2221 "Terminator found in the middle of a basic block!", I.getParent()); 2222 visitInstruction(I); 2223 } 2224 2225 void Verifier::visitBranchInst(BranchInst &BI) { 2226 if (BI.isConditional()) { 2227 Assert(BI.getCondition()->getType()->isIntegerTy(1), 2228 "Branch condition is not 'i1' type!", &BI, BI.getCondition()); 2229 } 2230 visitTerminatorInst(BI); 2231 } 2232 2233 void Verifier::visitReturnInst(ReturnInst &RI) { 2234 Function *F = RI.getParent()->getParent(); 2235 unsigned N = RI.getNumOperands(); 2236 if (F->getReturnType()->isVoidTy()) 2237 Assert(N == 0, 2238 "Found return instr that returns non-void in Function of void " 2239 "return type!", 2240 &RI, F->getReturnType()); 2241 else 2242 Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(), 2243 "Function return type does not match operand " 2244 "type of return inst!", 2245 &RI, F->getReturnType()); 2246 2247 // Check to make sure that the return value has necessary properties for 2248 // terminators... 
2249 visitTerminatorInst(RI); 2250 } 2251 2252 void Verifier::visitSwitchInst(SwitchInst &SI) { 2253 // Check to make sure that all of the constants in the switch instruction 2254 // have the same type as the switched-on value. 2255 Type *SwitchTy = SI.getCondition()->getType(); 2256 SmallPtrSet<ConstantInt*, 32> Constants; 2257 for (auto &Case : SI.cases()) { 2258 Assert(Case.getCaseValue()->getType() == SwitchTy, 2259 "Switch constants must all be same type as switch value!", &SI); 2260 Assert(Constants.insert(Case.getCaseValue()).second, 2261 "Duplicate integer as switch case", &SI, Case.getCaseValue()); 2262 } 2263 2264 visitTerminatorInst(SI); 2265 } 2266 2267 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) { 2268 Assert(BI.getAddress()->getType()->isPointerTy(), 2269 "Indirectbr operand must have pointer type!", &BI); 2270 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i) 2271 Assert(BI.getDestination(i)->getType()->isLabelTy(), 2272 "Indirectbr destinations must all have pointer type!", &BI); 2273 2274 visitTerminatorInst(BI); 2275 } 2276 2277 void Verifier::visitSelectInst(SelectInst &SI) { 2278 Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1), 2279 SI.getOperand(2)), 2280 "Invalid operands for select instruction!", &SI); 2281 2282 Assert(SI.getTrueValue()->getType() == SI.getType(), 2283 "Select values must have same type as select instruction!", &SI); 2284 visitInstruction(SI); 2285 } 2286 2287 /// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of 2288 /// a pass, if any exist, it's an error. 2289 /// 2290 void Verifier::visitUserOp1(Instruction &I) { 2291 Assert(false, "User-defined operators should not live outside of a pass!", &I); 2292 } 2293 2294 void Verifier::visitTruncInst(TruncInst &I) { 2295 // Get the source and destination types 2296 Type *SrcTy = I.getOperand(0)->getType(); 2297 Type *DestTy = I.getType(); 2298 2299 // Get the size of the types in bits, we'll need this later 2300 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2301 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2302 2303 Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I); 2304 Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I); 2305 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2306 "trunc source and destination must both be a vector or neither", &I); 2307 Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I); 2308 2309 visitInstruction(I); 2310 } 2311 2312 void Verifier::visitZExtInst(ZExtInst &I) { 2313 // Get the source and destination types 2314 Type *SrcTy = I.getOperand(0)->getType(); 2315 Type *DestTy = I.getType(); 2316 2317 // Get the size of the types in bits, we'll need this later 2318 Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I); 2319 Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I); 2320 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2321 "zext source and destination must both be a vector or neither", &I); 2322 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2323 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2324 2325 Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I); 2326 2327 visitInstruction(I); 2328 } 2329 2330 void Verifier::visitSExtInst(SExtInst &I) { 2331 // Get the source and destination types 2332 Type *SrcTy = I.getOperand(0)->getType(); 2333 Type *DestTy = I.getType(); 2334 2335 // Get the size of the types in bits, we'll need this later 2336 unsigned 
SrcBitSize = SrcTy->getScalarSizeInBits(); 2337 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2338 2339 Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I); 2340 Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I); 2341 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2342 "sext source and destination must both be a vector or neither", &I); 2343 Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I); 2344 2345 visitInstruction(I); 2346 } 2347 2348 void Verifier::visitFPTruncInst(FPTruncInst &I) { 2349 // Get the source and destination types 2350 Type *SrcTy = I.getOperand(0)->getType(); 2351 Type *DestTy = I.getType(); 2352 // Get the size of the types in bits, we'll need this later 2353 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2354 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2355 2356 Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I); 2357 Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I); 2358 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2359 "fptrunc source and destination must both be a vector or neither", &I); 2360 Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I); 2361 2362 visitInstruction(I); 2363 } 2364 2365 void Verifier::visitFPExtInst(FPExtInst &I) { 2366 // Get the source and destination types 2367 Type *SrcTy = I.getOperand(0)->getType(); 2368 Type *DestTy = I.getType(); 2369 2370 // Get the size of the types in bits, we'll need this later 2371 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2372 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2373 2374 Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I); 2375 Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I); 2376 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2377 "fpext source and destination must both be a vector or neither", &I); 2378 Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I); 2379 2380 visitInstruction(I); 2381 } 2382 2383 void Verifier::visitUIToFPInst(UIToFPInst &I) { 2384 // Get the source and destination types 2385 Type *SrcTy = I.getOperand(0)->getType(); 2386 Type *DestTy = I.getType(); 2387 2388 bool SrcVec = SrcTy->isVectorTy(); 2389 bool DstVec = DestTy->isVectorTy(); 2390 2391 Assert(SrcVec == DstVec, 2392 "UIToFP source and dest must both be vector or scalar", &I); 2393 Assert(SrcTy->isIntOrIntVectorTy(), 2394 "UIToFP source must be integer or integer vector", &I); 2395 Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector", 2396 &I); 2397 2398 if (SrcVec && DstVec) 2399 Assert(cast<VectorType>(SrcTy)->getNumElements() == 2400 cast<VectorType>(DestTy)->getNumElements(), 2401 "UIToFP source and dest vector length mismatch", &I); 2402 2403 visitInstruction(I); 2404 } 2405 2406 void Verifier::visitSIToFPInst(SIToFPInst &I) { 2407 // Get the source and destination types 2408 Type *SrcTy = I.getOperand(0)->getType(); 2409 Type *DestTy = I.getType(); 2410 2411 bool SrcVec = SrcTy->isVectorTy(); 2412 bool DstVec = DestTy->isVectorTy(); 2413 2414 Assert(SrcVec == DstVec, 2415 "SIToFP source and dest must both be vector or scalar", &I); 2416 Assert(SrcTy->isIntOrIntVectorTy(), 2417 "SIToFP source must be integer or integer vector", &I); 2418 Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector", 2419 &I); 2420 2421 if (SrcVec && DstVec) 2422 Assert(cast<VectorType>(SrcTy)->getNumElements() == 2423 cast<VectorType>(DestTy)->getNumElements(), 2424 "SIToFP source 
and dest vector length mismatch", &I); 2425 2426 visitInstruction(I); 2427 } 2428 2429 void Verifier::visitFPToUIInst(FPToUIInst &I) { 2430 // Get the source and destination types 2431 Type *SrcTy = I.getOperand(0)->getType(); 2432 Type *DestTy = I.getType(); 2433 2434 bool SrcVec = SrcTy->isVectorTy(); 2435 bool DstVec = DestTy->isVectorTy(); 2436 2437 Assert(SrcVec == DstVec, 2438 "FPToUI source and dest must both be vector or scalar", &I); 2439 Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", 2440 &I); 2441 Assert(DestTy->isIntOrIntVectorTy(), 2442 "FPToUI result must be integer or integer vector", &I); 2443 2444 if (SrcVec && DstVec) 2445 Assert(cast<VectorType>(SrcTy)->getNumElements() == 2446 cast<VectorType>(DestTy)->getNumElements(), 2447 "FPToUI source and dest vector length mismatch", &I); 2448 2449 visitInstruction(I); 2450 } 2451 2452 void Verifier::visitFPToSIInst(FPToSIInst &I) { 2453 // Get the source and destination types 2454 Type *SrcTy = I.getOperand(0)->getType(); 2455 Type *DestTy = I.getType(); 2456 2457 bool SrcVec = SrcTy->isVectorTy(); 2458 bool DstVec = DestTy->isVectorTy(); 2459 2460 Assert(SrcVec == DstVec, 2461 "FPToSI source and dest must both be vector or scalar", &I); 2462 Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", 2463 &I); 2464 Assert(DestTy->isIntOrIntVectorTy(), 2465 "FPToSI result must be integer or integer vector", &I); 2466 2467 if (SrcVec && DstVec) 2468 Assert(cast<VectorType>(SrcTy)->getNumElements() == 2469 cast<VectorType>(DestTy)->getNumElements(), 2470 "FPToSI source and dest vector length mismatch", &I); 2471 2472 visitInstruction(I); 2473 } 2474 2475 void Verifier::visitPtrToIntInst(PtrToIntInst &I) { 2476 // Get the source and destination types 2477 Type *SrcTy = I.getOperand(0)->getType(); 2478 Type *DestTy = I.getType(); 2479 2480 Assert(SrcTy->getScalarType()->isPointerTy(), 2481 "PtrToInt source must be pointer", &I); 2482 2483 if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType())) 2484 Assert(!DL.isNonIntegralPointerType(PTy), 2485 "ptrtoint not supported for non-integral pointers"); 2486 2487 Assert(DestTy->getScalarType()->isIntegerTy(), 2488 "PtrToInt result must be integral", &I); 2489 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch", 2490 &I); 2491 2492 if (SrcTy->isVectorTy()) { 2493 VectorType *VSrc = dyn_cast<VectorType>(SrcTy); 2494 VectorType *VDest = dyn_cast<VectorType>(DestTy); 2495 Assert(VSrc->getNumElements() == VDest->getNumElements(), 2496 "PtrToInt Vector width mismatch", &I); 2497 } 2498 2499 visitInstruction(I); 2500 } 2501 2502 void Verifier::visitIntToPtrInst(IntToPtrInst &I) { 2503 // Get the source and destination types 2504 Type *SrcTy = I.getOperand(0)->getType(); 2505 Type *DestTy = I.getType(); 2506 2507 Assert(SrcTy->getScalarType()->isIntegerTy(), 2508 "IntToPtr source must be an integral", &I); 2509 Assert(DestTy->getScalarType()->isPointerTy(), 2510 "IntToPtr result must be a pointer", &I); 2511 2512 if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType())) 2513 Assert(!DL.isNonIntegralPointerType(PTy), 2514 "inttoptr not supported for non-integral pointers"); 2515 2516 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch", 2517 &I); 2518 if (SrcTy->isVectorTy()) { 2519 VectorType *VSrc = dyn_cast<VectorType>(SrcTy); 2520 VectorType *VDest = dyn_cast<VectorType>(DestTy); 2521 Assert(VSrc->getNumElements() == VDest->getNumElements(), 2522 "IntToPtr Vector width mismatch", &I); 2523 } 
2524 visitInstruction(I); 2525 } 2526 2527 void Verifier::visitBitCastInst(BitCastInst &I) { 2528 Assert( 2529 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()), 2530 "Invalid bitcast", &I); 2531 visitInstruction(I); 2532 } 2533 2534 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) { 2535 Type *SrcTy = I.getOperand(0)->getType(); 2536 Type *DestTy = I.getType(); 2537 2538 Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer", 2539 &I); 2540 Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer", 2541 &I); 2542 Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(), 2543 "AddrSpaceCast must be between different address spaces", &I); 2544 if (SrcTy->isVectorTy()) 2545 Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(), 2546 "AddrSpaceCast vector pointer number of elements mismatch", &I); 2547 visitInstruction(I); 2548 } 2549 2550 /// visitPHINode - Ensure that a PHI node is well formed. 2551 /// 2552 void Verifier::visitPHINode(PHINode &PN) { 2553 // Ensure that the PHI nodes are all grouped together at the top of the block. 2554 // This can be tested by checking whether the instruction before this is 2555 // either nonexistent (because this is begin()) or is a PHI node. If not, 2556 // then there is some other instruction before a PHI. 2557 Assert(&PN == &PN.getParent()->front() || 2558 isa<PHINode>(--BasicBlock::iterator(&PN)), 2559 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent()); 2560 2561 // Check that a PHI doesn't yield a Token. 2562 Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!"); 2563 2564 // Check that all of the values of the PHI node have the same type as the 2565 // result, and that the incoming blocks are really basic blocks. 2566 for (Value *IncValue : PN.incoming_values()) { 2567 Assert(PN.getType() == IncValue->getType(), 2568 "PHI node operands are not the same type as the result!", &PN); 2569 } 2570 2571 // All other PHI node constraints are checked in the visitBasicBlock method. 2572 2573 visitInstruction(PN); 2574 } 2575 2576 void Verifier::verifyCallSite(CallSite CS) { 2577 Instruction *I = CS.getInstruction(); 2578 2579 Assert(CS.getCalledValue()->getType()->isPointerTy(), 2580 "Called function must be a pointer!", I); 2581 PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType()); 2582 2583 Assert(FPTy->getElementType()->isFunctionTy(), 2584 "Called function is not pointer to function type!", I); 2585 2586 Assert(FPTy->getElementType() == CS.getFunctionType(), 2587 "Called function is not the same type as the call!", I); 2588 2589 FunctionType *FTy = CS.getFunctionType(); 2590 2591 // Verify that the correct number of arguments are being passed 2592 if (FTy->isVarArg()) 2593 Assert(CS.arg_size() >= FTy->getNumParams(), 2594 "Called function requires more parameters than were provided!", I); 2595 else 2596 Assert(CS.arg_size() == FTy->getNumParams(), 2597 "Incorrect number of arguments passed to called function!", I); 2598 2599 // Verify that all arguments to the call match the function type. 
2600 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) 2601 Assert(CS.getArgument(i)->getType() == FTy->getParamType(i), 2602 "Call parameter type does not match function signature!", 2603 CS.getArgument(i), FTy->getParamType(i), I); 2604 2605 AttributeSet Attrs = CS.getAttributes(); 2606 2607 Assert(verifyAttributeCount(Attrs, CS.arg_size()), 2608 "Attribute after last parameter!", I); 2609 2610 // Verify call attributes. 2611 verifyFunctionAttrs(FTy, Attrs, I); 2612 2613 // Conservatively check the inalloca argument. 2614 // We have a bug if we can find that there is an underlying alloca without 2615 // inalloca. 2616 if (CS.hasInAllocaArgument()) { 2617 Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1); 2618 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets())) 2619 Assert(AI->isUsedWithInAlloca(), 2620 "inalloca argument for call has mismatched alloca", AI, I); 2621 } 2622 2623 // For each argument of the callsite, if it has the swifterror argument, 2624 // make sure the underlying alloca/parameter it comes from has a swifterror as 2625 // well. 2626 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) 2627 if (CS.paramHasAttr(i+1, Attribute::SwiftError)) { 2628 Value *SwiftErrorArg = CS.getArgument(i); 2629 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) { 2630 Assert(AI->isSwiftError(), 2631 "swifterror argument for call has mismatched alloca", AI, I); 2632 continue; 2633 } 2634 auto ArgI = dyn_cast<Argument>(SwiftErrorArg); 2635 Assert(ArgI, "swifterror argument should come from an alloca or parameter", SwiftErrorArg, I); 2636 Assert(ArgI->hasSwiftErrorAttr(), 2637 "swifterror argument for call has mismatched parameter", ArgI, I); 2638 } 2639 2640 if (FTy->isVarArg()) { 2641 // FIXME? is 'nest' even legal here? 2642 bool SawNest = false; 2643 bool SawReturned = false; 2644 2645 for (unsigned Idx = 1; Idx < 1 + FTy->getNumParams(); ++Idx) { 2646 if (Attrs.hasAttribute(Idx, Attribute::Nest)) 2647 SawNest = true; 2648 if (Attrs.hasAttribute(Idx, Attribute::Returned)) 2649 SawReturned = true; 2650 } 2651 2652 // Check attributes on the varargs part. 2653 for (unsigned Idx = 1 + FTy->getNumParams(); Idx <= CS.arg_size(); ++Idx) { 2654 Type *Ty = CS.getArgument(Idx-1)->getType(); 2655 verifyParameterAttrs(Attrs, Idx, Ty, false, I); 2656 2657 if (Attrs.hasAttribute(Idx, Attribute::Nest)) { 2658 Assert(!SawNest, "More than one parameter has attribute nest!", I); 2659 SawNest = true; 2660 } 2661 2662 if (Attrs.hasAttribute(Idx, Attribute::Returned)) { 2663 Assert(!SawReturned, "More than one parameter has attribute returned!", 2664 I); 2665 Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()), 2666 "Incompatible argument and return types for 'returned' " 2667 "attribute", 2668 I); 2669 SawReturned = true; 2670 } 2671 2672 Assert(!Attrs.hasAttribute(Idx, Attribute::StructRet), 2673 "Attribute 'sret' cannot be used for vararg call arguments!", I); 2674 2675 if (Attrs.hasAttribute(Idx, Attribute::InAlloca)) 2676 Assert(Idx == CS.arg_size(), "inalloca isn't on the last argument!", I); 2677 } 2678 } 2679 2680 // Verify that there's no metadata unless it's a direct call to an intrinsic. 
2681 if (CS.getCalledFunction() == nullptr || 2682 !CS.getCalledFunction()->getName().startswith("llvm.")) { 2683 for (Type *ParamTy : FTy->params()) { 2684 Assert(!ParamTy->isMetadataTy(), 2685 "Function has metadata parameter but isn't an intrinsic", I); 2686 Assert(!ParamTy->isTokenTy(), 2687 "Function has token parameter but isn't an intrinsic", I); 2688 } 2689 } 2690 2691 // Verify that indirect calls don't return tokens. 2692 if (CS.getCalledFunction() == nullptr) 2693 Assert(!FTy->getReturnType()->isTokenTy(), 2694 "Return type cannot be token for indirect call!"); 2695 2696 if (Function *F = CS.getCalledFunction()) 2697 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) 2698 visitIntrinsicCallSite(ID, CS); 2699 2700 // Verify that a callsite has at most one "deopt", at most one "funclet" and 2701 // at most one "gc-transition" operand bundle. 2702 bool FoundDeoptBundle = false, FoundFuncletBundle = false, 2703 FoundGCTransitionBundle = false; 2704 for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) { 2705 OperandBundleUse BU = CS.getOperandBundleAt(i); 2706 uint32_t Tag = BU.getTagID(); 2707 if (Tag == LLVMContext::OB_deopt) { 2708 Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I); 2709 FoundDeoptBundle = true; 2710 } else if (Tag == LLVMContext::OB_gc_transition) { 2711 Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles", 2712 I); 2713 FoundGCTransitionBundle = true; 2714 } else if (Tag == LLVMContext::OB_funclet) { 2715 Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I); 2716 FoundFuncletBundle = true; 2717 Assert(BU.Inputs.size() == 1, 2718 "Expected exactly one funclet bundle operand", I); 2719 Assert(isa<FuncletPadInst>(BU.Inputs.front()), 2720 "Funclet bundle operands should correspond to a FuncletPadInst", 2721 I); 2722 } 2723 } 2724 2725 // Verify that each inlinable callsite of a debug-info-bearing function in a 2726 // debug-info-bearing function has a debug location attached to it. Failure to 2727 // do so causes assertion failures when the inliner sets up inline scope info. 2728 if (I->getFunction()->getSubprogram() && CS.getCalledFunction() && 2729 CS.getCalledFunction()->getSubprogram()) 2730 Assert(I->getDebugLoc(), "inlinable function call in a function with debug " 2731 "info must have a !dbg location", 2732 I); 2733 2734 visitInstruction(*I); 2735 } 2736 2737 /// Two types are "congruent" if they are identical, or if they are both pointer 2738 /// types with different pointee types and the same address space. 
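/// A couple of illustrative examples: i32* and i8* are congruent (both are
/// pointers in address space 0), whereas i32* and i32 addrspace(1)* are not
/// (different address spaces), and i32* is not congruent with i32 (only one
/// of the two is a pointer).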
2739 static bool isTypeCongruent(Type *L, Type *R) { 2740 if (L == R) 2741 return true; 2742 PointerType *PL = dyn_cast<PointerType>(L); 2743 PointerType *PR = dyn_cast<PointerType>(R); 2744 if (!PL || !PR) 2745 return false; 2746 return PL->getAddressSpace() == PR->getAddressSpace(); 2747 } 2748 2749 static AttrBuilder getParameterABIAttributes(int I, AttributeSet Attrs) { 2750 static const Attribute::AttrKind ABIAttrs[] = { 2751 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca, 2752 Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf, 2753 Attribute::SwiftError}; 2754 AttrBuilder Copy; 2755 for (auto AK : ABIAttrs) { 2756 if (Attrs.hasAttribute(I + 1, AK)) 2757 Copy.addAttribute(AK); 2758 } 2759 if (Attrs.hasAttribute(I + 1, Attribute::Alignment)) 2760 Copy.addAlignmentAttr(Attrs.getParamAlignment(I + 1)); 2761 return Copy; 2762 } 2763 2764 void Verifier::verifyMustTailCall(CallInst &CI) { 2765 Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI); 2766 2767 // - The caller and callee prototypes must match. Pointer types of 2768 // parameters or return types may differ in pointee type, but not 2769 // address space. 2770 Function *F = CI.getParent()->getParent(); 2771 FunctionType *CallerTy = F->getFunctionType(); 2772 FunctionType *CalleeTy = CI.getFunctionType(); 2773 Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(), 2774 "cannot guarantee tail call due to mismatched parameter counts", &CI); 2775 Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(), 2776 "cannot guarantee tail call due to mismatched varargs", &CI); 2777 Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()), 2778 "cannot guarantee tail call due to mismatched return types", &CI); 2779 for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) { 2780 Assert( 2781 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)), 2782 "cannot guarantee tail call due to mismatched parameter types", &CI); 2783 } 2784 2785 // - The calling conventions of the caller and callee must match. 2786 Assert(F->getCallingConv() == CI.getCallingConv(), 2787 "cannot guarantee tail call due to mismatched calling conv", &CI); 2788 2789 // - All ABI-impacting function attributes, such as sret, byval, inreg, 2790 // returned, and inalloca, must match. 2791 AttributeSet CallerAttrs = F->getAttributes(); 2792 AttributeSet CalleeAttrs = CI.getAttributes(); 2793 for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) { 2794 AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs); 2795 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs); 2796 Assert(CallerABIAttrs == CalleeABIAttrs, 2797 "cannot guarantee tail call due to mismatched ABI impacting " 2798 "function attributes", 2799 &CI, CI.getOperand(I)); 2800 } 2801 2802 // - The call must immediately precede a :ref:`ret <i_ret>` instruction, 2803 // or a pointer bitcast followed by a ret instruction. 2804 // - The ret instruction must return the (possibly bitcasted) value 2805 // produced by the call or void. 2806 Value *RetVal = &CI; 2807 Instruction *Next = CI.getNextNode(); 2808 2809 // Handle the optional bitcast. 2810 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) { 2811 Assert(BI->getOperand(0) == RetVal, 2812 "bitcast following musttail call must use the call", BI); 2813 RetVal = BI; 2814 Next = BI->getNextNode(); 2815 } 2816 2817 // Check the return. 
2818 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next); 2819 Assert(Ret, "musttail call must precede a ret with an optional bitcast", 2820 &CI); 2821 Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal, 2822 "musttail call result must be returned", Ret); 2823 } 2824 2825 void Verifier::visitCallInst(CallInst &CI) { 2826 verifyCallSite(&CI); 2827 2828 if (CI.isMustTailCall()) 2829 verifyMustTailCall(CI); 2830 } 2831 2832 void Verifier::visitInvokeInst(InvokeInst &II) { 2833 verifyCallSite(&II); 2834 2835 // Verify that the first non-PHI instruction of the unwind destination is an 2836 // exception handling instruction. 2837 Assert( 2838 II.getUnwindDest()->isEHPad(), 2839 "The unwind destination does not have an exception handling instruction!", 2840 &II); 2841 2842 visitTerminatorInst(II); 2843 } 2844 2845 /// visitBinaryOperator - Check that both arguments to the binary operator are 2846 /// of the same type! 2847 /// 2848 void Verifier::visitBinaryOperator(BinaryOperator &B) { 2849 Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(), 2850 "Both operands to a binary operator are not of the same type!", &B); 2851 2852 switch (B.getOpcode()) { 2853 // Check that integer arithmetic operators are only used with 2854 // integral operands. 2855 case Instruction::Add: 2856 case Instruction::Sub: 2857 case Instruction::Mul: 2858 case Instruction::SDiv: 2859 case Instruction::UDiv: 2860 case Instruction::SRem: 2861 case Instruction::URem: 2862 Assert(B.getType()->isIntOrIntVectorTy(), 2863 "Integer arithmetic operators only work with integral types!", &B); 2864 Assert(B.getType() == B.getOperand(0)->getType(), 2865 "Integer arithmetic operators must have same type " 2866 "for operands and result!", 2867 &B); 2868 break; 2869 // Check that floating-point arithmetic operators are only used with 2870 // floating-point operands. 2871 case Instruction::FAdd: 2872 case Instruction::FSub: 2873 case Instruction::FMul: 2874 case Instruction::FDiv: 2875 case Instruction::FRem: 2876 Assert(B.getType()->isFPOrFPVectorTy(), 2877 "Floating-point arithmetic operators only work with " 2878 "floating-point types!", 2879 &B); 2880 Assert(B.getType() == B.getOperand(0)->getType(), 2881 "Floating-point arithmetic operators must have same type " 2882 "for operands and result!", 2883 &B); 2884 break; 2885 // Check that logical operators are only used with integral operands.
2886 case Instruction::And: 2887 case Instruction::Or: 2888 case Instruction::Xor: 2889 Assert(B.getType()->isIntOrIntVectorTy(), 2890 "Logical operators only work with integral types!", &B); 2891 Assert(B.getType() == B.getOperand(0)->getType(), 2892 "Logical operators must have same type for operands and result!", 2893 &B); 2894 break; 2895 case Instruction::Shl: 2896 case Instruction::LShr: 2897 case Instruction::AShr: 2898 Assert(B.getType()->isIntOrIntVectorTy(), 2899 "Shifts only work with integral types!", &B); 2900 Assert(B.getType() == B.getOperand(0)->getType(), 2901 "Shift return type must be same as operands!", &B); 2902 break; 2903 default: 2904 llvm_unreachable("Unknown BinaryOperator opcode!"); 2905 } 2906 2907 visitInstruction(B); 2908 } 2909 2910 void Verifier::visitICmpInst(ICmpInst &IC) { 2911 // Check that the operands are the same type 2912 Type *Op0Ty = IC.getOperand(0)->getType(); 2913 Type *Op1Ty = IC.getOperand(1)->getType(); 2914 Assert(Op0Ty == Op1Ty, 2915 "Both operands to ICmp instruction are not of the same type!", &IC); 2916 // Check that the operands are the right type 2917 Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->getScalarType()->isPointerTy(), 2918 "Invalid operand types for ICmp instruction", &IC); 2919 // Check that the predicate is valid. 2920 Assert(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE && 2921 IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE, 2922 "Invalid predicate in ICmp instruction!", &IC); 2923 2924 visitInstruction(IC); 2925 } 2926 2927 void Verifier::visitFCmpInst(FCmpInst &FC) { 2928 // Check that the operands are the same type 2929 Type *Op0Ty = FC.getOperand(0)->getType(); 2930 Type *Op1Ty = FC.getOperand(1)->getType(); 2931 Assert(Op0Ty == Op1Ty, 2932 "Both operands to FCmp instruction are not of the same type!", &FC); 2933 // Check that the operands are the right type 2934 Assert(Op0Ty->isFPOrFPVectorTy(), 2935 "Invalid operand types for FCmp instruction", &FC); 2936 // Check that the predicate is valid. 
2937 Assert(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE && 2938 FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE, 2939 "Invalid predicate in FCmp instruction!", &FC); 2940 2941 visitInstruction(FC); 2942 } 2943 2944 void Verifier::visitExtractElementInst(ExtractElementInst &EI) { 2945 Assert( 2946 ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)), 2947 "Invalid extractelement operands!", &EI); 2948 visitInstruction(EI); 2949 } 2950 2951 void Verifier::visitInsertElementInst(InsertElementInst &IE) { 2952 Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1), 2953 IE.getOperand(2)), 2954 "Invalid insertelement operands!", &IE); 2955 visitInstruction(IE); 2956 } 2957 2958 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) { 2959 Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1), 2960 SV.getOperand(2)), 2961 "Invalid shufflevector operands!", &SV); 2962 visitInstruction(SV); 2963 } 2964 2965 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) { 2966 Type *TargetTy = GEP.getPointerOperandType()->getScalarType(); 2967 2968 Assert(isa<PointerType>(TargetTy), 2969 "GEP base pointer is not a vector or a vector of pointers", &GEP); 2970 Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP); 2971 SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end()); 2972 Type *ElTy = 2973 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs); 2974 Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP); 2975 2976 Assert(GEP.getType()->getScalarType()->isPointerTy() && 2977 GEP.getResultElementType() == ElTy, 2978 "GEP is not of right type for indices!", &GEP, ElTy); 2979 2980 if (GEP.getType()->isVectorTy()) { 2981 // Additional checks for vector GEPs. 
2982 unsigned GEPWidth = GEP.getType()->getVectorNumElements(); 2983 if (GEP.getPointerOperandType()->isVectorTy()) 2984 Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(), 2985 "Vector GEP result width doesn't match operand's", &GEP); 2986 for (Value *Idx : Idxs) { 2987 Type *IndexTy = Idx->getType(); 2988 if (IndexTy->isVectorTy()) { 2989 unsigned IndexWidth = IndexTy->getVectorNumElements(); 2990 Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP); 2991 } 2992 Assert(IndexTy->getScalarType()->isIntegerTy(), 2993 "All GEP indices should be of integer type"); 2994 } 2995 } 2996 visitInstruction(GEP); 2997 } 2998 2999 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) { 3000 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper(); 3001 } 3002 3003 void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) { 3004 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) && 3005 "precondition violation"); 3006 3007 unsigned NumOperands = Range->getNumOperands(); 3008 Assert(NumOperands % 2 == 0, "Unfinished range!", Range); 3009 unsigned NumRanges = NumOperands / 2; 3010 Assert(NumRanges >= 1, "It should have at least one range!", Range); 3011 3012 ConstantRange LastRange(1); // Dummy initial value 3013 for (unsigned i = 0; i < NumRanges; ++i) { 3014 ConstantInt *Low = 3015 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i)); 3016 Assert(Low, "The lower limit must be an integer!", Low); 3017 ConstantInt *High = 3018 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1)); 3019 Assert(High, "The upper limit must be an integer!", High); 3020 Assert(High->getType() == Low->getType() && High->getType() == Ty, 3021 "Range types must match instruction type!", &I); 3022 3023 APInt HighV = High->getValue(); 3024 APInt LowV = Low->getValue(); 3025 ConstantRange CurRange(LowV, HighV); 3026 Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(), 3027 "Range must not be empty!", Range); 3028 if (i != 0) { 3029 Assert(CurRange.intersectWith(LastRange).isEmptySet(), 3030 "Intervals are overlapping", Range); 3031 Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order", 3032 Range); 3033 Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous", 3034 Range); 3035 } 3036 LastRange = ConstantRange(LowV, HighV); 3037 } 3038 if (NumRanges > 2) { 3039 APInt FirstLow = 3040 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue(); 3041 APInt FirstHigh = 3042 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue(); 3043 ConstantRange FirstRange(FirstLow, FirstHigh); 3044 Assert(FirstRange.intersectWith(LastRange).isEmptySet(), 3045 "Intervals are overlapping", Range); 3046 Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous", 3047 Range); 3048 } 3049 } 3050 3051 void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) { 3052 unsigned Size = DL.getTypeSizeInBits(Ty); 3053 Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I); 3054 Assert(!(Size & (Size - 1)), 3055 "atomic memory access' operand must have a power-of-two size", Ty, I); 3056 } 3057 3058 void Verifier::visitLoadInst(LoadInst &LI) { 3059 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType()); 3060 Assert(PTy, "Load operand must be a pointer.", &LI); 3061 Type *ElTy = LI.getType(); 3062 Assert(LI.getAlignment() <= Value::MaximumAlignment, 3063 "huge alignment values are unsupported", &LI); 3064 Assert(ElTy->isSized(), "loading 
unsized types is not allowed", &LI); 3065 if (LI.isAtomic()) { 3066 Assert(LI.getOrdering() != AtomicOrdering::Release && 3067 LI.getOrdering() != AtomicOrdering::AcquireRelease, 3068 "Load cannot have Release ordering", &LI); 3069 Assert(LI.getAlignment() != 0, 3070 "Atomic load must specify explicit alignment", &LI); 3071 Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() || 3072 ElTy->isFloatingPointTy(), 3073 "atomic load operand must have integer, pointer, or floating point " 3074 "type!", 3075 ElTy, &LI); 3076 checkAtomicMemAccessSize(ElTy, &LI); 3077 } else { 3078 Assert(LI.getSynchScope() == CrossThread, 3079 "Non-atomic load cannot have SynchronizationScope specified", &LI); 3080 } 3081 3082 visitInstruction(LI); 3083 } 3084 3085 void Verifier::visitStoreInst(StoreInst &SI) { 3086 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType()); 3087 Assert(PTy, "Store operand must be a pointer.", &SI); 3088 Type *ElTy = PTy->getElementType(); 3089 Assert(ElTy == SI.getOperand(0)->getType(), 3090 "Stored value type does not match pointer operand type!", &SI, ElTy); 3091 Assert(SI.getAlignment() <= Value::MaximumAlignment, 3092 "huge alignment values are unsupported", &SI); 3093 Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI); 3094 if (SI.isAtomic()) { 3095 Assert(SI.getOrdering() != AtomicOrdering::Acquire && 3096 SI.getOrdering() != AtomicOrdering::AcquireRelease, 3097 "Store cannot have Acquire ordering", &SI); 3098 Assert(SI.getAlignment() != 0, 3099 "Atomic store must specify explicit alignment", &SI); 3100 Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() || 3101 ElTy->isFloatingPointTy(), 3102 "atomic store operand must have integer, pointer, or floating point " 3103 "type!", 3104 ElTy, &SI); 3105 checkAtomicMemAccessSize(ElTy, &SI); 3106 } else { 3107 Assert(SI.getSynchScope() == CrossThread, 3108 "Non-atomic store cannot have SynchronizationScope specified", &SI); 3109 } 3110 visitInstruction(SI); 3111 } 3112 3113 /// Check that SwiftErrorVal is used as a swifterror argument in CS. 3114 void Verifier::verifySwiftErrorCallSite(CallSite CS, 3115 const Value *SwiftErrorVal) { 3116 unsigned Idx = 0; 3117 for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); 3118 I != E; ++I, ++Idx) { 3119 if (*I == SwiftErrorVal) { 3120 Assert(CS.paramHasAttr(Idx+1, Attribute::SwiftError), 3121 "swifterror value when used in a callsite should be marked " 3122 "with swifterror attribute", 3123 SwiftErrorVal, CS); 3124 } 3125 } 3126 } 3127 3128 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) { 3129 // Check that swifterror value is only used by loads, stores, or as 3130 // a swifterror argument. 3131 for (const User *U : SwiftErrorVal->users()) { 3132 Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) || 3133 isa<InvokeInst>(U), 3134 "swifterror value can only be loaded and stored from, or " 3135 "as a swifterror argument!", 3136 SwiftErrorVal, U); 3137 // If it is used by a store, check it is the second operand. 
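    // (That is, a swifterror value may appear as the pointer operand of a
    // store, but never as the value being stored.)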
3138 if (auto StoreI = dyn_cast<StoreInst>(U)) 3139 Assert(StoreI->getOperand(1) == SwiftErrorVal, 3140 "swifterror value should be the second operand when used " 3141 "by stores", SwiftErrorVal, U); 3142 if (auto CallI = dyn_cast<CallInst>(U)) 3143 verifySwiftErrorCallSite(const_cast<CallInst*>(CallI), SwiftErrorVal); 3144 if (auto II = dyn_cast<InvokeInst>(U)) 3145 verifySwiftErrorCallSite(const_cast<InvokeInst*>(II), SwiftErrorVal); 3146 } 3147 } 3148 3149 void Verifier::visitAllocaInst(AllocaInst &AI) { 3150 SmallPtrSet<Type*, 4> Visited; 3151 PointerType *PTy = AI.getType(); 3152 Assert(PTy->getAddressSpace() == 0, 3153 "Allocation instruction pointer not in the generic address space!", 3154 &AI); 3155 Assert(AI.getAllocatedType()->isSized(&Visited), 3156 "Cannot allocate unsized type", &AI); 3157 Assert(AI.getArraySize()->getType()->isIntegerTy(), 3158 "Alloca array size must have integer type", &AI); 3159 Assert(AI.getAlignment() <= Value::MaximumAlignment, 3160 "huge alignment values are unsupported", &AI); 3161 3162 if (AI.isSwiftError()) { 3163 verifySwiftErrorValue(&AI); 3164 } 3165 3166 visitInstruction(AI); 3167 } 3168 3169 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) { 3170 3171 // FIXME: more conditions??? 3172 Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic, 3173 "cmpxchg instructions must be atomic.", &CXI); 3174 Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic, 3175 "cmpxchg instructions must be atomic.", &CXI); 3176 Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered, 3177 "cmpxchg instructions cannot be unordered.", &CXI); 3178 Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered, 3179 "cmpxchg instructions cannot be unordered.", &CXI); 3180 Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()), 3181 "cmpxchg instructions failure argument shall be no stronger than the " 3182 "success argument", 3183 &CXI); 3184 Assert(CXI.getFailureOrdering() != AtomicOrdering::Release && 3185 CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease, 3186 "cmpxchg failure ordering cannot include release semantics", &CXI); 3187 3188 PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType()); 3189 Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI); 3190 Type *ElTy = PTy->getElementType(); 3191 Assert(ElTy->isIntegerTy() || ElTy->isPointerTy(), 3192 "cmpxchg operand must have integer or pointer type", 3193 ElTy, &CXI); 3194 checkAtomicMemAccessSize(ElTy, &CXI); 3195 Assert(ElTy == CXI.getOperand(1)->getType(), 3196 "Expected value type does not match pointer operand type!", &CXI, 3197 ElTy); 3198 Assert(ElTy == CXI.getOperand(2)->getType(), 3199 "Stored value type does not match pointer operand type!", &CXI, ElTy); 3200 visitInstruction(CXI); 3201 } 3202 3203 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) { 3204 Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic, 3205 "atomicrmw instructions must be atomic.", &RMWI); 3206 Assert(RMWI.getOrdering() != AtomicOrdering::Unordered, 3207 "atomicrmw instructions cannot be unordered.", &RMWI); 3208 PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType()); 3209 Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI); 3210 Type *ElTy = PTy->getElementType(); 3211 Assert(ElTy->isIntegerTy(), "atomicrmw operand must have integer type!", 3212 &RMWI, ElTy); 3213 checkAtomicMemAccessSize(ElTy, &RMWI); 3214 Assert(ElTy == RMWI.getOperand(1)->getType(), 3215 "Argument value type does not match pointer operand type!", 
&RMWI, 3216 ElTy); 3217 Assert(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() && 3218 RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP, 3219 "Invalid binary operation!", &RMWI); 3220 visitInstruction(RMWI); 3221 } 3222 3223 void Verifier::visitFenceInst(FenceInst &FI) { 3224 const AtomicOrdering Ordering = FI.getOrdering(); 3225 Assert(Ordering == AtomicOrdering::Acquire || 3226 Ordering == AtomicOrdering::Release || 3227 Ordering == AtomicOrdering::AcquireRelease || 3228 Ordering == AtomicOrdering::SequentiallyConsistent, 3229 "fence instructions may only have acquire, release, acq_rel, or " 3230 "seq_cst ordering.", 3231 &FI); 3232 visitInstruction(FI); 3233 } 3234 3235 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) { 3236 Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(), 3237 EVI.getIndices()) == EVI.getType(), 3238 "Invalid ExtractValueInst operands!", &EVI); 3239 3240 visitInstruction(EVI); 3241 } 3242 3243 void Verifier::visitInsertValueInst(InsertValueInst &IVI) { 3244 Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(), 3245 IVI.getIndices()) == 3246 IVI.getOperand(1)->getType(), 3247 "Invalid InsertValueInst operands!", &IVI); 3248 3249 visitInstruction(IVI); 3250 } 3251 3252 static Value *getParentPad(Value *EHPad) { 3253 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad)) 3254 return FPI->getParentPad(); 3255 3256 return cast<CatchSwitchInst>(EHPad)->getParentPad(); 3257 } 3258 3259 void Verifier::visitEHPadPredecessors(Instruction &I) { 3260 assert(I.isEHPad()); 3261 3262 BasicBlock *BB = I.getParent(); 3263 Function *F = BB->getParent(); 3264 3265 Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I); 3266 3267 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) { 3268 // The landingpad instruction defines its parent as a landing pad block. The 3269 // landing pad block may be branched to only by the unwind edge of an 3270 // invoke. 3271 for (BasicBlock *PredBB : predecessors(BB)) { 3272 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator()); 3273 Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB, 3274 "Block containing LandingPadInst must be jumped to " 3275 "only by the unwind edge of an invoke.", 3276 LPI); 3277 } 3278 return; 3279 } 3280 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) { 3281 if (!pred_empty(BB)) 3282 Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(), 3283 "Block containing CatchPadInst must be jumped to " 3284 "only by its catchswitch.", 3285 CPI); 3286 Assert(BB != CPI->getCatchSwitch()->getUnwindDest(), 3287 "Catchswitch cannot unwind to one of its catchpads", 3288 CPI->getCatchSwitch(), CPI); 3289 return; 3290 } 3291 3292 // Verify that each pred has a legal terminator with a legal to/from EH 3293 // pad relationship.
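  // Informally: starting from the predecessor's pad ("FromPad" below) and
  // repeatedly taking parent pads, we must reach ToPad's parent before we
  // reach ToPad itself, the caller level (token none), or a pad we have
  // already seen. For example, a cleanupret exiting a cleanuppad that shares
  // ToPad's parent is a legal unwind edge into ToPad.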
3294 Instruction *ToPad = &I; 3295 Value *ToPadParent = getParentPad(ToPad); 3296 for (BasicBlock *PredBB : predecessors(BB)) { 3297 TerminatorInst *TI = PredBB->getTerminator(); 3298 Value *FromPad; 3299 if (auto *II = dyn_cast<InvokeInst>(TI)) { 3300 Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB, 3301 "EH pad must be jumped to via an unwind edge", ToPad, II); 3302 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet)) 3303 FromPad = Bundle->Inputs[0]; 3304 else 3305 FromPad = ConstantTokenNone::get(II->getContext()); 3306 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) { 3307 FromPad = CRI->getOperand(0); 3308 Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI); 3309 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) { 3310 FromPad = CSI; 3311 } else { 3312 Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI); 3313 } 3314 3315 // The edge may exit from zero or more nested pads. 3316 SmallSet<Value *, 8> Seen; 3317 for (;; FromPad = getParentPad(FromPad)) { 3318 Assert(FromPad != ToPad, 3319 "EH pad cannot handle exceptions raised within it", FromPad, TI); 3320 if (FromPad == ToPadParent) { 3321 // This is a legal unwind edge. 3322 break; 3323 } 3324 Assert(!isa<ConstantTokenNone>(FromPad), 3325 "A single unwind edge may only enter one EH pad", TI); 3326 Assert(Seen.insert(FromPad).second, 3327 "EH pad jumps through a cycle of pads", FromPad); 3328 } 3329 } 3330 } 3331 3332 void Verifier::visitLandingPadInst(LandingPadInst &LPI) { 3333 // The landingpad instruction is ill-formed if it doesn't have any clauses and 3334 // isn't a cleanup. 3335 Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(), 3336 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI); 3337 3338 visitEHPadPredecessors(LPI); 3339 3340 if (!LandingPadResultTy) 3341 LandingPadResultTy = LPI.getType(); 3342 else 3343 Assert(LandingPadResultTy == LPI.getType(), 3344 "The landingpad instruction should have a consistent result type " 3345 "inside a function.", 3346 &LPI); 3347 3348 Function *F = LPI.getParent()->getParent(); 3349 Assert(F->hasPersonalityFn(), 3350 "LandingPadInst needs to be in a function with a personality.", &LPI); 3351 3352 // The landingpad instruction must be the first non-PHI instruction in the 3353 // block. 
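  // For example (illustrative), a well-formed landing pad block begins:
  //
  //   lpad:
  //     %lp = landingpad { i8*, i32 } cleanup
  //
  // PHI nodes may precede the landingpad, but no other instruction may.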
3354 Assert(LPI.getParent()->getLandingPadInst() == &LPI, 3355 "LandingPadInst not the first non-PHI instruction in the block.", 3356 &LPI); 3357 3358 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) { 3359 Constant *Clause = LPI.getClause(i); 3360 if (LPI.isCatch(i)) { 3361 Assert(isa<PointerType>(Clause->getType()), 3362 "Catch operand does not have pointer type!", &LPI); 3363 } else { 3364 Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI); 3365 Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause), 3366 "Filter operand is not an array of constants!", &LPI); 3367 } 3368 } 3369 3370 visitInstruction(LPI); 3371 } 3372 3373 void Verifier::visitResumeInst(ResumeInst &RI) { 3374 Assert(RI.getFunction()->hasPersonalityFn(), 3375 "ResumeInst needs to be in a function with a personality.", &RI); 3376 3377 if (!LandingPadResultTy) 3378 LandingPadResultTy = RI.getValue()->getType(); 3379 else 3380 Assert(LandingPadResultTy == RI.getValue()->getType(), 3381 "The resume instruction should have a consistent result type " 3382 "inside a function.", 3383 &RI); 3384 3385 visitTerminatorInst(RI); 3386 } 3387 3388 void Verifier::visitCatchPadInst(CatchPadInst &CPI) { 3389 BasicBlock *BB = CPI.getParent(); 3390 3391 Function *F = BB->getParent(); 3392 Assert(F->hasPersonalityFn(), 3393 "CatchPadInst needs to be in a function with a personality.", &CPI); 3394 3395 Assert(isa<CatchSwitchInst>(CPI.getParentPad()), 3396 "CatchPadInst needs to be directly nested in a CatchSwitchInst.", 3397 CPI.getParentPad()); 3398 3399 // The catchpad instruction must be the first non-PHI instruction in the 3400 // block. 3401 Assert(BB->getFirstNonPHI() == &CPI, 3402 "CatchPadInst not the first non-PHI instruction in the block.", &CPI); 3403 3404 visitEHPadPredecessors(CPI); 3405 visitFuncletPadInst(CPI); 3406 } 3407 3408 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) { 3409 Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)), 3410 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn, 3411 CatchReturn.getOperand(0)); 3412 3413 visitTerminatorInst(CatchReturn); 3414 } 3415 3416 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) { 3417 BasicBlock *BB = CPI.getParent(); 3418 3419 Function *F = BB->getParent(); 3420 Assert(F->hasPersonalityFn(), 3421 "CleanupPadInst needs to be in a function with a personality.", &CPI); 3422 3423 // The cleanuppad instruction must be the first non-PHI instruction in the 3424 // block. 
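  // For example (illustrative), a minimal cleanup funclet looks like:
  //
  //   cleanup:
  //     %cp = cleanuppad within none []
  //     ...
  //     cleanupret from %cp unwind to caller
  //
  // where "within none" means the pad's parent is the function itself.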
3425 Assert(BB->getFirstNonPHI() == &CPI, 3426 "CleanupPadInst not the first non-PHI instruction in the block.", 3427 &CPI); 3428 3429 auto *ParentPad = CPI.getParentPad(); 3430 Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad), 3431 "CleanupPadInst has an invalid parent.", &CPI); 3432 3433 visitEHPadPredecessors(CPI); 3434 visitFuncletPadInst(CPI); 3435 } 3436 3437 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) { 3438 User *FirstUser = nullptr; 3439 Value *FirstUnwindPad = nullptr; 3440 SmallVector<FuncletPadInst *, 8> Worklist({&FPI}); 3441 SmallSet<FuncletPadInst *, 8> Seen; 3442 3443 while (!Worklist.empty()) { 3444 FuncletPadInst *CurrentPad = Worklist.pop_back_val(); 3445 Assert(Seen.insert(CurrentPad).second, 3446 "FuncletPadInst must not be nested within itself", CurrentPad); 3447 Value *UnresolvedAncestorPad = nullptr; 3448 for (User *U : CurrentPad->users()) { 3449 BasicBlock *UnwindDest; 3450 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) { 3451 UnwindDest = CRI->getUnwindDest(); 3452 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) { 3453 // We allow catchswitch unwind to caller to nest 3454 // within an outer pad that unwinds somewhere else, 3455 // because catchswitch doesn't have a nounwind variant. 3456 // See e.g. SimplifyCFGOpt::SimplifyUnreachable. 3457 if (CSI->unwindsToCaller()) 3458 continue; 3459 UnwindDest = CSI->getUnwindDest(); 3460 } else if (auto *II = dyn_cast<InvokeInst>(U)) { 3461 UnwindDest = II->getUnwindDest(); 3462 } else if (isa<CallInst>(U)) { 3463 // Calls which don't unwind may be found inside funclet 3464 // pads that unwind somewhere else. We don't *require* 3465 // such calls to be annotated nounwind. 3466 continue; 3467 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) { 3468 // The unwind dest for a cleanup can only be found by 3469 // recursive search. Add it to the worklist, and we'll 3470 // search for its first use that determines where it unwinds. 3471 Worklist.push_back(CPI); 3472 continue; 3473 } else { 3474 Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U); 3475 continue; 3476 } 3477 3478 Value *UnwindPad; 3479 bool ExitsFPI; 3480 if (UnwindDest) { 3481 UnwindPad = UnwindDest->getFirstNonPHI(); 3482 if (!cast<Instruction>(UnwindPad)->isEHPad()) 3483 continue; 3484 Value *UnwindParent = getParentPad(UnwindPad); 3485 // Ignore unwind edges that don't exit CurrentPad. 3486 if (UnwindParent == CurrentPad) 3487 continue; 3488 // Determine whether the original funclet pad is exited, 3489 // and if we are scanning nested pads determine how many 3490 // of them are exited so we can stop searching their 3491 // children. 3492 Value *ExitedPad = CurrentPad; 3493 ExitsFPI = false; 3494 do { 3495 if (ExitedPad == &FPI) { 3496 ExitsFPI = true; 3497 // Now we can resolve any ancestors of CurrentPad up to 3498 // FPI, but not including FPI since we need to make sure 3499 // to check all direct users of FPI for consistency. 3500 UnresolvedAncestorPad = &FPI; 3501 break; 3502 } 3503 Value *ExitedParent = getParentPad(ExitedPad); 3504 if (ExitedParent == UnwindParent) { 3505 // ExitedPad is the ancestor-most pad which this unwind 3506 // edge exits, so we can resolve up to it, meaning that 3507 // ExitedParent is the first ancestor still unresolved. 3508 UnresolvedAncestorPad = ExitedParent; 3509 break; 3510 } 3511 ExitedPad = ExitedParent; 3512 } while (!isa<ConstantTokenNone>(ExitedPad)); 3513 } else { 3514 // Unwinding to caller exits all pads. 
3515 UnwindPad = ConstantTokenNone::get(FPI.getContext()); 3516 ExitsFPI = true; 3517 UnresolvedAncestorPad = &FPI; 3518 } 3519 3520 if (ExitsFPI) { 3521 // This unwind edge exits FPI. Make sure it agrees with other 3522 // such edges. 3523 if (FirstUser) { 3524 Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet " 3525 "pad must have the same unwind " 3526 "dest", 3527 &FPI, U, FirstUser); 3528 } else { 3529 FirstUser = U; 3530 FirstUnwindPad = UnwindPad; 3531 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds 3532 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) && 3533 getParentPad(UnwindPad) == getParentPad(&FPI)) 3534 SiblingFuncletInfo[&FPI] = cast<TerminatorInst>(U); 3535 } 3536 } 3537 // Make sure we visit all uses of FPI, but for nested pads stop as 3538 // soon as we know where they unwind to. 3539 if (CurrentPad != &FPI) 3540 break; 3541 } 3542 if (UnresolvedAncestorPad) { 3543 if (CurrentPad == UnresolvedAncestorPad) { 3544 // When CurrentPad is FPI itself, we don't mark it as resolved even if 3545 // we've found an unwind edge that exits it, because we need to verify 3546 // all direct uses of FPI. 3547 assert(CurrentPad == &FPI); 3548 continue; 3549 } 3550 // Pop off the worklist any nested pads that we've found an unwind 3551 // destination for. The pads on the worklist are the uncles, 3552 // great-uncles, etc. of CurrentPad. We've found an unwind destination 3553 // for all ancestors of CurrentPad up to but not including 3554 // UnresolvedAncestorPad. 3555 Value *ResolvedPad = CurrentPad; 3556 while (!Worklist.empty()) { 3557 Value *UnclePad = Worklist.back(); 3558 Value *AncestorPad = getParentPad(UnclePad); 3559 // Walk ResolvedPad up the ancestor list until we either find the 3560 // uncle's parent or the last resolved ancestor. 3561 while (ResolvedPad != AncestorPad) { 3562 Value *ResolvedParent = getParentPad(ResolvedPad); 3563 if (ResolvedParent == UnresolvedAncestorPad) { 3564 break; 3565 } 3566 ResolvedPad = ResolvedParent; 3567 } 3568 // If the resolved ancestor search didn't find the uncle's parent, 3569 // then the uncle is not yet resolved. 3570 if (ResolvedPad != AncestorPad) 3571 break; 3572 // This uncle is resolved, so pop it from the worklist. 3573 Worklist.pop_back(); 3574 } 3575 } 3576 } 3577 3578 if (FirstUnwindPad) { 3579 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) { 3580 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest(); 3581 Value *SwitchUnwindPad; 3582 if (SwitchUnwindDest) 3583 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI(); 3584 else 3585 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext()); 3586 Assert(SwitchUnwindPad == FirstUnwindPad, 3587 "Unwind edges out of a catch must have the same unwind dest as " 3588 "the parent catchswitch", 3589 &FPI, FirstUser, CatchSwitch); 3590 } 3591 } 3592 3593 visitInstruction(FPI); 3594 } 3595 3596 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) { 3597 BasicBlock *BB = CatchSwitch.getParent(); 3598 3599 Function *F = BB->getParent(); 3600 Assert(F->hasPersonalityFn(), 3601 "CatchSwitchInst needs to be in a function with a personality.", 3602 &CatchSwitch); 3603 3604 // The catchswitch instruction must be the first non-PHI instruction in the 3605 // block. 
3606 Assert(BB->getFirstNonPHI() == &CatchSwitch, 3607 "CatchSwitchInst not the first non-PHI instruction in the block.", 3608 &CatchSwitch); 3609 3610 auto *ParentPad = CatchSwitch.getParentPad(); 3611 Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad), 3612 "CatchSwitchInst has an invalid parent.", ParentPad); 3613 3614 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) { 3615 Instruction *I = UnwindDest->getFirstNonPHI(); 3616 Assert(I->isEHPad() && !isa<LandingPadInst>(I), 3617 "CatchSwitchInst must unwind to an EH block which is not a " 3618 "landingpad.", 3619 &CatchSwitch); 3620 3621 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds 3622 if (getParentPad(I) == ParentPad) 3623 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch; 3624 } 3625 3626 Assert(CatchSwitch.getNumHandlers() != 0, 3627 "CatchSwitchInst cannot have empty handler list", &CatchSwitch); 3628 3629 for (BasicBlock *Handler : CatchSwitch.handlers()) { 3630 Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()), 3631 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler); 3632 } 3633 3634 visitEHPadPredecessors(CatchSwitch); 3635 visitTerminatorInst(CatchSwitch); 3636 } 3637 3638 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) { 3639 Assert(isa<CleanupPadInst>(CRI.getOperand(0)), 3640 "CleanupReturnInst needs to be provided a CleanupPad", &CRI, 3641 CRI.getOperand(0)); 3642 3643 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) { 3644 Instruction *I = UnwindDest->getFirstNonPHI(); 3645 Assert(I->isEHPad() && !isa<LandingPadInst>(I), 3646 "CleanupReturnInst must unwind to an EH block which is not a " 3647 "landingpad.", 3648 &CRI); 3649 } 3650 3651 visitTerminatorInst(CRI); 3652 } 3653 3654 void Verifier::verifyDominatesUse(Instruction &I, unsigned i) { 3655 Instruction *Op = cast<Instruction>(I.getOperand(i)); 3656 // If we have an invalid invoke, don't try to compute the dominance. 3657 // We already reject it in the invoke specific checks and the dominance 3658 // computation doesn't handle multiple edges. 3659 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) { 3660 if (II->getNormalDest() == II->getUnwindDest()) 3661 return; 3662 } 3663 3664 // Quick check whether the def has already been encountered in the same block. 3665 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI 3666 // uses are defined to happen on the incoming edge, not at the instruction. 3667 // 3668 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata) 3669 // wrapping an SSA value, assert that we've already encountered it. See 3670 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
3671 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op)) 3672 return; 3673 3674 const Use &U = I.getOperandUse(i); 3675 Assert(DT.dominates(Op, U), 3676 "Instruction does not dominate all uses!", Op, &I); 3677 } 3678 3679 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) { 3680 Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null " 3681 "apply only to pointer types", &I); 3682 Assert(isa<LoadInst>(I), 3683 "dereferenceable, dereferenceable_or_null apply only to load" 3684 " instructions, use attributes for calls or invokes", &I); 3685 Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null " 3686 "take one operand!", &I); 3687 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0)); 3688 Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, " 3689 "dereferenceable_or_null metadata value must be an i64!", &I); 3690 } 3691 3692 /// verifyInstruction - Verify that an instruction is well formed. 3693 /// 3694 void Verifier::visitInstruction(Instruction &I) { 3695 BasicBlock *BB = I.getParent(); 3696 Assert(BB, "Instruction not embedded in basic block!", &I); 3697 3698 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential 3699 for (User *U : I.users()) { 3700 Assert(U != (User *)&I || !DT.isReachableFromEntry(BB), 3701 "Only PHI nodes may reference their own value!", &I); 3702 } 3703 } 3704 3705 // Check that void typed values don't have names 3706 Assert(!I.getType()->isVoidTy() || !I.hasName(), 3707 "Instruction has a name, but provides a void value!", &I); 3708 3709 // Check that the return value of the instruction is either void or a legal 3710 // value type. 3711 Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(), 3712 "Instruction returns a non-scalar type!", &I); 3713 3714 // Check that the instruction doesn't produce metadata. Calls are already 3715 // checked against the callee type. 3716 Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I), 3717 "Invalid use of metadata!", &I); 3718 3719 // Check that all uses of the instruction, if they are instructions 3720 // themselves, actually have parent basic blocks. If the use is not an 3721 // instruction, it is an error! 3722 for (Use &U : I.uses()) { 3723 if (Instruction *Used = dyn_cast<Instruction>(U.getUser())) 3724 Assert(Used->getParent() != nullptr, 3725 "Instruction referencing" 3726 " instruction not embedded in a basic block!", 3727 &I, Used); 3728 else { 3729 CheckFailed("Use of instruction is not an instruction!", U); 3730 return; 3731 } 3732 } 3733 3734 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { 3735 Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I); 3736 3737 // Check to make sure that only first-class-values are operands to 3738 // instructions. 3739 if (!I.getOperand(i)->getType()->isFirstClassType()) { 3740 Assert(false, "Instruction operands must be first-class values!", &I); 3741 } 3742 3743 if (Function *F = dyn_cast<Function>(I.getOperand(i))) { 3744 // Check to make sure that the "address of" an intrinsic function is never 3745 // taken. 3746 Assert( 3747 !F->isIntrinsic() || 3748 i == (isa<CallInst>(I) ? e - 1 : isa<InvokeInst>(I) ? 
e - 3 : 0), 3749 "Cannot take the address of an intrinsic!", &I); 3750 Assert( 3751 !F->isIntrinsic() || isa<CallInst>(I) || 3752 F->getIntrinsicID() == Intrinsic::donothing || 3753 F->getIntrinsicID() == Intrinsic::coro_resume || 3754 F->getIntrinsicID() == Intrinsic::coro_destroy || 3755 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void || 3756 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 || 3757 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint, 3758 "Cannot invoke an intrinsic other than donothing, patchpoint, " 3759 "statepoint, coro_resume or coro_destroy", 3760 &I); 3761 Assert(F->getParent() == &M, "Referencing function in another module!", 3762 &I, &M, F, F->getParent()); 3763 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) { 3764 Assert(OpBB->getParent() == BB->getParent(), 3765 "Referring to a basic block in another function!", &I); 3766 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) { 3767 Assert(OpArg->getParent() == BB->getParent(), 3768 "Referring to an argument in another function!", &I); 3769 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) { 3770 Assert(GV->getParent() == &M, "Referencing global in another module!", &I, 3771 &M, GV, GV->getParent()); 3772 } else if (isa<Instruction>(I.getOperand(i))) { 3773 verifyDominatesUse(I, i); 3774 } else if (isa<InlineAsm>(I.getOperand(i))) { 3775 Assert((i + 1 == e && isa<CallInst>(I)) || 3776 (i + 3 == e && isa<InvokeInst>(I)), 3777 "Cannot take the address of an inline asm!", &I); 3778 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) { 3779 if (CE->getType()->isPtrOrPtrVectorTy() || 3780 !DL.getNonIntegralAddressSpaces().empty()) { 3781 // If we have a ConstantExpr pointer, we need to see if it came from an 3782 // illegal bitcast. If the datalayout string specifies non-integral 3783 // address spaces then we also need to check for illegal ptrtoint and 3784 // inttoptr expressions. 
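        // For example, assuming the datalayout marks addrspace(2) as
        // non-integral ("ni:2"), a constant expression such as
        //   inttoptr (i64 123 to i8 addrspace(2)*)
        // would be flagged by the recursive walk below.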
3785 visitConstantExprsRecursively(CE); 3786 } 3787 } 3788 } 3789 3790 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) { 3791 Assert(I.getType()->isFPOrFPVectorTy(), 3792 "fpmath requires a floating point result!", &I); 3793 Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I); 3794 if (ConstantFP *CFP0 = 3795 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) { 3796 const APFloat &Accuracy = CFP0->getValueAPF(); 3797 Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle(), 3798 "fpmath accuracy must have float type", &I); 3799 Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(), 3800 "fpmath accuracy not a positive number!", &I); 3801 } else { 3802 Assert(false, "invalid fpmath accuracy!", &I); 3803 } 3804 } 3805 3806 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) { 3807 Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I), 3808 "Ranges are only for loads, calls and invokes!", &I); 3809 visitRangeMetadata(I, Range, I.getType()); 3810 } 3811 3812 if (I.getMetadata(LLVMContext::MD_nonnull)) { 3813 Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types", 3814 &I); 3815 Assert(isa<LoadInst>(I), 3816 "nonnull applies only to load instructions, use attributes" 3817 " for calls or invokes", 3818 &I); 3819 } 3820 3821 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable)) 3822 visitDereferenceableMetadata(I, MD); 3823 3824 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null)) 3825 visitDereferenceableMetadata(I, MD); 3826 3827 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa)) 3828 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA); 3829 3830 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) { 3831 Assert(I.getType()->isPointerTy(), "align applies only to pointer types", 3832 &I); 3833 Assert(isa<LoadInst>(I), "align applies only to load instructions, " 3834 "use attributes for calls or invokes", &I); 3835 Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I); 3836 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0)); 3837 Assert(CI && CI->getType()->isIntegerTy(64), 3838 "align metadata value must be an i64!", &I); 3839 uint64_t Align = CI->getZExtValue(); 3840 Assert(isPowerOf2_64(Align), 3841 "align metadata value must be a power of 2!", &I); 3842 Assert(Align <= Value::MaximumAlignment, 3843 "alignment is larger than the implementation-defined limit", &I); 3844 } 3845 3846 if (MDNode *N = I.getDebugLoc().getAsMDNode()) { 3847 AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N); 3848 visitMDNode(*N); 3849 } 3850 3851 if (auto *DII = dyn_cast<DbgInfoIntrinsic>(&I)) 3852 verifyFragmentExpression(*DII); 3853 3854 InstsInThisBlock.insert(&I); 3855 } 3856 3857 /// Allow intrinsics to be verified in different ways. 3858 void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) { 3859 Function *IF = CS.getCalledFunction(); 3860 Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!", 3861 IF); 3862 3863 // Verify that the intrinsic prototype lines up with what the .td files 3864 // describe.
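  // For example, a correctly mangled declaration of the overloaded memcpy
  // intrinsic for i8* operands and an i64 length is (illustrative):
  //
  //   declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
  //
  // The parameter and return types are matched against the intrinsic's IIT
  // descriptor table, and the ".p0i8.p0i8.i64" suffix is re-derived from the
  // overloaded types and compared with the declared name.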
3865 FunctionType *IFTy = IF->getFunctionType(); 3866 bool IsVarArg = IFTy->isVarArg(); 3867 3868 SmallVector<Intrinsic::IITDescriptor, 8> Table; 3869 getIntrinsicInfoTableEntries(ID, Table); 3870 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table; 3871 3872 SmallVector<Type *, 4> ArgTys; 3873 Assert(!Intrinsic::matchIntrinsicType(IFTy->getReturnType(), 3874 TableRef, ArgTys), 3875 "Intrinsic has incorrect return type!", IF); 3876 for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i) 3877 Assert(!Intrinsic::matchIntrinsicType(IFTy->getParamType(i), 3878 TableRef, ArgTys), 3879 "Intrinsic has incorrect argument type!", IF); 3880 3881 // Verify if the intrinsic call matches the vararg property. 3882 if (IsVarArg) 3883 Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef), 3884 "Intrinsic was not defined with variable arguments!", IF); 3885 else 3886 Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef), 3887 "Callsite was not defined with variable arguments!", IF); 3888 3889 // All descriptors should be absorbed by now. 3890 Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF); 3891 3892 // Now that we have the intrinsic ID and the actual argument types (and we 3893 // know they are legal for the intrinsic!) get the intrinsic name through the 3894 // usual means. This allows us to verify the mangling of argument types into 3895 // the name. 3896 const std::string ExpectedName = Intrinsic::getName(ID, ArgTys); 3897 Assert(ExpectedName == IF->getName(), 3898 "Intrinsic name not mangled correctly for type arguments! " 3899 "Should be: " + 3900 ExpectedName, 3901 IF); 3902 3903 // If the intrinsic takes MDNode arguments, verify that they are either global 3904 // or are local to *this* function. 3905 for (Value *V : CS.args()) 3906 if (auto *MD = dyn_cast<MetadataAsValue>(V)) 3907 visitMetadataAsValue(*MD, CS.getCaller()); 3908 3909 switch (ID) { 3910 default: 3911 break; 3912 case Intrinsic::coro_id: { 3913 auto *InfoArg = CS.getArgOperand(3)->stripPointerCasts(); 3914 if (isa<ConstantPointerNull>(InfoArg)) 3915 break; 3916 auto *GV = dyn_cast<GlobalVariable>(InfoArg); 3917 Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(), 3918 "info argument of llvm.coro.begin must refer to an initialized " 3919 "constant"); 3920 Constant *Init = GV->getInitializer(); 3921 Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init), 3922 "info argument of llvm.coro.begin must refer to either a struct or " 3923 "an array"); 3924 break; 3925 } 3926 case Intrinsic::ctlz: // llvm.ctlz 3927 case Intrinsic::cttz: // llvm.cttz 3928 Assert(isa<ConstantInt>(CS.getArgOperand(1)), 3929 "is_zero_undef argument of bit counting intrinsics must be a " 3930 "constant int", 3931 CS); 3932 break; 3933 case Intrinsic::experimental_constrained_fadd: 3934 case Intrinsic::experimental_constrained_fsub: 3935 case Intrinsic::experimental_constrained_fmul: 3936 case Intrinsic::experimental_constrained_fdiv: 3937 case Intrinsic::experimental_constrained_frem: 3938 visitConstrainedFPIntrinsic( 3939 cast<ConstrainedFPIntrinsic>(*CS.getInstruction())); 3940 break; 3941 case Intrinsic::dbg_declare: // llvm.dbg.declare 3942 Assert(isa<MetadataAsValue>(CS.getArgOperand(0)), 3943 "invalid llvm.dbg.declare intrinsic call 1", CS); 3944 visitDbgIntrinsic("declare", cast<DbgDeclareInst>(*CS.getInstruction())); 3945 break; 3946 case Intrinsic::dbg_value: // llvm.dbg.value 3947 visitDbgIntrinsic("value", cast<DbgValueInst>(*CS.getInstruction())); 3948 break; 3949 case Intrinsic::memcpy: 3950 case 
Intrinsic::memmove: 3951 case Intrinsic::memset: { 3952 ConstantInt *AlignCI = dyn_cast<ConstantInt>(CS.getArgOperand(3)); 3953 Assert(AlignCI, 3954 "alignment argument of memory intrinsics must be a constant int", 3955 CS); 3956 const APInt &AlignVal = AlignCI->getValue(); 3957 Assert(AlignCI->isZero() || AlignVal.isPowerOf2(), 3958 "alignment argument of memory intrinsics must be a power of 2", CS); 3959 Assert(isa<ConstantInt>(CS.getArgOperand(4)), 3960 "isvolatile argument of memory intrinsics must be a constant int", 3961 CS); 3962 break; 3963 } 3964 case Intrinsic::memcpy_element_atomic: { 3965 ConstantInt *ElementSizeCI = dyn_cast<ConstantInt>(CS.getArgOperand(3)); 3966 Assert(ElementSizeCI, "element size of the element-wise atomic memory " 3967 "intrinsic must be a constant int", 3968 CS); 3969 const APInt &ElementSizeVal = ElementSizeCI->getValue(); 3970 Assert(ElementSizeVal.isPowerOf2(), 3971 "element size of the element-wise atomic memory intrinsic " 3972 "must be a power of 2", 3973 CS); 3974 3975 auto IsValidAlignment = [&](uint64_t Alignment) { 3976 return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment); 3977 }; 3978 3979 uint64_t DstAlignment = CS.getParamAlignment(1), 3980 SrcAlignment = CS.getParamAlignment(2); 3981 3982 Assert(IsValidAlignment(DstAlignment), 3983 "incorrect alignment of the destination argument", 3984 CS); 3985 Assert(IsValidAlignment(SrcAlignment), 3986 "incorrect alignment of the source argument", 3987 CS); 3988 break; 3989 } 3990 case Intrinsic::gcroot: 3991 case Intrinsic::gcwrite: 3992 case Intrinsic::gcread: 3993 if (ID == Intrinsic::gcroot) { 3994 AllocaInst *AI = 3995 dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts()); 3996 Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS); 3997 Assert(isa<Constant>(CS.getArgOperand(1)), 3998 "llvm.gcroot parameter #2 must be a constant.", CS); 3999 if (!AI->getAllocatedType()->isPointerTy()) { 4000 Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)), 4001 "llvm.gcroot parameter #1 must either be a pointer alloca, " 4002 "or argument #2 must be a non-null constant.", 4003 CS); 4004 } 4005 } 4006 4007 Assert(CS.getParent()->getParent()->hasGC(), 4008 "Enclosing function does not use GC.", CS); 4009 break; 4010 case Intrinsic::init_trampoline: 4011 Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()), 4012 "llvm.init_trampoline parameter #2 must resolve to a function.", 4013 CS); 4014 break; 4015 case Intrinsic::prefetch: 4016 Assert(isa<ConstantInt>(CS.getArgOperand(1)) && 4017 isa<ConstantInt>(CS.getArgOperand(2)) && 4018 cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 && 4019 cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4, 4020 "invalid arguments to llvm.prefetch", CS); 4021 break; 4022 case Intrinsic::stackprotector: 4023 Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()), 4024 "llvm.stackprotector parameter #2 must resolve to an alloca.", CS); 4025 break; 4026 case Intrinsic::lifetime_start: 4027 case Intrinsic::lifetime_end: 4028 case Intrinsic::invariant_start: 4029 Assert(isa<ConstantInt>(CS.getArgOperand(0)), 4030 "size argument of memory use markers must be a constant integer", 4031 CS); 4032 break; 4033 case Intrinsic::invariant_end: 4034 Assert(isa<ConstantInt>(CS.getArgOperand(1)), 4035 "llvm.invariant.end parameter #2 must be a constant integer", CS); 4036 break; 4037 4038 case Intrinsic::localescape: { 4039 BasicBlock *BB = CS.getParent(); 4040 Assert(BB == &BB->getParent()->front(), 4041 "llvm.localescape used outside 
of entry block", CS); 4042 Assert(!SawFrameEscape, 4043 "multiple calls to llvm.localescape in one function", CS); 4044 for (Value *Arg : CS.args()) { 4045 if (isa<ConstantPointerNull>(Arg)) 4046 continue; // Null values are allowed as placeholders. 4047 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts()); 4048 Assert(AI && AI->isStaticAlloca(), 4049 "llvm.localescape only accepts static allocas", CS); 4050 } 4051 FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands(); 4052 SawFrameEscape = true; 4053 break; 4054 } 4055 case Intrinsic::localrecover: { 4056 Value *FnArg = CS.getArgOperand(0)->stripPointerCasts(); 4057 Function *Fn = dyn_cast<Function>(FnArg); 4058 Assert(Fn && !Fn->isDeclaration(), 4059 "llvm.localrecover first " 4060 "argument must be function defined in this module", 4061 CS); 4062 auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2)); 4063 Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int", 4064 CS); 4065 auto &Entry = FrameEscapeInfo[Fn]; 4066 Entry.second = unsigned( 4067 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1)); 4068 break; 4069 } 4070 4071 case Intrinsic::experimental_gc_statepoint: 4072 Assert(!CS.isInlineAsm(), 4073 "gc.statepoint support for inline assembly unimplemented", CS); 4074 Assert(CS.getParent()->getParent()->hasGC(), 4075 "Enclosing function does not use GC.", CS); 4076 4077 verifyStatepoint(CS); 4078 break; 4079 case Intrinsic::experimental_gc_result: { 4080 Assert(CS.getParent()->getParent()->hasGC(), 4081 "Enclosing function does not use GC.", CS); 4082 // Are we tied to a statepoint properly? 4083 CallSite StatepointCS(CS.getArgOperand(0)); 4084 const Function *StatepointFn = 4085 StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr; 4086 Assert(StatepointFn && StatepointFn->isDeclaration() && 4087 StatepointFn->getIntrinsicID() == 4088 Intrinsic::experimental_gc_statepoint, 4089 "gc.result operand #1 must be from a statepoint", CS, 4090 CS.getArgOperand(0)); 4091 4092 // Assert that result type matches wrapped callee. 4093 const Value *Target = StatepointCS.getArgument(2); 4094 auto *PT = cast<PointerType>(Target->getType()); 4095 auto *TargetFuncType = cast<FunctionType>(PT->getElementType()); 4096 Assert(CS.getType() == TargetFuncType->getReturnType(), 4097 "gc.result result type does not match wrapped callee", CS); 4098 break; 4099 } 4100 case Intrinsic::experimental_gc_relocate: { 4101 Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS); 4102 4103 Assert(isa<PointerType>(CS.getType()->getScalarType()), 4104 "gc.relocate must return a pointer or a vector of pointers", CS); 4105 4106 // Check that this relocate is correctly tied to the statepoint 4107 4108 // This is case for relocate on the unwinding path of an invoke statepoint 4109 if (LandingPadInst *LandingPad = 4110 dyn_cast<LandingPadInst>(CS.getArgOperand(0))) { 4111 4112 const BasicBlock *InvokeBB = 4113 LandingPad->getParent()->getUniquePredecessor(); 4114 4115 // Landingpad relocates should have only one predecessor with invoke 4116 // statepoint terminator 4117 Assert(InvokeBB, "safepoints should have unique landingpads", 4118 LandingPad->getParent()); 4119 Assert(InvokeBB->getTerminator(), "safepoint block should be well formed", 4120 InvokeBB); 4121 Assert(isStatepoint(InvokeBB->getTerminator()), 4122 "gc relocate should be linked to a statepoint", InvokeBB); 4123 } 4124 else { 4125 // In all other cases relocate should be tied to the statepoint directly. 
4126 // This covers relocates on a normal return path of invoke statepoint and 4127 // relocates of a call statepoint. 4128 auto Token = CS.getArgOperand(0); 4129 Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)), 4130 "gc relocate is incorrectly tied to the statepoint", CS, Token); 4131 } 4132 4133 // Verify rest of the relocate arguments. 4134 4135 ImmutableCallSite StatepointCS( 4136 cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint()); 4137 4138 // Both the base and derived must be piped through the safepoint. 4139 Value* Base = CS.getArgOperand(1); 4140 Assert(isa<ConstantInt>(Base), 4141 "gc.relocate operand #2 must be integer offset", CS); 4142 4143 Value* Derived = CS.getArgOperand(2); 4144 Assert(isa<ConstantInt>(Derived), 4145 "gc.relocate operand #3 must be integer offset", CS); 4146 4147 const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue(); 4148 const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue(); 4149 // Check the bounds 4150 Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(), 4151 "gc.relocate: statepoint base index out of bounds", CS); 4152 Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(), 4153 "gc.relocate: statepoint derived index out of bounds", CS); 4154 4155 // Check that BaseIndex and DerivedIndex fall within the 'gc parameters' 4156 // section of the statepoint's arguments. 4157 Assert(StatepointCS.arg_size() > 0, 4158 "gc.statepoint: insufficient arguments"); 4159 Assert(isa<ConstantInt>(StatepointCS.getArgument(3)), 4160 "gc.statepoint: number of call arguments must be constant integer"); 4161 const unsigned NumCallArgs = 4162 cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue(); 4163 Assert(StatepointCS.arg_size() > NumCallArgs + 5, 4164 "gc.statepoint: mismatch in number of call arguments"); 4165 Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)), 4166 "gc.statepoint: number of transition arguments must be " 4167 "a constant integer"); 4168 const int NumTransitionArgs = 4169 cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)) 4170 ->getZExtValue(); 4171 const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1; 4172 Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)), 4173 "gc.statepoint: number of deoptimization arguments must be " 4174 "a constant integer"); 4175 const int NumDeoptArgs = 4176 cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)) 4177 ->getZExtValue(); 4178 const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs; 4179 const int GCParamArgsEnd = StatepointCS.arg_size(); 4180 Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd, 4181 "gc.relocate: statepoint base index doesn't fall within the " 4182 "'gc parameters' section of the statepoint call", 4183 CS); 4184 Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd, 4185 "gc.relocate: statepoint derived index doesn't fall within the " 4186 "'gc parameters' section of the statepoint call", 4187 CS); 4188 4189 // Relocated value must be either a pointer type or vector-of-pointer type, 4190 // but gc_relocate does not need to return the same pointer type as the 4191 // relocated pointer. It can be cast to the correct type later if it's 4192 // desired.
However, they must have the same address space and 'vectorness' 4193 GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction()); 4194 Assert(Relocate.getDerivedPtr()->getType()->getScalarType()->isPointerTy(), 4195 "gc.relocate: relocated value must be a gc pointer", CS); 4196 4197 auto ResultType = CS.getType(); 4198 auto DerivedType = Relocate.getDerivedPtr()->getType(); 4199 Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(), 4200 "gc.relocate: vector relocates to vector and pointer to pointer", 4201 CS); 4202 Assert( 4203 ResultType->getPointerAddressSpace() == 4204 DerivedType->getPointerAddressSpace(), 4205 "gc.relocate: relocating a pointer shouldn't change its address space", 4206 CS); 4207 break; 4208 } 4209 case Intrinsic::eh_exceptioncode: 4210 case Intrinsic::eh_exceptionpointer: { 4211 Assert(isa<CatchPadInst>(CS.getArgOperand(0)), 4212 "eh.exceptionpointer argument must be a catchpad", CS); 4213 break; 4214 } 4215 case Intrinsic::masked_load: { 4216 Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS); 4217 4218 Value *Ptr = CS.getArgOperand(0); 4219 //Value *Alignment = CS.getArgOperand(1); 4220 Value *Mask = CS.getArgOperand(2); 4221 Value *PassThru = CS.getArgOperand(3); 4222 Assert(Mask->getType()->isVectorTy(), 4223 "masked_load: mask must be vector", CS); 4224 4225 // DataTy is the overloaded type 4226 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); 4227 Assert(DataTy == CS.getType(), 4228 "masked_load: return must match pointer type", CS); 4229 Assert(PassThru->getType() == DataTy, 4230 "masked_load: pass through and data type must match", CS); 4231 Assert(Mask->getType()->getVectorNumElements() == 4232 DataTy->getVectorNumElements(), 4233 "masked_load: vector mask must be same length as data", CS); 4234 break; 4235 } 4236 case Intrinsic::masked_store: { 4237 Value *Val = CS.getArgOperand(0); 4238 Value *Ptr = CS.getArgOperand(1); 4239 //Value *Alignment = CS.getArgOperand(2); 4240 Value *Mask = CS.getArgOperand(3); 4241 Assert(Mask->getType()->isVectorTy(), 4242 "masked_store: mask must be vector", CS); 4243 4244 // DataTy is the overloaded type 4245 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); 4246 Assert(DataTy == Val->getType(), 4247 "masked_store: storee must match pointer type", CS); 4248 Assert(Mask->getType()->getVectorNumElements() == 4249 DataTy->getVectorNumElements(), 4250 "masked_store: vector mask must be same length as data", CS); 4251 break; 4252 } 4253 4254 case Intrinsic::experimental_guard: { 4255 Assert(CS.isCall(), "experimental_guard cannot be invoked", CS); 4256 Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1, 4257 "experimental_guard must have exactly one " 4258 "\"deopt\" operand bundle"); 4259 break; 4260 } 4261 4262 case Intrinsic::experimental_deoptimize: { 4263 Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS); 4264 Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1, 4265 "experimental_deoptimize must have exactly one " 4266 "\"deopt\" operand bundle"); 4267 Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(), 4268 "experimental_deoptimize return type must match caller return type"); 4269 4270 if (CS.isCall()) { 4271 auto *DeoptCI = CS.getInstruction(); 4272 auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode()); 4273 Assert(RI, 4274 "calls to experimental_deoptimize must be followed by a return"); 4275 4276 if (!CS.getType()->isVoidTy() && RI) 4277 Assert(RI->getReturnValue() == 
DeoptCI, 4278 "calls to experimental_deoptimize must be followed by a return " 4279 "of the value computed by experimental_deoptimize"); 4280 } 4281 4282 break; 4283 } 4284 }; 4285 } 4286 4287 /// \brief Carefully grab the subprogram from a local scope. 4288 /// 4289 /// This carefully grabs the subprogram from a local scope, avoiding the 4290 /// built-in assertions that would typically fire. 4291 static DISubprogram *getSubprogram(Metadata *LocalScope) { 4292 if (!LocalScope) 4293 return nullptr; 4294 4295 if (auto *SP = dyn_cast<DISubprogram>(LocalScope)) 4296 return SP; 4297 4298 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope)) 4299 return getSubprogram(LB->getRawScope()); 4300 4301 // Just return null; broken scope chains are checked elsewhere. 4302 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope"); 4303 return nullptr; 4304 } 4305 4306 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) { 4307 Assert(isa<MetadataAsValue>(FPI.getOperand(2)), 4308 "invalid rounding mode argument", &FPI); 4309 Assert(FPI.getRoundingMode() != ConstrainedFPIntrinsic::rmInvalid, 4310 "invalid rounding mode argument", &FPI); 4311 Assert(FPI.getExceptionBehavior() != ConstrainedFPIntrinsic::ebInvalid, 4312 "invalid exception behavior argument", &FPI); 4313 } 4314 4315 template <class DbgIntrinsicTy> 4316 void Verifier::visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII) { 4317 auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata(); 4318 AssertDI(isa<ValueAsMetadata>(MD) || 4319 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()), 4320 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD); 4321 AssertDI(isa<DILocalVariable>(DII.getRawVariable()), 4322 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII, 4323 DII.getRawVariable()); 4324 AssertDI(isa<DIExpression>(DII.getRawExpression()), 4325 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII, 4326 DII.getRawExpression()); 4327 4328 // Ignore broken !dbg attachments; they're checked elsewhere. 4329 if (MDNode *N = DII.getDebugLoc().getAsMDNode()) 4330 if (!isa<DILocation>(N)) 4331 return; 4332 4333 BasicBlock *BB = DII.getParent(); 4334 Function *F = BB ? BB->getParent() : nullptr; 4335 4336 // The scopes for variables and !dbg attachments must agree. 4337 DILocalVariable *Var = DII.getVariable(); 4338 DILocation *Loc = DII.getDebugLoc(); 4339 Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", 4340 &DII, BB, F); 4341 4342 DISubprogram *VarSP = getSubprogram(Var->getRawScope()); 4343 DISubprogram *LocSP = getSubprogram(Loc->getRawScope()); 4344 if (!VarSP || !LocSP) 4345 return; // Broken scope chains are checked elsewhere. 4346 4347 AssertDI(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind + 4348 " variable and !dbg attachment", 4349 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc, 4350 Loc->getScope()->getSubprogram()); 4351 } 4352 4353 static uint64_t getVariableSize(const DILocalVariable &V) { 4354 // Be careful of broken types (checked elsewhere). 4355 const Metadata *RawType = V.getRawType(); 4356 while (RawType) { 4357 // Try to get the size directly. 4358 if (auto *T = dyn_cast<DIType>(RawType)) 4359 if (uint64_t Size = T->getSizeInBits()) 4360 return Size; 4361 4362 if (auto *DT = dyn_cast<DIDerivedType>(RawType)) { 4363 // Look at the base type. 4364 RawType = DT->getRawBaseType(); 4365 continue; 4366 } 4367 4368 // Missing type or size. 4369 break; 4370 } 4371 4372 // Fail gracefully. 
4373 return 0; 4374 } 4375 4376 void Verifier::verifyFragmentExpression(const DbgInfoIntrinsic &I) { 4377 DILocalVariable *V; 4378 DIExpression *E; 4379 if (auto *DVI = dyn_cast<DbgValueInst>(&I)) { 4380 V = dyn_cast_or_null<DILocalVariable>(DVI->getRawVariable()); 4381 E = dyn_cast_or_null<DIExpression>(DVI->getRawExpression()); 4382 } else { 4383 auto *DDI = cast<DbgDeclareInst>(&I); 4384 V = dyn_cast_or_null<DILocalVariable>(DDI->getRawVariable()); 4385 E = dyn_cast_or_null<DIExpression>(DDI->getRawExpression()); 4386 } 4387 4388 // We don't know whether this intrinsic verified correctly. 4389 if (!V || !E || !E->isValid()) 4390 return; 4391 4392 // Nothing to do if this isn't a bit piece expression. 4393 auto Fragment = E->getFragmentInfo(); 4394 if (!Fragment) 4395 return; 4396 4397 // The frontend helps out GDB by emitting the members of local anonymous 4398 // unions as artificial local variables with shared storage. When SROA splits 4399 // the storage for artificial local variables that are smaller than the entire 4400 // union, the overhang piece will be outside of the allotted space for the 4401 // variable and this check fails. 4402 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs. 4403 if (V->isArtificial()) 4404 return; 4405 4406 // If there's no size, the type is broken, but that should be checked 4407 // elsewhere. 4408 uint64_t VarSize = getVariableSize(*V); 4409 if (!VarSize) 4410 return; 4411 4412 unsigned FragSize = Fragment->SizeInBits; 4413 unsigned FragOffset = Fragment->OffsetInBits; 4414 AssertDI(FragSize + FragOffset <= VarSize, 4415 "fragment is larger than or outside of variable", &I, V, E); 4416 AssertDI(FragSize != VarSize, "fragment covers entire variable", &I, V, E); 4417 } 4418 4419 void Verifier::verifyCompileUnits() { 4420 auto *CUs = M.getNamedMetadata("llvm.dbg.cu"); 4421 SmallPtrSet<const Metadata *, 2> Listed; 4422 if (CUs) 4423 Listed.insert(CUs->op_begin(), CUs->op_end()); 4424 AssertDI( 4425 all_of(CUVisited, 4426 [&Listed](const Metadata *CU) { return Listed.count(CU); }), 4427 "All DICompileUnits must be listed in llvm.dbg.cu"); 4428 CUVisited.clear(); 4429 } 4430 4431 void Verifier::verifyDeoptimizeCallingConvs() { 4432 if (DeoptimizeDeclarations.empty()) 4433 return; 4434 4435 const Function *First = DeoptimizeDeclarations[0]; 4436 for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) { 4437 Assert(First->getCallingConv() == F->getCallingConv(), 4438 "All llvm.experimental.deoptimize declarations must have the same " 4439 "calling convention", 4440 First, F); 4441 } 4442 } 4443 4444 //===----------------------------------------------------------------------===// 4445 // Implement the public interfaces to this file... 4446 //===----------------------------------------------------------------------===// 4447 4448 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) { 4449 Function &F = const_cast<Function &>(f); 4450 4451 // Don't use a raw_null_ostream. Printing IR is expensive. 4452 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent()); 4453 4454 // Note that this function's return value is inverted from what you would 4455 // expect of a function called "verify". 4456 return !V.verify(F); 4457 } 4458 4459 bool llvm::verifyModule(const Module &M, raw_ostream *OS, 4460 bool *BrokenDebugInfo) { 4461 // Don't use a raw_null_ostream. Printing IR is expensive. 
4462 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M); 4463 4464 bool Broken = false; 4465 for (const Function &F : M) 4466 Broken |= !V.verify(F); 4467 4468 Broken |= !V.verify(); 4469 if (BrokenDebugInfo) 4470 *BrokenDebugInfo = V.hasBrokenDebugInfo(); 4471 // Note that this function's return value is inverted from what you would 4472 // expect of a function called "verify". 4473 return Broken; 4474 } 4475 4476 namespace { 4477 4478 struct VerifierLegacyPass : public FunctionPass { 4479 static char ID; 4480 4481 std::unique_ptr<Verifier> V; 4482 bool FatalErrors = true; 4483 4484 VerifierLegacyPass() : FunctionPass(ID) { 4485 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry()); 4486 } 4487 explicit VerifierLegacyPass(bool FatalErrors) 4488 : FunctionPass(ID), 4489 FatalErrors(FatalErrors) { 4490 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry()); 4491 } 4492 4493 bool doInitialization(Module &M) override { 4494 V = llvm::make_unique<Verifier>( 4495 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M); 4496 return false; 4497 } 4498 4499 bool runOnFunction(Function &F) override { 4500 if (!V->verify(F) && FatalErrors) 4501 report_fatal_error("Broken function found, compilation aborted!"); 4502 4503 return false; 4504 } 4505 4506 bool doFinalization(Module &M) override { 4507 bool HasErrors = false; 4508 for (Function &F : M) 4509 if (F.isDeclaration()) 4510 HasErrors |= !V->verify(F); 4511 4512 HasErrors |= !V->verify(); 4513 if (FatalErrors) { 4514 if (HasErrors) 4515 report_fatal_error("Broken module found, compilation aborted!"); 4516 assert(!V->hasBrokenDebugInfo() && "Module contains invalid debug info"); 4517 } 4518 4519 // Strip broken debug info. 4520 if (V->hasBrokenDebugInfo()) { 4521 DiagnosticInfoIgnoringInvalidDebugMetadata DiagInvalid(M); 4522 M.getContext().diagnose(DiagInvalid); 4523 if (!StripDebugInfo(M)) 4524 report_fatal_error("Failed to strip malformed debug info"); 4525 } 4526 return false; 4527 } 4528 4529 void getAnalysisUsage(AnalysisUsage &AU) const override { 4530 AU.setPreservesAll(); 4531 } 4532 }; 4533 4534 } // end anonymous namespace 4535 4536 /// Helper to issue failure from the TBAA verification 4537 template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) { 4538 if (Diagnostic) 4539 return Diagnostic->CheckFailed(Args...); 4540 } 4541 4542 #define AssertTBAA(C, ...) \ 4543 do { \ 4544 if (!(C)) { \ 4545 CheckFailed(__VA_ARGS__); \ 4546 return false; \ 4547 } \ 4548 } while (false) 4549 4550 /// Verify that \p BaseNode can be used as the "base type" in the struct-path 4551 /// TBAA scheme. This means \p BaseNode is either a scalar node, or a 4552 /// struct-type node describing an aggregate data structure (like a struct). 
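/// For example, a plausible struct-type base node for a type with an int
/// member at offset 0 and a float member at offset 4 is (illustrative):
///
///   !5 = !{!"MyStruct", !1, i64 0, !2, i64 4}
///
/// where !1 and !2 are scalar type nodes such as !{!"int", !0, i64 0} and !0
/// is the TBAA root. The checks below inspect exactly this operand layout.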
TBAAVerifier::TBAABaseNodeSummary
TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode) {
  if (BaseNode->getNumOperands() < 2) {
    CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
    return {true, ~0u};
  }

  auto Itr = TBAABaseNodes.find(BaseNode);
  if (Itr != TBAABaseNodes.end())
    return Itr->second;

  auto Result = verifyTBAABaseNodeImpl(I, BaseNode);
  auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
  (void)InsertResult;
  assert(InsertResult.second && "We just checked!");
  return Result;
}

TBAAVerifier::TBAABaseNodeSummary
TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode) {
  const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};

  if (BaseNode->getNumOperands() == 2) {
    // Scalar nodes can only be accessed at offset 0.
    return isValidScalarTBAANode(BaseNode)
               ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
               : InvalidNode;
  }

  if (BaseNode->getNumOperands() % 2 != 1) {
    CheckFailed("Struct tag nodes must have an odd number of operands!",
                BaseNode);
    return InvalidNode;
  }

  if (!isa<MDString>(BaseNode->getOperand(0))) {
    CheckFailed("Struct tag nodes have a string as their first operand",
                BaseNode);
    return InvalidNode;
  }

  bool Failed = false;

  Optional<APInt> PrevOffset;
  unsigned BitWidth = ~0u;

  // We've already checked that BaseNode is not a degenerate root node with one
  // operand in \c verifyTBAABaseNode, so this loop should run at least once.
  for (unsigned Idx = 1; Idx < BaseNode->getNumOperands(); Idx += 2) {
    const MDOperand &FieldTy = BaseNode->getOperand(Idx);
    const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
    if (!isa<MDNode>(FieldTy)) {
      CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
      Failed = true;
      continue;
    }

    auto *OffsetEntryCI =
        mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
    if (!OffsetEntryCI) {
      CheckFailed("Offset entries must be constants!", &I, BaseNode);
      Failed = true;
      continue;
    }

    if (BitWidth == ~0u)
      BitWidth = OffsetEntryCI->getBitWidth();

    if (OffsetEntryCI->getBitWidth() != BitWidth) {
      CheckFailed(
          "Bitwidth between the offsets and struct type entries must match", &I,
          BaseNode);
      Failed = true;
      continue;
    }

    // NB! As far as I can tell, we generate a non-strictly increasing offset
    // sequence only from structs that have zero-size bit fields. When
    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
    // pick the field that is lexically the latest in the struct type metadata
    // node. This mirrors the actual behavior of the alias analysis
    // implementation.
    bool IsAscending =
        !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());

    if (!IsAscending) {
      CheckFailed("Offsets must be increasing!", &I, BaseNode);
      Failed = true;
    }

    PrevOffset = OffsetEntryCI->getValue();
  }

  return Failed ? InvalidNode
                : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
}

static bool IsRootTBAANode(const MDNode *MD) {
  return MD->getNumOperands() < 2;
}

static bool IsScalarTBAANodeImpl(const MDNode *MD,
                                 SmallPtrSetImpl<const MDNode *> &Visited) {
  if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
    return false;

  if (!isa<MDString>(MD->getOperand(0)))
    return false;

  if (MD->getNumOperands() == 3) {
    auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
    if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
      return false;
  }

  auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
  return Parent && Visited.insert(Parent).second &&
         (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
}

bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
  auto ResultIt = TBAAScalarNodes.find(MD);
  if (ResultIt != TBAAScalarNodes.end())
    return ResultIt->second;

  SmallPtrSet<const MDNode *, 4> Visited;
  bool Result = IsScalarTBAANodeImpl(MD, Visited);
  auto InsertResult = TBAAScalarNodes.insert({MD, Result});
  (void)InsertResult;
  assert(InsertResult.second && "Just checked!");

  return Result;
}

/// Returns the field node at the offset \p Offset in \p BaseNode. Updates \p
/// Offset in place to be the offset within the field node returned.
///
/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to Assert that.
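  // (Illustrative lookup, reusing the hypothetical nodes sketched above
  //  verifyTBAABaseNode: for the struct-type node !3 = !{!"S", !2, i64 0,
  //  !2, i64 4} and Offset == 4, the scan below settles on the last field,
  //  subtracts its offset so that Offset becomes 0, and returns !2.)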
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  for (unsigned Idx = 1; Idx < BaseNode->getNumOperands(); Idx += 2) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      if (Idx == 1) {
        CheckFailed("Could not find TBAA parent in struct type node", &I,
                    BaseNode, &Offset);
        return nullptr;
      }

      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx - 1));
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(Idx - 2));
    }
  }

  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(BaseNode->getNumOperands() - 1));

  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(BaseNode->getNumOperands() - 2));
}

bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
  AssertTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
                 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
                 isa<AtomicCmpXchgInst>(I),
             "TBAA is only for loads, stores and calls!", &I);

  bool IsStructPathTBAA =
      isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;

  AssertTBAA(
      IsStructPathTBAA,
      "Old-style TBAA is no longer allowed, use struct-path TBAA instead", &I);

  AssertTBAA(MD->getNumOperands() < 5,
             "Struct tag metadata must have either 3 or 4 operands", &I, MD);

  MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
  MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));

  if (MD->getNumOperands() == 4) {
    auto *IsImmutableCI =
        mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(3));
    AssertTBAA(IsImmutableCI,
               "Immutability tag on struct tag metadata must be a constant", &I,
               MD);
    AssertTBAA(
        IsImmutableCI->isZero() || IsImmutableCI->isOne(),
        "Immutability part of the struct tag metadata must be either 0 or 1",
        &I, MD);
  }

  AssertTBAA(BaseNode && AccessType,
             "Malformed struct tag metadata: base and access-type "
             "should be non-null and point to Metadata nodes",
             &I, MD, BaseNode, AccessType);

  AssertTBAA(isValidScalarTBAANode(AccessType),
             "Access type node must be a valid scalar type", &I, MD,
             AccessType);

  auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
  AssertTBAA(OffsetCI, "Offset must be constant integer", &I, MD);

  APInt Offset = OffsetCI->getValue();
  bool SeenAccessTypeInPath = false;

  SmallPtrSet<MDNode *, 4> StructPath;

  for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
       BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset)) {
    if (!StructPath.insert(BaseNode).second) {
      CheckFailed("Cycle detected in struct path", &I, MD);
      return false;
    }

    bool Invalid;
    unsigned BaseNodeBitWidth;
    std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode);

    // If the base node is invalid in itself, then we've already printed all the
    // errors we wanted to print.
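    // (Contrast with a well-formed walk, again reusing the hypothetical nodes
    //  sketched above verifyTBAABaseNode: for an access tag !{!3, !2, i64 4},
    //  the loop visits !3 with Offset == 4, steps to the field node !2 with
    //  Offset rebased to 0 -- the access type -- then continues through !1 and
    //  stops at the root !0.)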
    if (Invalid)
      return false;

    SeenAccessTypeInPath |= BaseNode == AccessType;

    if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
      AssertTBAA(Offset == 0, "Offset not zero at the point of scalar access",
                 &I, MD, &Offset);

    AssertTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
                   (BaseNodeBitWidth == 0 && Offset == 0),
               "Access bit-width not the same as description bit-width", &I, MD,
               BaseNodeBitWidth, Offset.getBitWidth());
  }

  AssertTBAA(SeenAccessTypeInPath, "Did not see access type in access path!",
             &I, MD);
  return true;
}

char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)

FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
  return new VerifierLegacyPass(FatalErrors);
}

AnalysisKey VerifierAnalysis::Key;
VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
                                               ModuleAnalysisManager &) {
  Result Res;
  Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
  return Res;
}

VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
                                               FunctionAnalysisManager &) {
  return {llvm::verifyFunction(F, &dbgs()), false};
}

PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
  auto Res = AM.getResult<VerifierAnalysis>(M);
  if (FatalErrors) {
    if (Res.IRBroken)
      report_fatal_error("Broken module found, compilation aborted!");
    assert(!Res.DebugInfoBroken && "Module contains invalid debug info");
  }

  // Strip broken debug info.
  if (Res.DebugInfoBroken) {
    DiagnosticInfoIgnoringInvalidDebugMetadata DiagInvalid(M);
    M.getContext().diagnose(DiagInvalid);
    if (!StripDebugInfo(M))
      report_fatal_error("Failed to strip malformed debug info");
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto Res = AM.getResult<VerifierAnalysis>(F);
  if (Res.IRBroken && FatalErrors)
    report_fatal_error("Broken function found, compilation aborted!");

  return PreservedAnalyses::all();
}
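// Illustrative uses of the two entry points defined above. This is a sketch
// only: pass-manager setup (analysis registration, pipeline construction) is
// omitted, the variable names are hypothetical, and VerifierPass is assumed to
// take its FatalErrors flag as a constructor argument as declared in
// Verifier.h.
//
//   // New pass manager:
//   ModulePassManager MPM;
//   MPM.addPass(VerifierPass(/*FatalErrors=*/true));
//   MPM.run(M, MAM);
//
//   // Legacy pass manager:
//   legacy::PassManager PM;
//   PM.add(createVerifierPass(/*FatalErrors=*/true));
//   PM.run(M);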