//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the function verifier interface, that can be used for some
// sanity checking of input to the system.
//
// Note that this does not provide full `Java style' security and verifications,
// instead it just tries to ensure that code is well-formed.
//
//  * Both of a binary operator's parameters are of the same type
//  * Verify that the indices of mem access instructions match other operands
//  * Verify that arithmetic and other things are only performed on first-class
//    types.  Verify that shifts & logicals only happen on integrals f.e.
//  * All of the constants in a switch statement are of the correct type
//  * The code is in valid SSA form
//  * It should be illegal to put a label into any other type (like a structure)
//    or to return one. [except constant arrays!]
//  * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
//  * PHI nodes must have an entry for each predecessor, with no extras.
//  * PHI nodes must be the first thing in a basic block, all grouped together
//  * PHI nodes must have at least one entry
//  * All basic blocks should only end with terminator insts, not contain them
//  * The entry node to a function must not have predecessors
//  * All Instructions must be embedded into a basic block
//  * Functions cannot take a void-typed parameter
//  * Verify that a function's argument list agrees with its declared type.
//  * It is illegal to specify a name for a void value.
34 // * It is illegal to have a internal global value with no initializer 35 // * It is illegal to have a ret instruction that returns a value that does not 36 // agree with the function return value type. 37 // * Function call argument types match the function prototype 38 // * A landing pad is defined by a landingpad instruction, and can be jumped to 39 // only by the unwind edge of an invoke instruction. 40 // * A landingpad instruction must be the first non-PHI instruction in the 41 // block. 42 // * Landingpad instructions must be in a function with a personality function. 43 // * All other things that are tested by asserts spread about the code... 44 // 45 //===----------------------------------------------------------------------===// 46 47 #include "llvm/IR/Verifier.h" 48 #include "llvm/ADT/MapVector.h" 49 #include "llvm/ADT/STLExtras.h" 50 #include "llvm/ADT/SetVector.h" 51 #include "llvm/ADT/SmallPtrSet.h" 52 #include "llvm/ADT/SmallVector.h" 53 #include "llvm/ADT/StringExtras.h" 54 #include "llvm/IR/CFG.h" 55 #include "llvm/IR/CallSite.h" 56 #include "llvm/IR/CallingConv.h" 57 #include "llvm/IR/ConstantRange.h" 58 #include "llvm/IR/Constants.h" 59 #include "llvm/IR/DataLayout.h" 60 #include "llvm/IR/DebugInfo.h" 61 #include "llvm/IR/DerivedTypes.h" 62 #include "llvm/IR/DiagnosticInfo.h" 63 #include "llvm/IR/Dominators.h" 64 #include "llvm/IR/InlineAsm.h" 65 #include "llvm/IR/InstIterator.h" 66 #include "llvm/IR/InstVisitor.h" 67 #include "llvm/IR/IntrinsicInst.h" 68 #include "llvm/IR/LLVMContext.h" 69 #include "llvm/IR/Metadata.h" 70 #include "llvm/IR/Module.h" 71 #include "llvm/IR/ModuleSlotTracker.h" 72 #include "llvm/IR/PassManager.h" 73 #include "llvm/IR/Statepoint.h" 74 #include "llvm/Pass.h" 75 #include "llvm/Support/CommandLine.h" 76 #include "llvm/Support/Debug.h" 77 #include "llvm/Support/ErrorHandling.h" 78 #include "llvm/Support/raw_ostream.h" 79 #include <algorithm> 80 #include <cstdarg> 81 using namespace llvm; 82 83 static cl::opt<bool> 
VerifyDebugInfo("verify-debug-info", cl::init(true));

namespace {

/// Shared state and diagnostic plumbing for the verifier.
///
/// Carries the optional output stream, the module currently being verified
/// (plus its ModuleSlotTracker for printing), and the "broken" flags that the
/// CheckFailed/DebugInfoCheckFailed helpers set when a check fires.
struct VerifierSupport {
  raw_ostream *OS;
  const Module *M = nullptr;
  Optional<ModuleSlotTracker> MST;

  /// Track the brokenness of the module while recursively visiting.
  bool Broken = false;
  /// Broken debug info can be "recovered" from by stripping the debug info.
  bool BrokenDebugInfo = false;
  /// Whether to treat broken debug info as an error.
  bool TreatBrokenDebugInfoAsError = true;

  explicit VerifierSupport(raw_ostream *OS) : OS(OS) {}

private:
  // Overload set used by WriteTs below to print whatever IR entities
  // accompany a failed check.  Every overload tolerates null so callers can
  // forward possibly-null values unconditionally.
  template <class NodeTy> void Write(const ilist_iterator<NodeTy> &I) {
    Write(&*I);
  }

  void Write(const Module *M) {
    if (!M)
      return;
    *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
  }

  void Write(const Value *V) {
    if (!V)
      return;
    // Instructions are printed in full; any other value just as an operand.
    if (isa<Instruction>(V)) {
      V->print(*OS, *MST);
      *OS << '\n';
    } else {
      V->printAsOperand(*OS, true, *MST);
      *OS << '\n';
    }
  }
  void Write(ImmutableCallSite CS) {
    Write(CS.getInstruction());
  }

  void Write(const Metadata *MD) {
    if (!MD)
      return;
    MD->print(*OS, *MST, M);
    *OS << '\n';
  }

  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }

  void Write(const NamedMDNode *NMD) {
    if (!NMD)
      return;
    NMD->print(*OS, *MST);
    *OS << '\n';
  }

  void Write(Type *T) {
    if (!T)
      return;
    *OS << ' ' << *T;
  }

  void Write(const Comdat *C) {
    if (!C)
      return;
    *OS << *C;
  }

  template <typename T> void Write(ArrayRef<T> Vs) {
    for (const T &V : Vs)
      Write(V);
  }

  // Variadic recursion: print the first value, then recurse on the rest.
  template <typename T1, typename... Ts>
  void WriteTs(const T1 &V1, const Ts &... Vs) {
    Write(V1);
    WriteTs(Vs...);
  }

  // Base case that terminates the recursion above.
  template <typename... Ts> void WriteTs() {}

public:
  /// \brief A check failed, so print out the condition and the message.
  ///
  /// This provides a nice place to put a breakpoint if you want to see why
  /// something is not correct.
  void CheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    Broken = true;
  }

  /// \brief A check failed (with values to print).
  ///
  /// This calls the Message-only version so that the above is easier to set a
  /// breakpoint on.
  template <typename T1, typename... Ts>
  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
    CheckFailed(Message);
    if (OS)
      WriteTs(V1, Vs...);
  }

  /// A debug info check failed.
  ///
  /// Always records BrokenDebugInfo; only marks the module Broken when broken
  /// debug info is configured to be treated as an error.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    Broken |= TreatBrokenDebugInfoAsError;
    BrokenDebugInfo = true;
  }

  /// A debug info check failed (with values to print).
  template <typename T1, typename... Ts>
  void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
                            const Ts &... Vs) {
    DebugInfoCheckFailed(Message);
    if (OS)
      WriteTs(V1, Vs...);
  }
};

class Verifier : public InstVisitor<Verifier>, VerifierSupport {
  friend class InstVisitor<Verifier>;

  LLVMContext *Context;
  DominatorTree DT;

  /// \brief When verifying a basic block, keep track of all of the
  /// instructions we have seen so far.
  ///
  /// This allows us to do efficient dominance checks for the case when an
  /// instruction has an operand that is an instruction in the same block.
  SmallPtrSet<Instruction *, 16> InstsInThisBlock;

  /// \brief Keep track of the metadata nodes that have been checked already.
  SmallPtrSet<const Metadata *, 32> MDNodes;

  /// Track all DICompileUnits visited.
  SmallPtrSet<const Metadata *, 2> CUVisited;

  /// \brief The result type for a landingpad.
  Type *LandingPadResultTy;

  /// \brief Whether we've seen a call to @llvm.localescape in this function
  /// already.
  bool SawFrameEscape;

  /// Stores the count of how many objects were passed to llvm.localescape for a
  /// given function and the largest index passed to llvm.localrecover.
  DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;

  // Maps catchswitches and cleanuppads that unwind to siblings to the
  // terminators that indicate the unwind, used to detect cycles therein.
  MapVector<Instruction *, TerminatorInst *> SiblingFuncletInfo;

  /// Cache of constants visited in search of ConstantExprs.
  SmallPtrSet<const Constant *, 32> ConstantExprVisited;

  /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
  SmallVector<const Function *, 4> DeoptimizeDeclarations;

  // Verify that this GlobalValue is only used in this module.
  // This map is used to avoid visiting uses twice. We can arrive at a user
  // twice, if they have multiple operands. In particular for very large
  // constant expressions, we can arrive at a particular user many times.
  SmallPtrSet<const Value *, 32> GlobalValueVisited;

  void checkAtomicMemAccessSize(const Module *M, Type *Ty,
                                const Instruction *I);

  // Point VerifierSupport at NewM (rebuilding the slot tracker) unless it is
  // already the current module.
  void updateModule(const Module *NewM) {
    if (M == NewM)
      return;
    MST.emplace(NewM);
    M = NewM;
  }

public:
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError)
      : VerifierSupport(OS), Context(nullptr), LandingPadResultTy(nullptr),
        SawFrameEscape(false) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }

  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }

  /// Verify a single function.  Returns true if the function is well-formed.
  bool verify(const Function &F) {
    updateModule(F.getParent());
    Context = &M->getContext();

    // First ensure the function is well-enough formed to compute dominance
    // information.
    if (F.empty()) {
      if (OS)
        *OS << "Function '" << F.getName()
            << "' does not contain an entry block!\n";
      return false;
    }
    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, *MST);
        *OS << "\n";
      }
      return false;
    }

    // Now directly compute a dominance tree. We don't rely on the pass
    // manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to
    // run this code outside of a pass manager.
    // FIXME: It's really gross that we have to cast away constness here.
    DT.recalculate(const_cast<Function &>(F));

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();
    // Reset per-function state so this Verifier instance can be reused.
    InstsInThisBlock.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();

    return !Broken;
  }

  /// Verify module-level properties (globals, aliases, named metadata, module
  /// flags, comdats).  Returns true if the module is well-formed.
  bool verify(const Module &M) {
    updateModule(&M);
    Context = &M.getContext();
    Broken = false;

    // Scan through, checking all of the external function's linkage now...
    for (const Function &F : M) {
      visitGlobalValue(F);

      // Check to make sure function prototypes are okay.
      if (F.isDeclaration()) {
        visitFunction(F);
        if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
          DeoptimizeDeclarations.push_back(&F);
      }
    }

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    visitModuleFlags(M);
    visitModuleIdents(M);

    verifyCompileUnits();

    verifyDeoptimizeCallingConvs();

    return !Broken;
  }

private:
  // Verification methods...
  void visitGlobalValue(const GlobalValue &GV);
  void visitGlobalVariable(const GlobalVariable &GV);
  void visitGlobalAlias(const GlobalAlias &GA);
  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
  void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
                           const GlobalAlias &A, const Constant &C);
  void visitNamedMDNode(const NamedMDNode &NMD);
  void visitMDNode(const MDNode &MD);
  void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
  void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
  void visitComdat(const Comdat &C);
  void visitModuleIdents(const Module &M);
  void visitModuleFlags(const Module &M);
  void visitModuleFlag(const MDNode *Op,
                       DenseMap<const MDString *, const MDNode *> &SeenIDs,
                       SmallVectorImpl<const MDNode *> &Requirements);
  void visitFunction(const Function &F);
  void visitBasicBlock(BasicBlock &BB);
  void visitRangeMetadata(Instruction& I, MDNode* Range, Type* Ty);
  void visitDereferenceableMetadata(Instruction& I, MDNode* MD);

  template <class Ty> bool isValidMetadataArray(const MDTuple &N);
  // Declares one visit##CLASS method per specialized metadata node kind.
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
#include "llvm/IR/Metadata.def"
  void visitDIScope(const DIScope &N);
  void visitDIVariable(const DIVariable &N);
  void visitDILexicalBlockBase(const DILexicalBlockBase &N);
  void visitDITemplateParameter(const DITemplateParameter &N);

  void visitTemplateParams(const MDNode &N, const Metadata &RawParams);

  // InstVisitor overrides...
  using InstVisitor<Verifier>::visit;
  void visit(Instruction &I);

  void visitTruncInst(TruncInst &I);
  void visitZExtInst(ZExtInst &I);
  void visitSExtInst(SExtInst &I);
  void visitFPTruncInst(FPTruncInst &I);
  void visitFPExtInst(FPExtInst &I);
  void visitFPToUIInst(FPToUIInst &I);
  void visitFPToSIInst(FPToSIInst &I);
  void visitUIToFPInst(UIToFPInst &I);
  void visitSIToFPInst(SIToFPInst &I);
  void visitIntToPtrInst(IntToPtrInst &I);
  void visitPtrToIntInst(PtrToIntInst &I);
  void visitBitCastInst(BitCastInst &I);
  void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
  void visitPHINode(PHINode &PN);
  void visitBinaryOperator(BinaryOperator &B);
  void visitICmpInst(ICmpInst &IC);
  void visitFCmpInst(FCmpInst &FC);
  void visitExtractElementInst(ExtractElementInst &EI);
  void visitInsertElementInst(InsertElementInst &EI);
  void visitShuffleVectorInst(ShuffleVectorInst &EI);
  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
  void visitCallInst(CallInst &CI);
  void visitInvokeInst(InvokeInst &II);
  void visitGetElementPtrInst(GetElementPtrInst &GEP);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void verifyDominatesUse(Instruction &I, unsigned i);
  void visitInstruction(Instruction &I);
  void visitTerminatorInst(TerminatorInst &I);
  void visitBranchInst(BranchInst &BI);
  void visitReturnInst(ReturnInst &RI);
  void visitSwitchInst(SwitchInst &SI);
  void visitIndirectBrInst(IndirectBrInst &BI);
  void visitSelectInst(SelectInst &SI);
  void visitUserOp1(Instruction &I);
  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
  void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
  template <class DbgIntrinsicTy>
  void visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
  void visitAtomicRMWInst(AtomicRMWInst &RMWI);
  void visitFenceInst(FenceInst &FI);
  void visitAllocaInst(AllocaInst &AI);
  void visitExtractValueInst(ExtractValueInst &EVI);
  void visitInsertValueInst(InsertValueInst &IVI);
  void visitEHPadPredecessors(Instruction &I);
  void visitLandingPadInst(LandingPadInst &LPI);
  void visitCatchPadInst(CatchPadInst &CPI);
  void visitCatchReturnInst(CatchReturnInst &CatchReturn);
  void visitCleanupPadInst(CleanupPadInst &CPI);
  void visitFuncletPadInst(FuncletPadInst &FPI);
  void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
  void visitCleanupReturnInst(CleanupReturnInst &CRI);

  void verifyCallSite(CallSite CS);
  void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal);
  void verifySwiftErrorValue(const Value *SwiftErrorVal);
  void verifyMustTailCall(CallInst &CI);
  bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
                        unsigned ArgNo, std::string &Suffix);
  bool verifyIntrinsicType(Type *Ty, ArrayRef<Intrinsic::IITDescriptor> &Infos,
                           SmallVectorImpl<Type *> &ArgTys);
  bool verifyIntrinsicIsVarArg(bool isVarArg,
                               ArrayRef<Intrinsic::IITDescriptor> &Infos);
  bool verifyAttributeCount(AttributeSet Attrs, unsigned Params);
  void verifyAttributeTypes(AttributeSet Attrs, unsigned Idx, bool isFunction,
                            const Value *V);
  void verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
                            bool isReturnValue, const Value *V);
  void verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
                           const Value *V);
  void verifyFunctionMetadata(
      const SmallVector<std::pair<unsigned, MDNode *>, 4> MDs);

  void visitConstantExprsRecursively(const Constant *EntryC);
  void visitConstantExpr(const ConstantExpr *CE);
  void verifyStatepoint(ImmutableCallSite CS);
  void verifyFrameRecoverIndices();
  void verifySiblingFuncletUnwinds();

  void verifyBitPieceExpression(const DbgInfoIntrinsic &I);

  /// Module-level debug info verification...
  void verifyCompileUnits();

  /// Module-level verification that all @llvm.experimental.deoptimize
  /// declarations share the same calling convention.
  void verifyDeoptimizeCallingConvs();
};
} // End anonymous namespace

/// We know that cond should be true, if not print an error message.
/// NOTE: expands to an early `return` from the enclosing visit method, so a
/// failed Assert skips the remaining checks in that method.
#define Assert(C, ...) \
  do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (0)

/// We know that a debug info condition should be true, if not print
/// an error message.
/// NOTE: like Assert, this early-returns from the enclosing method.
#define AssertDI(C, ...) \
  do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (0)


// Reject null operands up front, then dispatch to the per-opcode visitor.
void Verifier::visit(Instruction &I) {
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
    Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
  InstVisitor<Verifier>::visit(I);
}

// Helper to recursively iterate over indirect users. By
// returning false, the callback can ask to stop recursing
// further.
507 static void forEachUser(const Value *User, 508 SmallPtrSet<const Value *, 32> &Visited, 509 llvm::function_ref<bool(const Value *)> Callback) { 510 if (!Visited.insert(User).second) 511 return; 512 for (const Value *TheNextUser : User->materialized_users()) 513 if (Callback(TheNextUser)) 514 forEachUser(TheNextUser, Visited, Callback); 515 } 516 517 void Verifier::visitGlobalValue(const GlobalValue &GV) { 518 Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(), 519 "Global is external, but doesn't have external or weak linkage!", &GV); 520 521 Assert(GV.getAlignment() <= Value::MaximumAlignment, 522 "huge alignment values are unsupported", &GV); 523 Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV), 524 "Only global variables can have appending linkage!", &GV); 525 526 if (GV.hasAppendingLinkage()) { 527 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV); 528 Assert(GVar && GVar->getValueType()->isArrayTy(), 529 "Only global arrays can have appending linkage!", GVar); 530 } 531 532 if (GV.isDeclarationForLinker()) 533 Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV); 534 535 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool { 536 if (const Instruction *I = dyn_cast<Instruction>(V)) { 537 if (!I->getParent() || !I->getParent()->getParent()) 538 CheckFailed("Global is referenced by parentless instruction!", &GV, 539 M, I); 540 else if (I->getParent()->getParent()->getParent() != M) 541 CheckFailed("Global is referenced in a different module!", &GV, 542 M, I, I->getParent()->getParent(), 543 I->getParent()->getParent()->getParent()); 544 return false; 545 } else if (const Function *F = dyn_cast<Function>(V)) { 546 if (F->getParent() != M) 547 CheckFailed("Global is used by function in a different module", &GV, 548 M, F, F->getParent()); 549 return false; 550 } 551 return true; 552 }); 553 } 554 555 void Verifier::visitGlobalVariable(const GlobalVariable &GV) { 556 if (GV.hasInitializer()) { 557 
Assert(GV.getInitializer()->getType() == GV.getValueType(), 558 "Global variable initializer type does not match global " 559 "variable type!", 560 &GV); 561 562 // If the global has common linkage, it must have a zero initializer and 563 // cannot be constant. 564 if (GV.hasCommonLinkage()) { 565 Assert(GV.getInitializer()->isNullValue(), 566 "'common' global must have a zero initializer!", &GV); 567 Assert(!GV.isConstant(), "'common' global may not be marked constant!", 568 &GV); 569 Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV); 570 } 571 } 572 573 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" || 574 GV.getName() == "llvm.global_dtors")) { 575 Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(), 576 "invalid linkage for intrinsic global variable", &GV); 577 // Don't worry about emitting an error for it not being an array, 578 // visitGlobalValue will complain on appending non-array. 579 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) { 580 StructType *STy = dyn_cast<StructType>(ATy->getElementType()); 581 PointerType *FuncPtrTy = 582 FunctionType::get(Type::getVoidTy(*Context), false)->getPointerTo(); 583 // FIXME: Reject the 2-field form in LLVM 4.0. 
584 Assert(STy && 585 (STy->getNumElements() == 2 || STy->getNumElements() == 3) && 586 STy->getTypeAtIndex(0u)->isIntegerTy(32) && 587 STy->getTypeAtIndex(1) == FuncPtrTy, 588 "wrong type for intrinsic global variable", &GV); 589 if (STy->getNumElements() == 3) { 590 Type *ETy = STy->getTypeAtIndex(2); 591 Assert(ETy->isPointerTy() && 592 cast<PointerType>(ETy)->getElementType()->isIntegerTy(8), 593 "wrong type for intrinsic global variable", &GV); 594 } 595 } 596 } 597 598 if (GV.hasName() && (GV.getName() == "llvm.used" || 599 GV.getName() == "llvm.compiler.used")) { 600 Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(), 601 "invalid linkage for intrinsic global variable", &GV); 602 Type *GVType = GV.getValueType(); 603 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) { 604 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType()); 605 Assert(PTy, "wrong type for intrinsic global variable", &GV); 606 if (GV.hasInitializer()) { 607 const Constant *Init = GV.getInitializer(); 608 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init); 609 Assert(InitArray, "wrong initalizer for intrinsic global variable", 610 Init); 611 for (Value *Op : InitArray->operands()) { 612 Value *V = Op->stripPointerCastsNoFollowAliases(); 613 Assert(isa<GlobalVariable>(V) || isa<Function>(V) || 614 isa<GlobalAlias>(V), 615 "invalid llvm.used member", V); 616 Assert(V->hasName(), "members of llvm.used must be named", V); 617 } 618 } 619 } 620 } 621 622 Assert(!GV.hasDLLImportStorageClass() || 623 (GV.isDeclaration() && GV.hasExternalLinkage()) || 624 GV.hasAvailableExternallyLinkage(), 625 "Global is marked as dllimport, but not external", &GV); 626 627 if (!GV.hasInitializer()) { 628 visitGlobalValue(GV); 629 return; 630 } 631 632 // Walk any aggregate initializers looking for bitcasts between address spaces 633 visitConstantExprsRecursively(GV.getInitializer()); 634 635 visitGlobalValue(GV); 636 } 637 638 void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, 
const Constant &C) { 639 SmallPtrSet<const GlobalAlias*, 4> Visited; 640 Visited.insert(&GA); 641 visitAliaseeSubExpr(Visited, GA, C); 642 } 643 644 void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited, 645 const GlobalAlias &GA, const Constant &C) { 646 if (const auto *GV = dyn_cast<GlobalValue>(&C)) { 647 Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition", 648 &GA); 649 650 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) { 651 Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA); 652 653 Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias", 654 &GA); 655 } else { 656 // Only continue verifying subexpressions of GlobalAliases. 657 // Do not recurse into global initializers. 658 return; 659 } 660 } 661 662 if (const auto *CE = dyn_cast<ConstantExpr>(&C)) 663 visitConstantExprsRecursively(CE); 664 665 for (const Use &U : C.operands()) { 666 Value *V = &*U; 667 if (const auto *GA2 = dyn_cast<GlobalAlias>(V)) 668 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee()); 669 else if (const auto *C2 = dyn_cast<Constant>(V)) 670 visitAliaseeSubExpr(Visited, GA, *C2); 671 } 672 } 673 674 void Verifier::visitGlobalAlias(const GlobalAlias &GA) { 675 Assert(GlobalAlias::isValidLinkage(GA.getLinkage()), 676 "Alias should have private, internal, linkonce, weak, linkonce_odr, " 677 "weak_odr, or external linkage!", 678 &GA); 679 const Constant *Aliasee = GA.getAliasee(); 680 Assert(Aliasee, "Aliasee cannot be NULL!", &GA); 681 Assert(GA.getType() == Aliasee->getType(), 682 "Alias and aliasee types should match!", &GA); 683 684 Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee), 685 "Aliasee should be either GlobalValue or ConstantExpr", &GA); 686 687 visitAliaseeSubExpr(GA, *Aliasee); 688 689 visitGlobalValue(GA); 690 } 691 692 void Verifier::visitNamedMDNode(const NamedMDNode &NMD) { 693 for (const MDNode *MD : NMD.operands()) { 694 if (NMD.getName() == "llvm.dbg.cu") { 695 
AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD); 696 } 697 698 if (!MD) 699 continue; 700 701 visitMDNode(*MD); 702 } 703 } 704 705 void Verifier::visitMDNode(const MDNode &MD) { 706 // Only visit each node once. Metadata can be mutually recursive, so this 707 // avoids infinite recursion here, as well as being an optimization. 708 if (!MDNodes.insert(&MD).second) 709 return; 710 711 switch (MD.getMetadataID()) { 712 default: 713 llvm_unreachable("Invalid MDNode subclass"); 714 case Metadata::MDTupleKind: 715 break; 716 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \ 717 case Metadata::CLASS##Kind: \ 718 visit##CLASS(cast<CLASS>(MD)); \ 719 break; 720 #include "llvm/IR/Metadata.def" 721 } 722 723 for (const Metadata *Op : MD.operands()) { 724 if (!Op) 725 continue; 726 Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!", 727 &MD, Op); 728 if (auto *N = dyn_cast<MDNode>(Op)) { 729 visitMDNode(*N); 730 continue; 731 } 732 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) { 733 visitValueAsMetadata(*V, nullptr); 734 continue; 735 } 736 } 737 738 // Check these last, so we diagnose problems in operands first. 739 Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD); 740 Assert(MD.isResolved(), "All nodes should be resolved!", &MD); 741 } 742 743 void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) { 744 Assert(MD.getValue(), "Expected valid value", &MD); 745 Assert(!MD.getValue()->getType()->isMetadataTy(), 746 "Unexpected metadata round-trip through values", &MD, MD.getValue()); 747 748 auto *L = dyn_cast<LocalAsMetadata>(&MD); 749 if (!L) 750 return; 751 752 Assert(F, "function-local metadata used outside a function", L); 753 754 // If this was an instruction, bb, or argument, verify that it is in the 755 // function that we expect. 
756 Function *ActualF = nullptr; 757 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) { 758 Assert(I->getParent(), "function-local metadata not in basic block", L, I); 759 ActualF = I->getParent()->getParent(); 760 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue())) 761 ActualF = BB->getParent(); 762 else if (Argument *A = dyn_cast<Argument>(L->getValue())) 763 ActualF = A->getParent(); 764 assert(ActualF && "Unimplemented function local metadata case!"); 765 766 Assert(ActualF == F, "function-local metadata used in wrong function", L); 767 } 768 769 void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) { 770 Metadata *MD = MDV.getMetadata(); 771 if (auto *N = dyn_cast<MDNode>(MD)) { 772 visitMDNode(*N); 773 return; 774 } 775 776 // Only visit each node once. Metadata can be mutually recursive, so this 777 // avoids infinite recursion here, as well as being an optimization. 778 if (!MDNodes.insert(MD).second) 779 return; 780 781 if (auto *V = dyn_cast<ValueAsMetadata>(MD)) 782 visitValueAsMetadata(*V, F); 783 } 784 785 static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); } 786 static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); } 787 static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); } 788 789 template <class Ty> 790 bool isValidMetadataArrayImpl(const MDTuple &N, bool AllowNull) { 791 for (Metadata *MD : N.operands()) { 792 if (MD) { 793 if (!isa<Ty>(MD)) 794 return false; 795 } else { 796 if (!AllowNull) 797 return false; 798 } 799 } 800 return true; 801 } 802 803 template <class Ty> 804 bool isValidMetadataArray(const MDTuple &N) { 805 return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ false); 806 } 807 808 template <class Ty> 809 bool isValidMetadataNullArray(const MDTuple &N) { 810 return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ true); 811 } 812 813 void Verifier::visitDILocation(const DILocation &N) { 814 AssertDI(N.getRawScope() && 
isa<DILocalScope>(N.getRawScope()), 815 "location requires a valid scope", &N, N.getRawScope()); 816 if (auto *IA = N.getRawInlinedAt()) 817 AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA); 818 } 819 820 void Verifier::visitGenericDINode(const GenericDINode &N) { 821 AssertDI(N.getTag(), "invalid tag", &N); 822 } 823 824 void Verifier::visitDIScope(const DIScope &N) { 825 if (auto *F = N.getRawFile()) 826 AssertDI(isa<DIFile>(F), "invalid file", &N, F); 827 } 828 829 void Verifier::visitDISubrange(const DISubrange &N) { 830 AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N); 831 AssertDI(N.getCount() >= -1, "invalid subrange count", &N); 832 } 833 834 void Verifier::visitDIEnumerator(const DIEnumerator &N) { 835 AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N); 836 } 837 838 void Verifier::visitDIBasicType(const DIBasicType &N) { 839 AssertDI(N.getTag() == dwarf::DW_TAG_base_type || 840 N.getTag() == dwarf::DW_TAG_unspecified_type, 841 "invalid tag", &N); 842 } 843 844 void Verifier::visitDIDerivedType(const DIDerivedType &N) { 845 // Common scope checks. 
846 visitDIScope(N); 847 848 AssertDI(N.getTag() == dwarf::DW_TAG_typedef || 849 N.getTag() == dwarf::DW_TAG_pointer_type || 850 N.getTag() == dwarf::DW_TAG_ptr_to_member_type || 851 N.getTag() == dwarf::DW_TAG_reference_type || 852 N.getTag() == dwarf::DW_TAG_rvalue_reference_type || 853 N.getTag() == dwarf::DW_TAG_const_type || 854 N.getTag() == dwarf::DW_TAG_volatile_type || 855 N.getTag() == dwarf::DW_TAG_restrict_type || 856 N.getTag() == dwarf::DW_TAG_member || 857 N.getTag() == dwarf::DW_TAG_inheritance || 858 N.getTag() == dwarf::DW_TAG_friend, 859 "invalid tag", &N); 860 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) { 861 AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N, 862 N.getRawExtraData()); 863 } 864 865 AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope()); 866 AssertDI(isType(N.getRawBaseType()), "invalid base type", &N, 867 N.getRawBaseType()); 868 } 869 870 static bool hasConflictingReferenceFlags(unsigned Flags) { 871 return (Flags & DINode::FlagLValueReference) && 872 (Flags & DINode::FlagRValueReference); 873 } 874 875 void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) { 876 auto *Params = dyn_cast<MDTuple>(&RawParams); 877 AssertDI(Params, "invalid template params", &N, &RawParams); 878 for (Metadata *Op : Params->operands()) { 879 AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter", 880 &N, Params, Op); 881 } 882 } 883 884 void Verifier::visitDICompositeType(const DICompositeType &N) { 885 // Common scope checks. 
  visitDIScope(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
               N.getTag() == dwarf::DW_TAG_structure_type ||
               N.getTag() == dwarf::DW_TAG_union_type ||
               N.getTag() == dwarf::DW_TAG_enumeration_type ||
               N.getTag() == dwarf::DW_TAG_class_type,
           "invalid tag", &N);

  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
           N.getRawBaseType());

  // Element lists, when present, must be plain tuples.
  AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
           "invalid composite elements", &N, N.getRawElements());
  AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
           N.getRawVTableHolder());
  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
           "invalid reference flags", &N);
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  // Classes and unions must be attached to a file with a non-empty name.
  if (N.getTag() == dwarf::DW_TAG_class_type ||
      N.getTag() == dwarf::DW_TAG_union_type) {
    AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
             "class/union requires a filename", &N, N.getFile());
  }
}

void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
           "invalid reference flags", &N);
}

void Verifier::visitDIFile(const DIFile &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
}

// Compile units must be distinct and reference well-formed tuples of enums,
// retained types, global variables, imported entities, and macros.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  AssertDI(N.isDistinct(), "compile units must be distinct", &N);
  AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
           N.getRawFile());
  AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
           N.getFile());

  AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
           "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
      AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
               "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained entries must be types, or subprogram *declarations* only.
      AssertDI(Op && (isa<DIType>(Op) ||
                      (isa<DISubprogram>(Op) &&
                       cast<DISubprogram>(Op)->isDefinition() == false)),
               "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
      AssertDI(Op && isa<DIGlobalVariable>(Op), "invalid global variable ref",
               &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
               &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Record this unit; CUVisited is presumably cross-checked later in the
  // verification pass.
  CUVisited.insert(&N);
}

void
Verifier::visitDISubprogram(const DISubprogram &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
  if (auto *T = N.getRawType())
    AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
           N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration referenced from a definition must itself not be a
  // definition.
  if (auto *S = N.getRawDeclaration())
    AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
             "invalid subprogram declaration", &N, S);
  if (auto *RawVars = N.getRawVariables()) {
    auto *Vars = dyn_cast<MDTuple>(RawVars);
    AssertDI(Vars, "invalid variable list", &N, RawVars);
    for (Metadata *Op : Vars->operands()) {
      AssertDI(Op && isa<DILocalVariable>(Op), "invalid local variable", &N,
               Vars, Op);
    }
  }
  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
           "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
    AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
  }
}

void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
           "invalid local scope", &N, N.getRawScope());
}

void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
  visitDILexicalBlockBase(N);

  // A column number is only meaningful when a line number is also present.
  AssertDI(N.getLine() || !N.getColumn(),
           "cannot have column info without line info", &N);
}

void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}

void Verifier::visitDINamespace(const DINamespace &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
  if (auto *S = N.getRawScope())
    AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
}

void Verifier::visitDIMacro(const DIMacro &N) {
  AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
               N.getMacinfoType() == dwarf::DW_MACINFO_undef,
           "invalid macinfo type", &N);
  AssertDI(!N.getName().empty(), "anonymous macro", &N);
  if (!N.getValue().empty()) {
    assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
  }
}

void Verifier::visitDIMacroFile(const DIMacroFile &N) {
  AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
           "invalid macinfo type", &N);
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);

  if (auto *Array = N.getRawElements()) {
    AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getElements()->operands()) {
      AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
}

void Verifier::visitDIModule(const DIModule &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  AssertDI(!N.getName().empty(), "anonymous module", &N);
}

void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}

void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
  visitDITemplateParameter(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
           &N);
}

void Verifier::visitDITemplateValueParameter(
    const DITemplateValueParameter &N) {
  visitDITemplateParameter(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
               N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
               N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
           "invalid tag", &N);
}

// Checks shared by global and local variables.
void Verifier::visitDIVariable(const DIVariable &N) {
  if (auto *S = N.getRawScope())
    AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
}

void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
1105 visitDIVariable(N); 1106 1107 AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N); 1108 AssertDI(!N.getName().empty(), "missing global variable name", &N); 1109 if (auto *V = N.getRawVariable()) { 1110 AssertDI(isa<ConstantAsMetadata>(V) && 1111 !isa<Function>(cast<ConstantAsMetadata>(V)->getValue()), 1112 "invalid global varaible ref", &N, V); 1113 visitConstantExprsRecursively(cast<ConstantAsMetadata>(V)->getValue()); 1114 } 1115 if (auto *Member = N.getRawStaticDataMemberDeclaration()) { 1116 AssertDI(isa<DIDerivedType>(Member), 1117 "invalid static data member declaration", &N, Member); 1118 } 1119 } 1120 1121 void Verifier::visitDILocalVariable(const DILocalVariable &N) { 1122 // Checks common to all variables. 1123 visitDIVariable(N); 1124 1125 AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N); 1126 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()), 1127 "local variable requires a valid scope", &N, N.getRawScope()); 1128 } 1129 1130 void Verifier::visitDIExpression(const DIExpression &N) { 1131 AssertDI(N.isValid(), "invalid expression", &N); 1132 } 1133 1134 void Verifier::visitDIObjCProperty(const DIObjCProperty &N) { 1135 AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N); 1136 if (auto *T = N.getRawType()) 1137 AssertDI(isType(T), "invalid type ref", &N, T); 1138 if (auto *F = N.getRawFile()) 1139 AssertDI(isa<DIFile>(F), "invalid file", &N, F); 1140 } 1141 1142 void Verifier::visitDIImportedEntity(const DIImportedEntity &N) { 1143 AssertDI(N.getTag() == dwarf::DW_TAG_imported_module || 1144 N.getTag() == dwarf::DW_TAG_imported_declaration, 1145 "invalid tag", &N); 1146 if (auto *S = N.getRawScope()) 1147 AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S); 1148 AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N, 1149 N.getRawEntity()); 1150 } 1151 1152 void Verifier::visitComdat(const Comdat &C) { 1153 // The Module is invalid if the GlobalValue has 
private linkage. Entities 1154 // with private linkage don't have entries in the symbol table. 1155 if (const GlobalValue *GV = M->getNamedValue(C.getName())) 1156 Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage", 1157 GV); 1158 } 1159 1160 void Verifier::visitModuleIdents(const Module &M) { 1161 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident"); 1162 if (!Idents) 1163 return; 1164 1165 // llvm.ident takes a list of metadata entry. Each entry has only one string. 1166 // Scan each llvm.ident entry and make sure that this requirement is met. 1167 for (const MDNode *N : Idents->operands()) { 1168 Assert(N->getNumOperands() == 1, 1169 "incorrect number of operands in llvm.ident metadata", N); 1170 Assert(dyn_cast_or_null<MDString>(N->getOperand(0)), 1171 ("invalid value for llvm.ident metadata entry operand" 1172 "(the operand should be a string)"), 1173 N->getOperand(0)); 1174 } 1175 } 1176 1177 void Verifier::visitModuleFlags(const Module &M) { 1178 const NamedMDNode *Flags = M.getModuleFlagsMetadata(); 1179 if (!Flags) return; 1180 1181 // Scan each flag, and track the flags and requirements. 1182 DenseMap<const MDString*, const MDNode*> SeenIDs; 1183 SmallVector<const MDNode*, 16> Requirements; 1184 for (const MDNode *MDN : Flags->operands()) 1185 visitModuleFlag(MDN, SeenIDs, Requirements); 1186 1187 // Validate that the requirements in the module are valid. 
  for (const MDNode *Requirement : Requirements) {
    const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
    const Metadata *ReqValue = Requirement->getOperand(1);

    // The required flag must be present in the module...
    const MDNode *Op = SeenIDs.lookup(Flag);
    if (!Op) {
      CheckFailed("invalid requirement on flag, flag is not present in module",
                  Flag);
      continue;
    }

    // ...and must carry exactly the required value (operand 2 of the flag).
    if (Op->getOperand(2) != ReqValue) {
      CheckFailed(("invalid requirement on flag, "
                   "flag does not have the required value"),
                  Flag);
      continue;
    }
  }
}

// Verify a single !llvm.module.flags entry, recording its ID in SeenIDs and
// queueing any 'require' entries in Requirements for later validation.
void
Verifier::visitModuleFlag(const MDNode *Op,
                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments, the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Assert(Op->getNumOperands() == 3,
         "incorrect number of operands in module flag", Op);
  Module::ModFlagBehavior MFB;
  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
    // Distinguish "not a constant integer at all" from "a constant integer
    // outside the known behavior range" in the diagnostics.
    Assert(
        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
        "invalid behavior operand in module flag (expected constant integer)",
        Op->getOperand(0));
    Assert(false,
           "invalid behavior operand in module flag (unexpected constant)",
           Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
  Assert(ID, "invalid ID operand in module flag (expected metadata string)",
         Op->getOperand(1));

  // Sanity check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
    Assert(Value && Value->getNumOperands() == 2,
           "invalid value for 'require' module flag (expected metadata pair)",
           Op->getOperand(2));
    Assert(isa<MDString>(Value->getOperand(0)),
           ("invalid value for 'require' module flag "
            "(first value operand should be a string)"),
           Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags are
    // scanned.
    Requirements.push_back(Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Assert(isa<MDNode>(Op->getOperand(2)),
           "invalid value for 'append'-type module flag "
           "(expected a metadata node)",
           Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
    Assert(Inserted,
           "module flag identifiers must be unique (or of 'require' type)", ID);
  }
}

// Check that every enum attribute in the slot for index Idx is applicable to
// that position (function attribute vs. parameter/return attribute).
void Verifier::verifyAttributeTypes(AttributeSet Attrs, unsigned Idx,
                                    bool isFunction, const Value *V) {
  // Locate the slot whose index matches Idx.
  unsigned Slot = ~0U;
  for (unsigned I = 0, E = Attrs.getNumSlots(); I != E; ++I)
    if (Attrs.getSlotIndex(I) == Idx) {
      Slot = I;
      break;
    }

  assert(Slot != ~0U && "Attribute set inconsistency!");

  for (AttributeSet::iterator I = Attrs.begin(Slot), E = Attrs.end(Slot);
       I != E; ++I) {
    // String attributes are free-form and are not checked here.
    if (I->isStringAttribute())
      continue;

    // The following attributes may only appear on functions.
    if (I->getKindAsEnum() == Attribute::NoReturn ||
        I->getKindAsEnum() == Attribute::NoUnwind ||
        I->getKindAsEnum() == Attribute::NoInline ||
        I->getKindAsEnum() == Attribute::AlwaysInline ||
        I->getKindAsEnum() == Attribute::OptimizeForSize ||
        I->getKindAsEnum() == Attribute::StackProtect ||
        I->getKindAsEnum() ==
        Attribute::StackProtectReq ||
        I->getKindAsEnum() == Attribute::StackProtectStrong ||
        I->getKindAsEnum() == Attribute::SafeStack ||
        I->getKindAsEnum() == Attribute::NoRedZone ||
        I->getKindAsEnum() == Attribute::NoImplicitFloat ||
        I->getKindAsEnum() == Attribute::Naked ||
        I->getKindAsEnum() == Attribute::InlineHint ||
        I->getKindAsEnum() == Attribute::StackAlignment ||
        I->getKindAsEnum() == Attribute::UWTable ||
        I->getKindAsEnum() == Attribute::NonLazyBind ||
        I->getKindAsEnum() == Attribute::ReturnsTwice ||
        I->getKindAsEnum() == Attribute::SanitizeAddress ||
        I->getKindAsEnum() == Attribute::SanitizeThread ||
        I->getKindAsEnum() == Attribute::SanitizeMemory ||
        I->getKindAsEnum() == Attribute::MinSize ||
        I->getKindAsEnum() == Attribute::NoDuplicate ||
        I->getKindAsEnum() == Attribute::Builtin ||
        I->getKindAsEnum() == Attribute::NoBuiltin ||
        I->getKindAsEnum() == Attribute::Cold ||
        I->getKindAsEnum() == Attribute::OptimizeNone ||
        I->getKindAsEnum() == Attribute::JumpTable ||
        I->getKindAsEnum() == Attribute::Convergent ||
        I->getKindAsEnum() == Attribute::ArgMemOnly ||
        I->getKindAsEnum() == Attribute::NoRecurse ||
        I->getKindAsEnum() == Attribute::InaccessibleMemOnly ||
        I->getKindAsEnum() == Attribute::InaccessibleMemOrArgMemOnly ||
        I->getKindAsEnum() == Attribute::AllocSize) {
      if (!isFunction) {
        CheckFailed("Attribute '" + I->getAsString() +
                    "' only applies to functions!", V);
        return;
      }
    } else if (I->getKindAsEnum() == Attribute::ReadOnly ||
               I->getKindAsEnum() == Attribute::ReadNone) {
      // readonly/readnone are valid on functions and parameters, but not on
      // the return slot (Idx == 0).
      if (Idx == 0) {
        CheckFailed("Attribute '" + I->getAsString() +
                    "' does not apply to function returns");
        return;
      }
    } else if (isFunction) {
      // Everything else is a parameter/return attribute only.
      CheckFailed("Attribute '" + I->getAsString() +
                  "' does not apply to functions!", V);
      return;
    }
  }
}

// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type.  The value V is printed in error messages.
void Verifier::verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
                                    bool isReturnValue, const Value *V) {
  if (!Attrs.hasAttributes(Idx))
    return;

  verifyAttributeTypes(Attrs, Idx, false, V);

  // None of these ABI attributes make sense on a return slot.
  if (isReturnValue)
    Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
               !Attrs.hasAttribute(Idx, Attribute::Nest) &&
               !Attrs.hasAttribute(Idx, Attribute::StructRet) &&
               !Attrs.hasAttribute(Idx, Attribute::NoCapture) &&
               !Attrs.hasAttribute(Idx, Attribute::Returned) &&
               !Attrs.hasAttribute(Idx, Attribute::InAlloca) &&
               !Attrs.hasAttribute(Idx, Attribute::SwiftSelf) &&
               !Attrs.hasAttribute(Idx, Attribute::SwiftError),
           "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
           "'returned', 'swiftself', and 'swifterror' do not apply to return "
           "values!",
           V);

  // Check for mutually incompatible attributes.  Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Idx, Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Idx, Attribute::InAlloca);
  // sret and inreg are counted as one: they are the only compatible pair.
  AttrCount += Attrs.hasAttribute(Idx, Attribute::StructRet) ||
               Attrs.hasAttribute(Idx, Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Idx, Attribute::Nest);
  Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
                         "and 'sret' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::InAlloca) &&
           Attrs.hasAttribute(Idx, Attribute::ReadOnly)),
         "Attributes "
         "'inalloca and readonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::StructRet) &&
           Attrs.hasAttribute(Idx, Attribute::Returned)),
         "Attributes "
         "'sret and returned' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::ZExt) &&
           Attrs.hasAttribute(Idx, Attribute::SExt)),
         "Attributes "
         "'zeroext and signext' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadNone) &&
           Attrs.hasAttribute(Idx, Attribute::ReadOnly)),
         "Attributes "
         "'readnone and readonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::NoInline) &&
           Attrs.hasAttribute(Idx, Attribute::AlwaysInline)),
         "Attributes "
         "'noinline and alwaysinline' are incompatible!",
         V);

  // Reject attributes that AttributeFuncs::typeIncompatible deems invalid for
  // this type.
  Assert(!AttrBuilder(Attrs, Idx)
              .overlaps(AttributeFuncs::typeIncompatible(Ty)),
         "Wrong types for attribute: " +
             AttributeSet::get(*Context, Idx,
                               AttributeFuncs::typeIncompatible(Ty)).getAsString(Idx),
         V);

  if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
    SmallPtrSet<Type*, 4> Visited;
    // byval/inalloca need a pointee with a known size.
    if (!PTy->getElementType()->isSized(&Visited)) {
      Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
                 !Attrs.hasAttribute(Idx, Attribute::InAlloca),
             "Attributes 'byval' and 'inalloca' do not support unsized types!",
             V);
    }
    // swifterror demands a pointer-to-pointer parameter.
    if (!isa<PointerType>(PTy->getElementType()))
      Assert(!Attrs.hasAttribute(Idx, Attribute::SwiftError),
             "Attribute 'swifterror' only applies to parameters "
             "with pointer to pointer type!",
             V);
  } else {
    Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal),
           "Attribute 'byval' only applies to parameters with pointer type!",
           V);
    Assert(!Attrs.hasAttribute(Idx, Attribute::SwiftError),
           "Attribute 'swifterror' only applies to parameters "
           "with pointer type!",
           V);
  }
}

// Check parameter attributes against a function type.
// The value V is printed in error messages.
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
                                   const Value *V) {
  if (Attrs.isEmpty())
    return;

  bool SawNest = false;
  bool SawReturned = false;
  bool SawSRet = false;
  bool SawSwiftSelf = false;
  bool SawSwiftError = false;

  for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
    unsigned Idx = Attrs.getSlotIndex(i);

    // Map the slot index to the type it annotates: 0 is the return type,
    // 1..NumParams are the parameters; anything past that is for varargs.
    Type *Ty;
    if (Idx == 0)
      Ty = FT->getReturnType();
    else if (Idx-1 < FT->getNumParams())
      Ty = FT->getParamType(Idx-1);
    else
      break; // VarArgs attributes, verified elsewhere.
    verifyParameterAttrs(Attrs, Idx, Ty, Idx == 0, V);

    if (Idx == 0)
      continue;

    // The attributes below may each appear on at most one parameter.
    if (Attrs.hasAttribute(Idx, Attribute::Nest)) {
      Assert(!SawNest, "More than one parameter has attribute nest!", V);
      SawNest = true;
    }

    if (Attrs.hasAttribute(Idx, Attribute::Returned)) {
      Assert(!SawReturned, "More than one parameter has attribute returned!",
             V);
      Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
             "Incompatible "
             "argument and return types for 'returned' attribute",
             V);
      SawReturned = true;
    }

    if (Attrs.hasAttribute(Idx, Attribute::StructRet)) {
      Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
      Assert(Idx == 1 || Idx == 2,
             "Attribute 'sret' is not on first or second parameter!", V);
      SawSRet = true;
    }

    if (Attrs.hasAttribute(Idx, Attribute::SwiftSelf)) {
      Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
      SawSwiftSelf = true;
    }

    if (Attrs.hasAttribute(Idx, Attribute::SwiftError)) {
      Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
             V);
      SawSwiftError = true;
    }

    if (Attrs.hasAttribute(Idx, Attribute::InAlloca)) {
      Assert(Idx == FT->getNumParams(), "inalloca isn't on the last parameter!",
             V);
    }
  }

  if (!Attrs.hasAttributes(AttributeSet::FunctionIndex))
    return;

  // Function-level attributes and their pairwise incompatibilities.
  verifyAttributeTypes(Attrs, AttributeSet::FunctionIndex, true, V);

  Assert(
      !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
        Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly)),
      "Attributes 'readnone and readonly' are incompatible!", V);

  Assert(
      !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
        Attrs.hasAttribute(AttributeSet::FunctionIndex,
                           Attribute::InaccessibleMemOrArgMemOnly)),
      "Attributes 'readnone and inaccessiblemem_or_argmemonly' are incompatible!", V);

  Assert(
      !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
        Attrs.hasAttribute(AttributeSet::FunctionIndex,
                           Attribute::InaccessibleMemOnly)),
      "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);

  Assert(
      !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline) &&
        Attrs.hasAttribute(AttributeSet::FunctionIndex,
                           Attribute::AlwaysInline)),
      "Attributes 'noinline and alwaysinline' are incompatible!", V);

  if (Attrs.hasAttribute(AttributeSet::FunctionIndex,
                         Attribute::OptimizeNone)) {
    // optnone requires noinline and excludes the size optimizations.
    Assert(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline),
           "Attribute 'optnone' requires 'noinline'!", V);

    Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex,
                               Attribute::OptimizeForSize),
           "Attributes 'optsize and optnone' are incompatible!", V);

    Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize),
           "Attributes 'minsize and optnone' are incompatible!", V);
  }

  if (Attrs.hasAttribute(AttributeSet::FunctionIndex,
                         Attribute::JumpTable)) {
    const GlobalValue *GV = cast<GlobalValue>(V);
    Assert(GV->hasUnnamedAddr(),
           "Attribute 'jumptable' requires 'unnamed_addr'", V);
  }

  if (Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::AllocSize)) {
    std::pair<unsigned, Optional<unsigned>> Args =
        Attrs.getAllocSizeArgs(AttributeSet::FunctionIndex);

    // Each allocsize index must name an integer parameter of this function.
    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
      if (ParamNo >= FT->getNumParams()) {
        CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
        return false;
      }

      if (!FT->getParamType(ParamNo)->isIntegerTy()) {
        CheckFailed("'allocsize' " + Name +
                    " argument must refer to an integer parameter",
                    V);
        return false;
      }

      return true;
    };

    if (!CheckParam("element size", Args.first))
      return;

    if (Args.second && !CheckParam("number of elements", *Args.second))
      return;
  }
}

// Verify function-attached metadata; currently only !prof
// "function_entry_count" entries are checked.
void Verifier::verifyFunctionMetadata(
    const SmallVector<std::pair<unsigned, MDNode *>, 4> MDs) {
  if (MDs.empty())
    return;

  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Assert(MD->getNumOperands() == 2,
             "!prof annotations should have exactly 2 operands", MD);

      // Check first operand.
      Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
             MD);
      Assert(isa<MDString>(MD->getOperand(0)),
             "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
      Assert(ProfName.equals("function_entry_count"),
             "first operand should be 'function_entry_count'", MD);

      // Check second operand.
      Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
             MD);
      Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
             "expected integer argument to function_entry_count", MD);
    }
  }
}

// Walk a constant and all of its constant operands with an explicit worklist,
// visiting each constant at most once (tracked in ConstantExprVisited).
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  SmallVector<const Constant *, 16> Stack;
  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Assert(GV->getParent() == M, "Referencing global in another module!",
             EntryC, M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      // Skip anything already queued or visited.
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}

// Only bitcast constant expressions are checked here; the cast must be valid
// for the operand and result types.
void Verifier::visitConstantExpr(const ConstantExpr *CE) {
  if (CE->getOpcode() != Instruction::BitCast)
    return;

  Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
                               CE->getType()),
         "Invalid bitcast", CE);
}

// Return true if the attribute slot indices are plausible for a function
// with Params parameters.  Only the last slot (or the slot just before a
// FunctionIndex slot) is examined, relying on AttributeSet keeping slot
// indices sorted in ascending order.
bool Verifier::verifyAttributeCount(AttributeSet Attrs, unsigned Params) {
  if (Attrs.getNumSlots() == 0)
    return true;

  unsigned LastSlot = Attrs.getNumSlots() - 1;
  unsigned LastIndex = Attrs.getSlotIndex(LastSlot);
  if (LastIndex <= Params
      || (LastIndex == AttributeSet::FunctionIndex
          && (LastSlot == 0 || Attrs.getSlotIndex(LastSlot - 1) <= Params)))
    return true;

  return false;
}

/// Verify that statepoint intrinsic is well formed.
void Verifier::verifyStatepoint(ImmutableCallSite CS) {
  assert(CS.getCalledFunction() &&
         CS.getCalledFunction()->getIntrinsicID() ==
             Intrinsic::experimental_gc_statepoint);

  const Instruction &CI = *CS.getInstruction();

  Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() &&
             !CS.onlyAccessesArgMemory(),
         "gc.statepoint must read and write all memory to preserve "
         "reordering restrictions required by safepoint semantics",
         &CI);

  // Operand 0: statepoint ID.
  const Value *IDV = CS.getArgument(0);
  Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
         &CI);

  // Operand 1: number of patchable bytes.
  const Value *NumPatchBytesV = CS.getArgument(1);
  Assert(isa<ConstantInt>(NumPatchBytesV),
         "gc.statepoint number of patchable bytes must be a constant integer",
         &CI);
  const int64_t NumPatchBytes =
      cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
  Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be "
                             "positive",
         &CI);

  // Operand 2: the wrapped callee, which must be a function pointer.
  const Value *Target = CS.getArgument(2);
  auto *PT = dyn_cast<PointerType>(Target->getType());
  Assert(PT && PT->getElementType()->isFunctionTy(),
         "gc.statepoint callee must be of function pointer type", &CI, Target);
  FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());

  // Operand 3: number of arguments forwarded to the wrapped callee.
  const Value *NumCallArgsV = CS.getArgument(3);
  Assert(isa<ConstantInt>(NumCallArgsV),
         "gc.statepoint number of arguments to underlying call "
         "must be constant integer",
         &CI);
  const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
  Assert(NumCallArgs >= 0,
         "gc.statepoint number of arguments to underlying call "
         "must be positive",
         &CI);
  const int NumParams = (int)TargetFuncType->getNumParams();
  if (TargetFuncType->isVarArg()) {
    Assert(NumCallArgs >= NumParams,
           "gc.statepoint mismatch in number of vararg call args", &CI);

    // TODO: Remove this limitation
    Assert(TargetFuncType->getReturnType()->isVoidTy(),
           "gc.statepoint doesn't support wrapping non-void "
           "vararg functions yet",
           &CI);
  } else
    Assert(NumCallArgs == NumParams,
           "gc.statepoint mismatch in number of call args", &CI);

  // Operand 4: statepoint flags; only bits inside MaskAll are defined.
  const Value *FlagsV = CS.getArgument(4);
  Assert(isa<ConstantInt>(FlagsV),
         "gc.statepoint flags must be constant integer", &CI);
  const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
  Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
         "unknown flag used in gc.statepoint flags argument", &CI);

  // Verify that the types of the call parameter arguments match
  // the type of the wrapped callee.
  for (int i = 0; i < NumParams; i++) {
    Type *ParamType = TargetFuncType->getParamType(i);
    Type *ArgType = CS.getArgument(5 + i)->getType();
    Assert(ArgType == ParamType,
           "gc.statepoint call argument does not match wrapped "
           "function type",
           &CI);
  }

  const int EndCallArgsInx = 4 + NumCallArgs;

  // Next comes the transition-argument section, prefixed with its length.
  const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1);
  Assert(isa<ConstantInt>(NumTransitionArgsV),
         "gc.statepoint number of transition arguments "
         "must be constant integer",
         &CI);
  const int NumTransitionArgs =
      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
  Assert(NumTransitionArgs >= 0,
         "gc.statepoint number of transition arguments must be positive", &CI);
  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;

  // Then the deoptimization-argument section, also length-prefixed.
  const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1);
  Assert(isa<ConstantInt>(NumDeoptArgsV),
         "gc.statepoint number of deoptimization arguments "
         "must be constant integer",
         &CI);
  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
  Assert(NumDeoptArgs >= 0,
"gc.statepoint number of deoptimization arguments " 1768 "must be positive", 1769 &CI); 1770 1771 const int ExpectedNumArgs = 1772 7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs; 1773 Assert(ExpectedNumArgs <= (int)CS.arg_size(), 1774 "gc.statepoint too few arguments according to length fields", &CI); 1775 1776 // Check that the only uses of this gc.statepoint are gc.result or 1777 // gc.relocate calls which are tied to this statepoint and thus part 1778 // of the same statepoint sequence 1779 for (const User *U : CI.users()) { 1780 const CallInst *Call = dyn_cast<const CallInst>(U); 1781 Assert(Call, "illegal use of statepoint token", &CI, U); 1782 if (!Call) continue; 1783 Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call), 1784 "gc.result or gc.relocate are the only value uses" 1785 "of a gc.statepoint", 1786 &CI, U); 1787 if (isa<GCResultInst>(Call)) { 1788 Assert(Call->getArgOperand(0) == &CI, 1789 "gc.result connected to wrong gc.statepoint", &CI, Call); 1790 } else if (isa<GCRelocateInst>(Call)) { 1791 Assert(Call->getArgOperand(0) == &CI, 1792 "gc.relocate connected to wrong gc.statepoint", &CI, Call); 1793 } 1794 } 1795 1796 // Note: It is legal for a single derived pointer to be listed multiple 1797 // times. It's non-optimal, but it is legal. It can also happen after 1798 // insertion if we strip a bitcast away. 1799 // Note: It is really tempting to check that each base is relocated and 1800 // that a derived pointer is never reused as a base pointer. This turns 1801 // out to be problematic since optimizations run after safepoint insertion 1802 // can recognize equality properties that the insertion logic doesn't know 1803 // about. 
See example statepoint.ll in the verifier subdirectory 1804 } 1805 1806 void Verifier::verifyFrameRecoverIndices() { 1807 for (auto &Counts : FrameEscapeInfo) { 1808 Function *F = Counts.first; 1809 unsigned EscapedObjectCount = Counts.second.first; 1810 unsigned MaxRecoveredIndex = Counts.second.second; 1811 Assert(MaxRecoveredIndex <= EscapedObjectCount, 1812 "all indices passed to llvm.localrecover must be less than the " 1813 "number of arguments passed ot llvm.localescape in the parent " 1814 "function", 1815 F); 1816 } 1817 } 1818 1819 static Instruction *getSuccPad(TerminatorInst *Terminator) { 1820 BasicBlock *UnwindDest; 1821 if (auto *II = dyn_cast<InvokeInst>(Terminator)) 1822 UnwindDest = II->getUnwindDest(); 1823 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator)) 1824 UnwindDest = CSI->getUnwindDest(); 1825 else 1826 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest(); 1827 return UnwindDest->getFirstNonPHI(); 1828 } 1829 1830 void Verifier::verifySiblingFuncletUnwinds() { 1831 SmallPtrSet<Instruction *, 8> Visited; 1832 SmallPtrSet<Instruction *, 8> Active; 1833 for (const auto &Pair : SiblingFuncletInfo) { 1834 Instruction *PredPad = Pair.first; 1835 if (Visited.count(PredPad)) 1836 continue; 1837 Active.insert(PredPad); 1838 TerminatorInst *Terminator = Pair.second; 1839 do { 1840 Instruction *SuccPad = getSuccPad(Terminator); 1841 if (Active.count(SuccPad)) { 1842 // Found a cycle; report error 1843 Instruction *CyclePad = SuccPad; 1844 SmallVector<Instruction *, 8> CycleNodes; 1845 do { 1846 CycleNodes.push_back(CyclePad); 1847 TerminatorInst *CycleTerminator = SiblingFuncletInfo[CyclePad]; 1848 if (CycleTerminator != CyclePad) 1849 CycleNodes.push_back(CycleTerminator); 1850 CyclePad = getSuccPad(CycleTerminator); 1851 } while (CyclePad != SuccPad); 1852 Assert(false, "EH pads can't handle each other's exceptions", 1853 ArrayRef<Instruction *>(CycleNodes)); 1854 } 1855 // Don't re-walk a node we've already checked 1856 if 
(!Visited.insert(SuccPad).second) 1857 break; 1858 // Walk to this successor if it has a map entry. 1859 PredPad = SuccPad; 1860 auto TermI = SiblingFuncletInfo.find(PredPad); 1861 if (TermI == SiblingFuncletInfo.end()) 1862 break; 1863 Terminator = TermI->second; 1864 Active.insert(PredPad); 1865 } while (true); 1866 // Each node only has one successor, so we've walked all the active 1867 // nodes' successors. 1868 Active.clear(); 1869 } 1870 } 1871 1872 // visitFunction - Verify that a function is ok. 1873 // 1874 void Verifier::visitFunction(const Function &F) { 1875 // Check function arguments. 1876 FunctionType *FT = F.getFunctionType(); 1877 unsigned NumArgs = F.arg_size(); 1878 1879 Assert(Context == &F.getContext(), 1880 "Function context does not match Module context!", &F); 1881 1882 Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F); 1883 Assert(FT->getNumParams() == NumArgs, 1884 "# formal arguments must match # of arguments for function type!", &F, 1885 FT); 1886 Assert(F.getReturnType()->isFirstClassType() || 1887 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(), 1888 "Functions cannot return aggregate values!", &F); 1889 1890 Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(), 1891 "Invalid struct return type!", &F); 1892 1893 AttributeSet Attrs = F.getAttributes(); 1894 1895 Assert(verifyAttributeCount(Attrs, FT->getNumParams()), 1896 "Attribute after last parameter!", &F); 1897 1898 // Check function attributes. 1899 verifyFunctionAttrs(FT, Attrs, &F); 1900 1901 // On function declarations/definitions, we do not support the builtin 1902 // attribute. We do not check this in VerifyFunctionAttrs since that is 1903 // checking for Attributes that can/can not ever be on functions. 
1904 Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::Builtin), 1905 "Attribute 'builtin' can only be applied to a callsite.", &F); 1906 1907 // Check that this function meets the restrictions on this calling convention. 1908 // Sometimes varargs is used for perfectly forwarding thunks, so some of these 1909 // restrictions can be lifted. 1910 switch (F.getCallingConv()) { 1911 default: 1912 case CallingConv::C: 1913 break; 1914 case CallingConv::Fast: 1915 case CallingConv::Cold: 1916 case CallingConv::Intel_OCL_BI: 1917 case CallingConv::PTX_Kernel: 1918 case CallingConv::PTX_Device: 1919 Assert(!F.isVarArg(), "Calling convention does not support varargs or " 1920 "perfect forwarding!", 1921 &F); 1922 break; 1923 } 1924 1925 bool isLLVMdotName = F.getName().size() >= 5 && 1926 F.getName().substr(0, 5) == "llvm."; 1927 1928 // Check that the argument values match the function type for this function... 1929 unsigned i = 0; 1930 for (const Argument &Arg : F.args()) { 1931 Assert(Arg.getType() == FT->getParamType(i), 1932 "Argument value does not match function argument type!", &Arg, 1933 FT->getParamType(i)); 1934 Assert(Arg.getType()->isFirstClassType(), 1935 "Function arguments must have first-class types!", &Arg); 1936 if (!isLLVMdotName) { 1937 Assert(!Arg.getType()->isMetadataTy(), 1938 "Function takes metadata but isn't an intrinsic", &Arg, &F); 1939 Assert(!Arg.getType()->isTokenTy(), 1940 "Function takes token but isn't an intrinsic", &Arg, &F); 1941 } 1942 1943 // Check that swifterror argument is only used by loads and stores. 1944 if (Attrs.hasAttribute(i+1, Attribute::SwiftError)) { 1945 verifySwiftErrorValue(&Arg); 1946 } 1947 ++i; 1948 } 1949 1950 if (!isLLVMdotName) 1951 Assert(!F.getReturnType()->isTokenTy(), 1952 "Functions returns a token but isn't an intrinsic", &F); 1953 1954 // Get the function metadata attachments. 
1955 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs; 1956 F.getAllMetadata(MDs); 1957 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync"); 1958 verifyFunctionMetadata(MDs); 1959 1960 // Check validity of the personality function 1961 if (F.hasPersonalityFn()) { 1962 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts()); 1963 if (Per) 1964 Assert(Per->getParent() == F.getParent(), 1965 "Referencing personality function in another module!", 1966 &F, F.getParent(), Per, Per->getParent()); 1967 } 1968 1969 if (F.isMaterializable()) { 1970 // Function has a body somewhere we can't see. 1971 Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F, 1972 MDs.empty() ? nullptr : MDs.front().second); 1973 } else if (F.isDeclaration()) { 1974 Assert(MDs.empty(), "function without a body cannot have metadata", &F, 1975 MDs.empty() ? nullptr : MDs.front().second); 1976 Assert(!F.hasPersonalityFn(), 1977 "Function declaration shouldn't have a personality routine", &F); 1978 } else { 1979 // Verify that this function (which has a body) is not named "llvm.*". It 1980 // is not legal to define intrinsics. 1981 Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F); 1982 1983 // Check the entry node 1984 const BasicBlock *Entry = &F.getEntryBlock(); 1985 Assert(pred_empty(Entry), 1986 "Entry block to function must not have predecessors!", Entry); 1987 1988 // The address of the entry block cannot be taken, unless it is dead. 1989 if (Entry->hasAddressTaken()) { 1990 Assert(!BlockAddress::lookup(Entry)->isConstantUsed(), 1991 "blockaddress may not be used with the entry block!", Entry); 1992 } 1993 1994 unsigned NumDebugAttachments = 0; 1995 // Visit metadata attachments. 1996 for (const auto &I : MDs) { 1997 // Verify that the attachment is legal. 
1998 switch (I.first) { 1999 default: 2000 break; 2001 case LLVMContext::MD_dbg: 2002 ++NumDebugAttachments; 2003 AssertDI(NumDebugAttachments == 1, 2004 "function must have a single !dbg attachment", &F, I.second); 2005 AssertDI(isa<DISubprogram>(I.second), 2006 "function !dbg attachment must be a subprogram", &F, I.second); 2007 break; 2008 } 2009 2010 // Verify the metadata itself. 2011 visitMDNode(*I.second); 2012 } 2013 } 2014 2015 // If this function is actually an intrinsic, verify that it is only used in 2016 // direct call/invokes, never having its "address taken". 2017 // Only do this if the module is materialized, otherwise we don't have all the 2018 // uses. 2019 if (F.getIntrinsicID() && F.getParent()->isMaterialized()) { 2020 const User *U; 2021 if (F.hasAddressTaken(&U)) 2022 Assert(0, "Invalid user of intrinsic instruction!", U); 2023 } 2024 2025 Assert(!F.hasDLLImportStorageClass() || 2026 (F.isDeclaration() && F.hasExternalLinkage()) || 2027 F.hasAvailableExternallyLinkage(), 2028 "Function is marked as dllimport, but not external.", &F); 2029 2030 auto *N = F.getSubprogram(); 2031 if (!N) 2032 return; 2033 2034 visitDISubprogram(*N); 2035 2036 // Check that all !dbg attachments lead to back to N (or, at least, another 2037 // subprogram that describes the same function). 2038 // 2039 // FIXME: Check this incrementally while visiting !dbg attachments. 2040 // FIXME: Only check when N is the canonical subprogram for F. 2041 SmallPtrSet<const MDNode *, 32> Seen; 2042 for (auto &BB : F) 2043 for (auto &I : BB) { 2044 // Be careful about using DILocation here since we might be dealing with 2045 // broken code (this is the Verifier after all). 
2046 DILocation *DL = 2047 dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode()); 2048 if (!DL) 2049 continue; 2050 if (!Seen.insert(DL).second) 2051 continue; 2052 2053 DILocalScope *Scope = DL->getInlinedAtScope(); 2054 if (Scope && !Seen.insert(Scope).second) 2055 continue; 2056 2057 DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr; 2058 2059 // Scope and SP could be the same MDNode and we don't want to skip 2060 // validation in that case 2061 if (SP && ((Scope != SP) && !Seen.insert(SP).second)) 2062 continue; 2063 2064 // FIXME: Once N is canonical, check "SP == &N". 2065 Assert(SP->describes(&F), 2066 "!dbg attachment points at wrong subprogram for function", N, &F, 2067 &I, DL, Scope, SP); 2068 } 2069 } 2070 2071 // verifyBasicBlock - Verify that a basic block is well formed... 2072 // 2073 void Verifier::visitBasicBlock(BasicBlock &BB) { 2074 InstsInThisBlock.clear(); 2075 2076 // Ensure that basic blocks have terminators! 2077 Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB); 2078 2079 // Check constraints that this basic block imposes on all of the PHI nodes in 2080 // it. 2081 if (isa<PHINode>(BB.front())) { 2082 SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB)); 2083 SmallVector<std::pair<BasicBlock*, Value*>, 8> Values; 2084 std::sort(Preds.begin(), Preds.end()); 2085 PHINode *PN; 2086 for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I));++I) { 2087 // Ensure that PHI nodes have at least one entry! 2088 Assert(PN->getNumIncomingValues() != 0, 2089 "PHI nodes must have at least one entry. If the block is dead, " 2090 "the PHI should be removed!", 2091 PN); 2092 Assert(PN->getNumIncomingValues() == Preds.size(), 2093 "PHINode should have one entry for each predecessor of its " 2094 "parent basic block!", 2095 PN); 2096 2097 // Get and sort all incoming values in the PHI node... 
2098 Values.clear(); 2099 Values.reserve(PN->getNumIncomingValues()); 2100 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 2101 Values.push_back(std::make_pair(PN->getIncomingBlock(i), 2102 PN->getIncomingValue(i))); 2103 std::sort(Values.begin(), Values.end()); 2104 2105 for (unsigned i = 0, e = Values.size(); i != e; ++i) { 2106 // Check to make sure that if there is more than one entry for a 2107 // particular basic block in this PHI node, that the incoming values are 2108 // all identical. 2109 // 2110 Assert(i == 0 || Values[i].first != Values[i - 1].first || 2111 Values[i].second == Values[i - 1].second, 2112 "PHI node has multiple entries for the same basic block with " 2113 "different incoming values!", 2114 PN, Values[i].first, Values[i].second, Values[i - 1].second); 2115 2116 // Check to make sure that the predecessors and PHI node entries are 2117 // matched up. 2118 Assert(Values[i].first == Preds[i], 2119 "PHI node entries do not match predecessors!", PN, 2120 Values[i].first, Preds[i]); 2121 } 2122 } 2123 } 2124 2125 // Check that all instructions have their parent pointers set up correctly. 2126 for (auto &I : BB) 2127 { 2128 Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!"); 2129 } 2130 } 2131 2132 void Verifier::visitTerminatorInst(TerminatorInst &I) { 2133 // Ensure that terminators only exist at the end of the basic block. 
2134 Assert(&I == I.getParent()->getTerminator(), 2135 "Terminator found in the middle of a basic block!", I.getParent()); 2136 visitInstruction(I); 2137 } 2138 2139 void Verifier::visitBranchInst(BranchInst &BI) { 2140 if (BI.isConditional()) { 2141 Assert(BI.getCondition()->getType()->isIntegerTy(1), 2142 "Branch condition is not 'i1' type!", &BI, BI.getCondition()); 2143 } 2144 visitTerminatorInst(BI); 2145 } 2146 2147 void Verifier::visitReturnInst(ReturnInst &RI) { 2148 Function *F = RI.getParent()->getParent(); 2149 unsigned N = RI.getNumOperands(); 2150 if (F->getReturnType()->isVoidTy()) 2151 Assert(N == 0, 2152 "Found return instr that returns non-void in Function of void " 2153 "return type!", 2154 &RI, F->getReturnType()); 2155 else 2156 Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(), 2157 "Function return type does not match operand " 2158 "type of return inst!", 2159 &RI, F->getReturnType()); 2160 2161 // Check to make sure that the return value has necessary properties for 2162 // terminators... 2163 visitTerminatorInst(RI); 2164 } 2165 2166 void Verifier::visitSwitchInst(SwitchInst &SI) { 2167 // Check to make sure that all of the constants in the switch instruction 2168 // have the same type as the switched-on value. 
2169 Type *SwitchTy = SI.getCondition()->getType(); 2170 SmallPtrSet<ConstantInt*, 32> Constants; 2171 for (auto &Case : SI.cases()) { 2172 Assert(Case.getCaseValue()->getType() == SwitchTy, 2173 "Switch constants must all be same type as switch value!", &SI); 2174 Assert(Constants.insert(Case.getCaseValue()).second, 2175 "Duplicate integer as switch case", &SI, Case.getCaseValue()); 2176 } 2177 2178 visitTerminatorInst(SI); 2179 } 2180 2181 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) { 2182 Assert(BI.getAddress()->getType()->isPointerTy(), 2183 "Indirectbr operand must have pointer type!", &BI); 2184 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i) 2185 Assert(BI.getDestination(i)->getType()->isLabelTy(), 2186 "Indirectbr destinations must all have pointer type!", &BI); 2187 2188 visitTerminatorInst(BI); 2189 } 2190 2191 void Verifier::visitSelectInst(SelectInst &SI) { 2192 Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1), 2193 SI.getOperand(2)), 2194 "Invalid operands for select instruction!", &SI); 2195 2196 Assert(SI.getTrueValue()->getType() == SI.getType(), 2197 "Select values must have same type as select instruction!", &SI); 2198 visitInstruction(SI); 2199 } 2200 2201 /// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of 2202 /// a pass, if any exist, it's an error. 
2203 /// 2204 void Verifier::visitUserOp1(Instruction &I) { 2205 Assert(0, "User-defined operators should not live outside of a pass!", &I); 2206 } 2207 2208 void Verifier::visitTruncInst(TruncInst &I) { 2209 // Get the source and destination types 2210 Type *SrcTy = I.getOperand(0)->getType(); 2211 Type *DestTy = I.getType(); 2212 2213 // Get the size of the types in bits, we'll need this later 2214 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2215 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2216 2217 Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I); 2218 Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I); 2219 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2220 "trunc source and destination must both be a vector or neither", &I); 2221 Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I); 2222 2223 visitInstruction(I); 2224 } 2225 2226 void Verifier::visitZExtInst(ZExtInst &I) { 2227 // Get the source and destination types 2228 Type *SrcTy = I.getOperand(0)->getType(); 2229 Type *DestTy = I.getType(); 2230 2231 // Get the size of the types in bits, we'll need this later 2232 Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I); 2233 Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I); 2234 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2235 "zext source and destination must both be a vector or neither", &I); 2236 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2237 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2238 2239 Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I); 2240 2241 visitInstruction(I); 2242 } 2243 2244 void Verifier::visitSExtInst(SExtInst &I) { 2245 // Get the source and destination types 2246 Type *SrcTy = I.getOperand(0)->getType(); 2247 Type *DestTy = I.getType(); 2248 2249 // Get the size of the types in bits, we'll need this later 2250 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2251 
unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2252 2253 Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I); 2254 Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I); 2255 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2256 "sext source and destination must both be a vector or neither", &I); 2257 Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I); 2258 2259 visitInstruction(I); 2260 } 2261 2262 void Verifier::visitFPTruncInst(FPTruncInst &I) { 2263 // Get the source and destination types 2264 Type *SrcTy = I.getOperand(0)->getType(); 2265 Type *DestTy = I.getType(); 2266 // Get the size of the types in bits, we'll need this later 2267 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2268 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2269 2270 Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I); 2271 Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I); 2272 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2273 "fptrunc source and destination must both be a vector or neither", &I); 2274 Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I); 2275 2276 visitInstruction(I); 2277 } 2278 2279 void Verifier::visitFPExtInst(FPExtInst &I) { 2280 // Get the source and destination types 2281 Type *SrcTy = I.getOperand(0)->getType(); 2282 Type *DestTy = I.getType(); 2283 2284 // Get the size of the types in bits, we'll need this later 2285 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2286 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2287 2288 Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I); 2289 Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I); 2290 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2291 "fpext source and destination must both be a vector or neither", &I); 2292 Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I); 2293 2294 visitInstruction(I); 2295 } 2296 2297 
// uitofp: integer (or integer vector) source, FP (or FP vector) result;
// vector-ness and element counts must agree.
void Verifier::visitUIToFPInst(UIToFPInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  bool SrcVec = SrcTy->isVectorTy();
  bool DstVec = DestTy->isVectorTy();

  Assert(SrcVec == DstVec,
         "UIToFP source and dest must both be vector or scalar", &I);
  Assert(SrcTy->isIntOrIntVectorTy(),
         "UIToFP source must be integer or integer vector", &I);
  Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
         &I);

  if (SrcVec && DstVec)
    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
               cast<VectorType>(DestTy)->getNumElements(),
           "UIToFP source and dest vector length mismatch", &I);

  visitInstruction(I);
}

// sitofp: same constraints as uitofp (signedness only affects semantics,
// not type shape).
void Verifier::visitSIToFPInst(SIToFPInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  bool SrcVec = SrcTy->isVectorTy();
  bool DstVec = DestTy->isVectorTy();

  Assert(SrcVec == DstVec,
         "SIToFP source and dest must both be vector or scalar", &I);
  Assert(SrcTy->isIntOrIntVectorTy(),
         "SIToFP source must be integer or integer vector", &I);
  Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
         &I);

  if (SrcVec && DstVec)
    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
               cast<VectorType>(DestTy)->getNumElements(),
           "SIToFP source and dest vector length mismatch", &I);

  visitInstruction(I);
}

// fptoui: FP (or FP vector) source, integer (or integer vector) result;
// vector-ness and element counts must agree.
void Verifier::visitFPToUIInst(FPToUIInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  bool SrcVec = SrcTy->isVectorTy();
  bool DstVec = DestTy->isVectorTy();

  Assert(SrcVec == DstVec,
         "FPToUI source and dest must both be vector or scalar", &I);
  Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
         &I);
  Assert(DestTy->isIntOrIntVectorTy(),
         "FPToUI result must be integer or integer vector", &I);

  if (SrcVec && DstVec)
    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
               cast<VectorType>(DestTy)->getNumElements(),
           "FPToUI source and dest vector length mismatch", &I);

  visitInstruction(I);
}

// fptosi: same constraints as fptoui.
void Verifier::visitFPToSIInst(FPToSIInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  bool SrcVec = SrcTy->isVectorTy();
  bool DstVec = DestTy->isVectorTy();

  Assert(SrcVec == DstVec,
         "FPToSI source and dest must both be vector or scalar", &I);
  Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
         &I);
  Assert(DestTy->isIntOrIntVectorTy(),
         "FPToSI result must be integer or integer vector", &I);

  if (SrcVec && DstVec)
    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
               cast<VectorType>(DestTy)->getNumElements(),
           "FPToSI source and dest vector length mismatch", &I);

  visitInstruction(I);
}

// ptrtoint: pointer (or pointer vector) source, integer (or integer vector)
// result, with matching vector-ness and element counts.
void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Assert(SrcTy->getScalarType()->isPointerTy(),
         "PtrToInt source must be pointer", &I);
  Assert(DestTy->getScalarType()->isIntegerTy(),
         "PtrToInt result must be integral", &I);
  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
         &I);

  if (SrcTy->isVectorTy()) {
    // Both casts are guaranteed to succeed here: the vector-ness Assert above
    // returned early on mismatch.
    VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
    VectorType *VDest = dyn_cast<VectorType>(DestTy);
    Assert(VSrc->getNumElements() == VDest->getNumElements(),
           "PtrToInt Vector width mismatch", &I);
  }

  visitInstruction(I);
}

// inttoptr: the inverse shape of ptrtoint — integer source, pointer result.
void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
  // Get the source and destination types
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Assert(SrcTy->getScalarType()->isIntegerTy(),
         "IntToPtr source must be an integral", &I);
  Assert(DestTy->getScalarType()->isPointerTy(),
         "IntToPtr result must be a pointer", &I);
  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
         &I);
  if (SrcTy->isVectorTy()) {
    VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
    VectorType *VDest = dyn_cast<VectorType>(DestTy);
    Assert(VSrc->getNumElements() == VDest->getNumElements(),
           "IntToPtr Vector width mismatch", &I);
  }
  visitInstruction(I);
}

// bitcast: delegate the full legality check to CastInst.
void Verifier::visitBitCastInst(BitCastInst &I) {
  Assert(
      CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
      "Invalid bitcast", &I);
  visitInstruction(I);
}

// addrspacecast: pointer to pointer in a *different* address space, with
// matching vector element counts if vectors of pointers.
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
         &I);
  Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
         &I);
  Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
         "AddrSpaceCast must be between different address spaces", &I);
  if (SrcTy->isVectorTy())
    Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
           "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}

/// visitPHINode - Ensure that a PHI node is well formed.
///
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node.  If not,
  // then there is some other instruction before a PHI.
  Assert(&PN == &PN.getParent()->front() ||
             isa<PHINode>(--BasicBlock::iterator(&PN)),
         "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.
  Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result, and that the incoming blocks are really basic blocks.
  for (Value *IncValue : PN.incoming_values()) {
    Assert(PN.getType() == IncValue->getType(),
           "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints are checked in the visitBasicBlock method.

  visitInstruction(PN);
}

// Shared checks for call and invoke sites: callee pointer/function type,
// argument arity and types, attributes (including inalloca/swifterror and
// the vararg tail), intrinsic dispatch, operand-bundle uniqueness, and the
// !dbg requirement on inlinable calls.  Note that each failed Assert returns
// from this function, so check ordering is significant.
void Verifier::verifyCallSite(CallSite CS) {
  Instruction *I = CS.getInstruction();

  Assert(CS.getCalledValue()->getType()->isPointerTy(),
         "Called function must be a pointer!", I);
  PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());

  Assert(FPTy->getElementType()->isFunctionTy(),
         "Called function is not pointer to function type!", I);

  Assert(FPTy->getElementType() == CS.getFunctionType(),
         "Called function is not the same type as the call!", I);

  FunctionType *FTy = CS.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Assert(CS.arg_size() >= FTy->getNumParams(),
           "Called function requires more parameters than were provided!", I);
  else
    Assert(CS.arg_size() == FTy->getNumParams(),
           "Incorrect number of arguments passed to called function!", I);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Assert(CS.getArgument(i)->getType() == FTy->getParamType(i),
           "Call parameter type does not match function signature!",
           CS.getArgument(i), FTy->getParamType(i), I);

  AttributeSet Attrs = CS.getAttributes();

  Assert(verifyAttributeCount(Attrs, CS.arg_size()),
         "Attribute after last parameter!", I);

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, I);

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (CS.hasInAllocaArgument()) {
    Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Assert(AI->isUsedWithInAlloca(),
             "inalloca argument for call has mismatched alloca", AI, I);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca has swifterror as well.
  // Attribute indices are 1-based (0 is the return value), hence i+1.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    if (CS.paramHasAttr(i+1, Attribute::SwiftError)) {
      Value *SwiftErrorArg = CS.getArgument(i);
      auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets());
      Assert(AI, "swifterror argument should come from alloca", AI, I);
      if (AI)
        Assert(AI->isSwiftError(),
               "swifterror argument for call has mismatched alloca", AI, I);
    }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    // Record which exclusive attributes already appear on the fixed params so
    // the vararg tail cannot repeat them.
    for (unsigned Idx = 1; Idx < 1 + FTy->getNumParams(); ++Idx) {
      if (Attrs.hasAttribute(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasAttribute(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = 1 + FTy->getNumParams(); Idx <= CS.arg_size(); ++Idx) {
      Type *Ty = CS.getArgument(Idx-1)->getType();
      verifyParameterAttrs(Attrs, Idx, Ty, false, I);

      if (Attrs.hasAttribute(Idx, Attribute::Nest)) {
        Assert(!SawNest, "More than one parameter has attribute nest!", I);
        SawNest = true;
      }

      if (Attrs.hasAttribute(Idx, Attribute::Returned)) {
        Assert(!SawReturned, "More than one parameter has attribute returned!",
               I);
        Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
               "Incompatible argument and return types for 'returned' "
               "attribute",
               I);
        SawReturned = true;
      }

      Assert(!Attrs.hasAttribute(Idx, Attribute::StructRet),
             "Attribute 'sret' cannot be used for vararg call arguments!", I);

      if (Attrs.hasAttribute(Idx, Attribute::InAlloca))
        Assert(Idx == CS.arg_size(), "inalloca isn't on the last argument!", I);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (CS.getCalledFunction() == nullptr ||
      !CS.getCalledFunction()->getName().startswith("llvm.")) {
    for (Type *ParamTy : FTy->params()) {
      Assert(!ParamTy->isMetadataTy(),
             "Function has metadata parameter but isn't an intrinsic", I);
      Assert(!ParamTy->isTokenTy(),
             "Function has token parameter but isn't an intrinsic", I);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (CS.getCalledFunction() == nullptr)
    Assert(!FTy->getReturnType()->isTokenTy(),
           "Return type cannot be token for indirect call!");

  // Direct intrinsic calls get their own per-intrinsic verification.
  if (Function *F = CS.getCalledFunction())
    if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
      visitIntrinsicCallSite(ID, CS);

  // Verify that a callsite has at most one "deopt", at most one "funclet" and
  // at most one "gc-transition" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false;
  for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = CS.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
             I);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I);
      FoundFuncletBundle = true;
      Assert(BU.Inputs.size() == 1,
             "Expected exactly one funclet bundle operand", I);
      Assert(isa<FuncletPadInst>(BU.Inputs.front()),
             "Funclet bundle operands should correspond to a FuncletPadInst",
             I);
    }
  }

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info.
  if (I->getFunction()->getSubprogram() && CS.getCalledFunction() &&
      CS.getCalledFunction()->getSubprogram())
    Assert(I->getDebugLoc(), "inlinable function call in a function with debug "
                             "info must have a !dbg location",
           I);

  visitInstruction(*I);
}

/// Two types are "congruent" if they are identical, or if they are both pointer
/// types with different pointee types and the same address space.
static bool isTypeCongruent(Type *L, Type *R) {
  if (L == R)
    return true;
  // Otherwise the two types are congruent only if both are pointers into
  // the same address space (pointee types may differ).
  PointerType *PL = dyn_cast<PointerType>(L);
  PointerType *PR = dyn_cast<PointerType>(R);
  if (!PL || !PR)
    return false;
  return PL->getAddressSpace() == PR->getAddressSpace();
}

/// Collect the ABI-impacting attributes (plus alignment) of parameter \p I.
/// \p I is a 0-based parameter index; AttributeSet indices are 1-based, hence
/// the I + 1 below.
static AttrBuilder getParameterABIAttributes(int I, AttributeSet Attrs) {
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
      Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf,
      Attribute::SwiftError};
  AttrBuilder Copy;
  for (auto AK : ABIAttrs) {
    if (Attrs.hasAttribute(I + 1, AK))
      Copy.addAttribute(AK);
  }
  if (Attrs.hasAttribute(I + 1, Attribute::Alignment))
    Copy.addAlignmentAttr(Attrs.getParamAlignment(I + 1));
  return Copy;
}

/// Verify the constraints the 'musttail' marker places on a call and its
/// surrounding code (see the checks spelled out in the comments below).
void Verifier::verifyMustTailCall(CallInst &CI) {
  Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  // - The caller and callee prototypes must match.  Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
         "cannot guarantee tail call due to mismatched parameter counts", &CI);
  Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
         "cannot guarantee tail call due to mismatched varargs", &CI);
  Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
         "cannot guarantee tail call due to mismatched return types", &CI);
  // Parameter types must be pairwise congruent (identical, or pointers in
  // the same address space).
  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    Assert(
        isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
        "cannot guarantee tail call due to mismatched parameter types", &CI);
  }

  // - The calling conventions of the caller and callee must match.
  Assert(F->getCallingConv() == CI.getCallingConv(),
         "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, and inalloca, must match.
  AttributeSet CallerAttrs = F->getAttributes();
  AttributeSet CalleeAttrs = CI.getAttributes();
  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
    Assert(CallerABIAttrs == CalleeABIAttrs,
           "cannot guarantee tail call due to mismatched ABI impacting "
           "function attributes",
           &CI, CI.getOperand(I));
  }

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
2705 Value *RetVal = &CI; 2706 Instruction *Next = CI.getNextNode(); 2707 2708 // Handle the optional bitcast. 2709 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) { 2710 Assert(BI->getOperand(0) == RetVal, 2711 "bitcast following musttail call must use the call", BI); 2712 RetVal = BI; 2713 Next = BI->getNextNode(); 2714 } 2715 2716 // Check the return. 2717 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next); 2718 Assert(Ret, "musttail call must be precede a ret with an optional bitcast", 2719 &CI); 2720 Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal, 2721 "musttail call result must be returned", Ret); 2722 } 2723 2724 void Verifier::visitCallInst(CallInst &CI) { 2725 verifyCallSite(&CI); 2726 2727 if (CI.isMustTailCall()) 2728 verifyMustTailCall(CI); 2729 } 2730 2731 void Verifier::visitInvokeInst(InvokeInst &II) { 2732 verifyCallSite(&II); 2733 2734 // Verify that the first non-PHI instruction of the unwind destination is an 2735 // exception handling instruction. 2736 Assert( 2737 II.getUnwindDest()->isEHPad(), 2738 "The unwind destination does not have an exception handling instruction!", 2739 &II); 2740 2741 visitTerminatorInst(II); 2742 } 2743 2744 /// visitBinaryOperator - Check that both arguments to the binary operator are 2745 /// of the same type! 2746 /// 2747 void Verifier::visitBinaryOperator(BinaryOperator &B) { 2748 Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(), 2749 "Both operands to a binary operator are not of the same type!", &B); 2750 2751 switch (B.getOpcode()) { 2752 // Check that integer arithmetic operators are only used with 2753 // integral operands. 
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    // Integer (or vector-of-integer) operands only, and the result type must
    // equal the operand type.
    Assert(B.getType()->isIntOrIntVectorTy(),
           "Integer arithmetic operators only work with integral types!", &B);
    Assert(B.getType() == B.getOperand(0)->getType(),
           "Integer arithmetic operators must have same type "
           "for operands and result!",
           &B);
    break;
  // Check that floating-point arithmetic operators are only used with
  // floating-point operands.
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    Assert(B.getType()->isFPOrFPVectorTy(),
           "Floating-point arithmetic operators only work with "
           "floating-point types!",
           &B);
    Assert(B.getType() == B.getOperand(0)->getType(),
           "Floating-point arithmetic operators must have same type "
           "for operands and result!",
           &B);
    break;
  // Check that logical operators are only used with integral operands.
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    Assert(B.getType()->isIntOrIntVectorTy(),
           "Logical operators only work with integral types!", &B);
    Assert(B.getType() == B.getOperand(0)->getType(),
           "Logical operators must have same type for operands and result!",
           &B);
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Shifts take integral operands; result type must equal operand type.
    Assert(B.getType()->isIntOrIntVectorTy(),
           "Shifts only work with integral types!", &B);
    Assert(B.getType() == B.getOperand(0)->getType(),
           "Shift return type must be same as operands!", &B);
    break;
  default:
    llvm_unreachable("Unknown BinaryOperator opcode!");
  }

  visitInstruction(B);
}

void Verifier::visitICmpInst(ICmpInst &IC) {
  // Check that the operands are the same type
  Type *Op0Ty = IC.getOperand(0)->getType();
  Type *Op1Ty = IC.getOperand(1)->getType();
  Assert(Op0Ty == Op1Ty,
         "Both operands to ICmp instruction are not of the same type!", &IC);
  // Check that the operands are the right type: integers, integer vectors,
  // or pointers (scalar or vector).
  Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->getScalarType()->isPointerTy(),
         "Invalid operand types for ICmp instruction", &IC);
  // Check that the predicate is valid.
  Assert(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
             IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE,
         "Invalid predicate in ICmp instruction!", &IC);

  visitInstruction(IC);
}

void Verifier::visitFCmpInst(FCmpInst &FC) {
  // Check that the operands are the same type
  Type *Op0Ty = FC.getOperand(0)->getType();
  Type *Op1Ty = FC.getOperand(1)->getType();
  Assert(Op0Ty == Op1Ty,
         "Both operands to FCmp instruction are not of the same type!", &FC);
  // Check that the operands are the right type
  Assert(Op0Ty->isFPOrFPVectorTy(),
         "Invalid operand types for FCmp instruction", &FC);
  // Check that the predicate is valid.
  Assert(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE &&
             FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE,
         "Invalid predicate in FCmp instruction!", &FC);

  visitInstruction(FC);
}

void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
  // Operand validity (vector operand, integer index) is delegated to the
  // instruction class itself.
  Assert(
      ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
      "Invalid extractelement operands!", &EI);
  visitInstruction(EI);
}

void Verifier::visitInsertElementInst(InsertElementInst &IE) {
  Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
                                            IE.getOperand(2)),
         "Invalid insertelement operands!", &IE);
  visitInstruction(IE);
}

void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
  Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
                                            SV.getOperand(2)),
         "Invalid shufflevector operands!", &SV);
  visitInstruction(SV);
}

void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();

  Assert(isa<PointerType>(TargetTy),
         "GEP base pointer is not a vector or a vector of pointers", &GEP);
  Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
  SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
  // Recompute the type the indices lead to and check it against the GEP's
  // declared result element type.
  Type *ElTy =
      GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
  Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);

  Assert(GEP.getType()->getScalarType()->isPointerTy() &&
             GEP.getResultElementType() == ElTy,
         "GEP is not of right type for indices!", &GEP, ElTy);

  if (GEP.getType()->isVectorTy()) {
    // Additional checks for vector GEPs.
    unsigned GEPWidth = GEP.getType()->getVectorNumElements();
    if (GEP.getPointerOperandType()->isVectorTy())
      Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
             "Vector GEP result width doesn't match operand's", &GEP);
    for (Value *Idx : Idxs) {
      Type *IndexTy = Idx->getType();
      // Any vector index must have the same width as the GEP result.
      if (IndexTy->isVectorTy()) {
        unsigned IndexWidth = IndexTy->getVectorNumElements();
        Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
      }
      Assert(IndexTy->getScalarType()->isIntegerTy(),
             "All GEP indices should be of integer type");
    }
  }
  visitInstruction(GEP);
}

/// Returns true if the two ranges share an endpoint (A ends where B begins,
/// or vice versa).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
  return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
}

/// Verify !range metadata: an even, nonzero number of operands forming
/// (low, high) ConstantInt pairs of the instruction's type, with the
/// intervals non-empty, in order, non-overlapping, and non-contiguous
/// (including the wrap-around pair of first and last).
void Verifier::visitRangeMetadata(Instruction& I,
                                  MDNode* Range, Type* Ty) {
  assert(Range &&
         Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");

  unsigned NumOperands = Range->getNumOperands();
  Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Assert(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Assert(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Assert(High, "The upper limit must be an integer!", High);
    Assert(High->getType() == Low->getType() && High->getType() == Ty,
           "Range types must match instruction type!", &I);

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();
    ConstantRange CurRange(LowV, HighV);
    Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
           "Range must not be empty!", Range);
    if (i != 0) {
      Assert(CurRange.intersectWith(LastRange).isEmptySet(),
             "Intervals are overlapping", Range);
      Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
             Range);
      Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
             Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With more than two ranges, also check the first against the last, since
  // the list is circular for overlap/contiguity purposes.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
           "Intervals are overlapping", Range);
    Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
           Range);
  }
}

/// Atomic accesses must be byte-sized and power-of-two sized.
void Verifier::checkAtomicMemAccessSize(const Module *M, Type *Ty,
                                        const Instruction *I) {
  unsigned Size = M->getDataLayout().getTypeSizeInBits(Ty);
  Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
  Assert(!(Size & (Size - 1)),
         "atomic memory access' operand must have a power-of-two size", Ty, I);
}

void Verifier::visitLoadInst(LoadInst &LI) {
  PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
  Assert(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = LI.getType();
  Assert(LI.getAlignment() <= Value::MaximumAlignment,
         "huge alignment values are unsupported", &LI);
  Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI);
  if (LI.isAtomic()) {
    // Atomic loads have extra constraints: no release orderings, explicit
    // alignment, a restricted set of element types, and a legal size.
    Assert(LI.getOrdering() != AtomicOrdering::Release &&
               LI.getOrdering() != AtomicOrdering::AcquireRelease,
           "Load cannot have Release ordering", &LI);
    Assert(LI.getAlignment() != 0,
           "Atomic load must specify explicit alignment", &LI);
    Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() ||
               ElTy->isFloatingPointTy(),
           "atomic load operand must have integer, pointer, or floating point "
           "type!",
           ElTy, &LI);
    checkAtomicMemAccessSize(M, ElTy, &LI);
  } else {
    Assert(LI.getSynchScope() == CrossThread,
           "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}

void Verifier::visitStoreInst(StoreInst &SI) {
  PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
  Assert(PTy, "Store operand must be a pointer.", &SI);
  Type *ElTy = PTy->getElementType();
  Assert(ElTy == SI.getOperand(0)->getType(),
         "Stored value type does not match pointer operand type!", &SI, ElTy);
  Assert(SI.getAlignment() <= Value::MaximumAlignment,
         "huge alignment values are unsupported", &SI);
  Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI);
  if (SI.isAtomic()) {
    // Mirror of the atomic-load constraints: no acquire orderings on a store.
    Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
               SI.getOrdering() != AtomicOrdering::AcquireRelease,
           "Store cannot have Acquire ordering", &SI);
    Assert(SI.getAlignment() != 0,
           "Atomic store must specify explicit alignment", &SI);
    Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() ||
               ElTy->isFloatingPointTy(),
           "atomic store operand must have integer, pointer, or floating point "
           "type!",
           ElTy, &SI);
    checkAtomicMemAccessSize(M, ElTy, &SI);
  } else {
    Assert(SI.getSynchScope() == CrossThread,
           "Non-atomic store cannot have SynchronizationScope specified", &SI);
  }
  visitInstruction(SI);
}

/// Check that SwiftErrorVal is used as a swifterror argument in CS.
void Verifier::verifySwiftErrorCallSite(CallSite CS,
                                        const Value *SwiftErrorVal) {
  unsigned Idx = 0;
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I, ++Idx) {
    if (*I == SwiftErrorVal) {
      // Attribute indices are 1-based, hence Idx+1.
      Assert(CS.paramHasAttr(Idx+1, Attribute::SwiftError),
             "swifterror value when used in a callsite should be marked "
             "with swifterror attribute",
             SwiftErrorVal, CS);
    }
  }
}

void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
  // Check that swifterror value is only used by loads, stores, or as
  // a swifterror argument.
  for (const User *U : SwiftErrorVal->users()) {
    Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
               isa<InvokeInst>(U),
           "swifterror value can only be loaded and stored from, or "
           "as a swifterror argument!",
           SwiftErrorVal, U);
    // If it is used by a store, check it is the second operand.
    if (auto StoreI = dyn_cast<StoreInst>(U))
      Assert(StoreI->getOperand(1) == SwiftErrorVal,
             "swifterror value should be the second operand when used "
             "by stores", SwiftErrorVal, U);
    // Call/invoke users must pass the value through a swifterror parameter.
    if (auto CallI = dyn_cast<CallInst>(U))
      verifySwiftErrorCallSite(const_cast<CallInst*>(CallI), SwiftErrorVal);
    if (auto II = dyn_cast<InvokeInst>(U))
      verifySwiftErrorCallSite(const_cast<InvokeInst*>(II), SwiftErrorVal);
  }
}

void Verifier::visitAllocaInst(AllocaInst &AI) {
  SmallPtrSet<Type*, 4> Visited;
  PointerType *PTy = AI.getType();
  // Allocas must live in address space 0.
  Assert(PTy->getAddressSpace() == 0,
         "Allocation instruction pointer not in the generic address space!",
         &AI);
  Assert(AI.getAllocatedType()->isSized(&Visited),
         "Cannot allocate unsized type", &AI);
  Assert(AI.getArraySize()->getType()->isIntegerTy(),
         "Alloca array size must have integer type", &AI);
  Assert(AI.getAlignment() <= Value::MaximumAlignment,
         "huge alignment values are unsupported", &AI);

  // swifterror allocas have additional use restrictions.
  if (AI.isSwiftError()) {
    verifySwiftErrorValue(&AI);
  }

  visitInstruction(AI);
}

void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {

  // FIXME: more conditions???
  // Both orderings must be genuinely atomic and at least monotonic, and the
  // failure ordering may be neither stronger than the success ordering nor
  // carry release semantics.
  Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
         "cmpxchg instructions must be atomic.", &CXI);
  Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
         "cmpxchg instructions must be atomic.", &CXI);
  Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
         "cmpxchg instructions cannot be unordered.", &CXI);
  Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
         "cmpxchg instructions cannot be unordered.", &CXI);
  Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
         "cmpxchg instructions failure argument shall be no stronger than the "
         "success argument",
         &CXI);
  Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
             CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
         "cmpxchg failure ordering cannot include release semantics", &CXI);

  // The pointer operand's element type must match both the expected value
  // (operand 1) and the new value (operand 2).
  PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
  Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
  Type *ElTy = PTy->getElementType();
  Assert(ElTy->isIntegerTy() || ElTy->isPointerTy(),
         "cmpxchg operand must have integer or pointer type",
         ElTy, &CXI);
  checkAtomicMemAccessSize(M, ElTy, &CXI);
  Assert(ElTy == CXI.getOperand(1)->getType(),
         "Expected value type does not match pointer operand type!", &CXI,
         ElTy);
  Assert(ElTy == CXI.getOperand(2)->getType(),
         "Stored value type does not match pointer operand type!", &CXI, ElTy);
  visitInstruction(CXI);
}

void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
         "atomicrmw instructions must be atomic.", &RMWI);
  Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
         "atomicrmw instructions cannot be unordered.", &RMWI);
  PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
  Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
  Type *ElTy = PTy->getElementType();
  Assert(ElTy->isIntegerTy(), "atomicrmw operand must have integer type!",
         &RMWI, ElTy);
  checkAtomicMemAccessSize(M, ElTy, &RMWI);
  Assert(ElTy == RMWI.getOperand(1)->getType(),
         "Argument value type does not match pointer operand type!", &RMWI,
         ElTy);
  Assert(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() &&
             RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP,
         "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}

void Verifier::visitFenceInst(FenceInst &FI) {
  const AtomicOrdering Ordering = FI.getOrdering();
  Assert(Ordering == AtomicOrdering::Acquire ||
             Ordering == AtomicOrdering::Release ||
             Ordering == AtomicOrdering::AcquireRelease ||
             Ordering == AtomicOrdering::SequentiallyConsistent,
         "fence instructions may only have acquire, release, acq_rel, or "
         "seq_cst ordering.",
         &FI);
  visitInstruction(FI);
}

void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
  // The indices must address a valid subtype, and the result type must match.
  Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
                                          EVI.getIndices()) == EVI.getType(),
         "Invalid ExtractValueInst operands!", &EVI);

  visitInstruction(EVI);
}

void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
  // The inserted value's type must match the subtype addressed by the indices.
  Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
                                          IVI.getIndices()) ==
             IVI.getOperand(1)->getType(),
         "Invalid InsertValueInst operands!", &IVI);

  visitInstruction(IVI);
}

/// Return the parent pad token of an EH pad instruction (funclet pad or
/// catchswitch).
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();

  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
             "Block containing LandingPadInst must be jumped to "
             "only by the unwind edge of an invoke.",
             LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // A catchpad block may only be entered from its owning catchswitch.
    if (!pred_empty(BB))
      Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
             "Block containg CatchPadInst must be jumped to "
             "only by its catchswitch.",
             CPI);
    Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
           "Catchswitch cannot unwind to one of its catchpads",
           CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    TerminatorInst *TI = PredBB->getTerminator();
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
             "EH pad must be jumped to via an unwind edge", ToPad, II);
      // The invoke's "funclet" bundle names the pad it unwinds out of; with
      // no bundle it unwinds from function scope (token none).
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    SmallSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Assert(FromPad != ToPad,
             "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Assert(!isa<ConstantTokenNone>(FromPad),
             "A single unwind edge may only enter one EH pad", TI);
      Assert(Seen.insert(FromPad).second,
             "EH pad jumps through a cycle of pads", FromPad);
    }
  }
}

void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
         "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // All landingpads in a function must produce the same result type.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Assert(LandingPadResultTy == LPI.getType(),
           "The landingpad instruction should have a consistent result type "
           "inside a function.",
           &LPI);

  Function *F = LPI.getParent()->getParent();
  Assert(F->hasPersonalityFn(),
         "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Assert(LPI.getParent()->getLandingPadInst() == &LPI,
         "LandingPadInst not the first non-PHI instruction in the block.",
         &LPI);

  // Each clause is either a catch (pointer-typed) or a filter (constant array).
  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
    Constant *Clause = LPI.getClause(i);
    if (LPI.isCatch(i)) {
      Assert(isa<PointerType>(Clause->getType()),
             "Catch operand does not have pointer type!", &LPI);
    } else {
      Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
      Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
             "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}

void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Assert(F->hasPersonalityFn(),
         "CatchPadInst needs to be in a function with a personality.", &CPI);

  Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
         "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
         CPI.getParentPad());

  // The catchpad instruction must be the first non-PHI instruction in the
  // block.
  Assert(BB->getFirstNonPHI() == &CPI,
         "CatchPadInst not the first non-PHI instruction in the block.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}

void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
  Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
         "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
         CatchReturn.getOperand(0));

  visitTerminatorInst(CatchReturn);
}

void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Assert(F->hasPersonalityFn(),
         "CleanupPadInst needs to be in a function with a personality.", &CPI);

  // The cleanuppad instruction must be the first non-PHI instruction in the
  // block.
  Assert(BB->getFirstNonPHI() == &CPI,
         "CleanupPadInst not the first non-PHI instruction in the block.",
         &CPI);

  // A cleanuppad's parent is either function scope (token none) or another
  // funclet pad.
  auto *ParentPad = CPI.getParentPad();
  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
         "CleanupPadInst has an invalid parent.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}

/// Verify that all unwind edges that exit this funclet pad agree on a single
/// unwind destination, walking nested (child) pads via a worklist.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Assert(Seen.insert(CurrentPad).second,
           "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else.  We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search.  Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = UnwindDest->getFirstNonPHI();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI.  Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
                                              "pad must have the same unwind "
                                              "dest",
                 &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<TerminatorInst>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for.  The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad.
We've found an unwind destination 3440 // for all ancestors of CurrentPad up to but not including 3441 // UnresolvedAncestorPad. 3442 Value *ResolvedPad = CurrentPad; 3443 while (!Worklist.empty()) { 3444 Value *UnclePad = Worklist.back(); 3445 Value *AncestorPad = getParentPad(UnclePad); 3446 // Walk ResolvedPad up the ancestor list until we either find the 3447 // uncle's parent or the last resolved ancestor. 3448 while (ResolvedPad != AncestorPad) { 3449 Value *ResolvedParent = getParentPad(ResolvedPad); 3450 if (ResolvedParent == UnresolvedAncestorPad) { 3451 break; 3452 } 3453 ResolvedPad = ResolvedParent; 3454 } 3455 // If the resolved ancestor search didn't find the uncle's parent, 3456 // then the uncle is not yet resolved. 3457 if (ResolvedPad != AncestorPad) 3458 break; 3459 // This uncle is resolved, so pop it from the worklist. 3460 Worklist.pop_back(); 3461 } 3462 } 3463 } 3464 3465 if (FirstUnwindPad) { 3466 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) { 3467 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest(); 3468 Value *SwitchUnwindPad; 3469 if (SwitchUnwindDest) 3470 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI(); 3471 else 3472 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext()); 3473 Assert(SwitchUnwindPad == FirstUnwindPad, 3474 "Unwind edges out of a catch must have the same unwind dest as " 3475 "the parent catchswitch", 3476 &FPI, FirstUser, CatchSwitch); 3477 } 3478 } 3479 3480 visitInstruction(FPI); 3481 } 3482 3483 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) { 3484 BasicBlock *BB = CatchSwitch.getParent(); 3485 3486 Function *F = BB->getParent(); 3487 Assert(F->hasPersonalityFn(), 3488 "CatchSwitchInst needs to be in a function with a personality.", 3489 &CatchSwitch); 3490 3491 // The catchswitch instruction must be the first non-PHI instruction in the 3492 // block. 
  Assert(BB->getFirstNonPHI() == &CatchSwitch,
         "CatchSwitchInst not the first non-PHI instruction in the block.",
         &CatchSwitch);

  // The parent pad is either token none (top level) or another funclet pad.
  auto *ParentPad = CatchSwitch.getParentPad();
  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
         "CatchSwitchInst has an invalid parent.", ParentPad);

  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
    Instruction *I = UnwindDest->getFirstNonPHI();
    Assert(I->isEHPad() && !isa<LandingPadInst>(I),
           "CatchSwitchInst must unwind to an EH block which is not a "
           "landingpad.",
           &CatchSwitch);

    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
    if (getParentPad(I) == ParentPad)
      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
  }

  Assert(CatchSwitch.getNumHandlers() != 0,
         "CatchSwitchInst cannot have empty handler list", &CatchSwitch);

  for (BasicBlock *Handler : CatchSwitch.handlers()) {
    Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
           "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
  }

  visitEHPadPredecessors(CatchSwitch);
  visitTerminatorInst(CatchSwitch);
}

void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
  Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
         "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
         CRI.getOperand(0));

  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    Instruction *I = UnwindDest->getFirstNonPHI();
    Assert(I->isEHPad() && !isa<LandingPadInst>(I),
           "CleanupReturnInst must unwind to an EH block which is not a "
           "landingpad.",
           &CRI);
  }

  visitTerminatorInst(CRI);
}

/// Verify that the definition of operand \p i of \p I dominates the use.
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
  Instruction *Op = cast<Instruction>(I.getOperand(i));
  // If we have an invalid invoke, don't try to compute the dominance.
  // We already reject it in the invoke specific checks and the dominance
  // computation doesn't handle multiple edges.
  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
    if (II->getNormalDest() == II->getUnwindDest())
      return;
  }

  // Quick check whether the def has already been encountered in the same block.
  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
  // uses are defined to happen on the incoming edge, not at the instruction.
  //
  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
  // wrapping an SSA value, assert that we've already encountered it. See
  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
    return;

  const Use &U = I.getOperandUse(i);
  Assert(DT.dominates(Op, U),
         "Instruction does not dominate all uses!", Op, &I);
}

/// Check the common rules for !dereferenceable and !dereferenceable_or_null
/// metadata attachments: pointer-typed loads only, one i64 operand.
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
         "apply only to pointer types", &I);
  Assert(isa<LoadInst>(I),
         "dereferenceable, dereferenceable_or_null apply only to load"
         " instructions, use attributes for calls or invokes", &I);
  Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
         "take one operand!", &I);
  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
         "dereferenceable_or_null metadata value must be an i64!", &I);
}

/// verifyInstruction - Verify that an instruction is well formed.
3580 /// 3581 void Verifier::visitInstruction(Instruction &I) { 3582 BasicBlock *BB = I.getParent(); 3583 Assert(BB, "Instruction not embedded in basic block!", &I); 3584 3585 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential 3586 for (User *U : I.users()) { 3587 Assert(U != (User *)&I || !DT.isReachableFromEntry(BB), 3588 "Only PHI nodes may reference their own value!", &I); 3589 } 3590 } 3591 3592 // Check that void typed values don't have names 3593 Assert(!I.getType()->isVoidTy() || !I.hasName(), 3594 "Instruction has a name, but provides a void value!", &I); 3595 3596 // Check that the return value of the instruction is either void or a legal 3597 // value type. 3598 Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(), 3599 "Instruction returns a non-scalar type!", &I); 3600 3601 // Check that the instruction doesn't produce metadata. Calls are already 3602 // checked against the callee type. 3603 Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I), 3604 "Invalid use of metadata!", &I); 3605 3606 // Check that all uses of the instruction, if they are instructions 3607 // themselves, actually have parent basic blocks. If the use is not an 3608 // instruction, it is an error! 3609 for (Use &U : I.uses()) { 3610 if (Instruction *Used = dyn_cast<Instruction>(U.getUser())) 3611 Assert(Used->getParent() != nullptr, 3612 "Instruction referencing" 3613 " instruction not embedded in a basic block!", 3614 &I, Used); 3615 else { 3616 CheckFailed("Use of instruction is not an instruction!", U); 3617 return; 3618 } 3619 } 3620 3621 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { 3622 Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I); 3623 3624 // Check to make sure that only first-class-values are operands to 3625 // instructions. 
3626 if (!I.getOperand(i)->getType()->isFirstClassType()) { 3627 Assert(0, "Instruction operands must be first-class values!", &I); 3628 } 3629 3630 if (Function *F = dyn_cast<Function>(I.getOperand(i))) { 3631 // Check to make sure that the "address of" an intrinsic function is never 3632 // taken. 3633 Assert( 3634 !F->isIntrinsic() || 3635 i == (isa<CallInst>(I) ? e - 1 : isa<InvokeInst>(I) ? e - 3 : 0), 3636 "Cannot take the address of an intrinsic!", &I); 3637 Assert( 3638 !F->isIntrinsic() || isa<CallInst>(I) || 3639 F->getIntrinsicID() == Intrinsic::donothing || 3640 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void || 3641 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 || 3642 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint, 3643 "Cannot invoke an intrinsic other than donothing, patchpoint or " 3644 "statepoint", 3645 &I); 3646 Assert(F->getParent() == M, "Referencing function in another module!", 3647 &I, M, F, F->getParent()); 3648 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) { 3649 Assert(OpBB->getParent() == BB->getParent(), 3650 "Referring to a basic block in another function!", &I); 3651 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) { 3652 Assert(OpArg->getParent() == BB->getParent(), 3653 "Referring to an argument in another function!", &I); 3654 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) { 3655 Assert(GV->getParent() == M, "Referencing global in another module!", &I, M, GV, GV->getParent()); 3656 } else if (isa<Instruction>(I.getOperand(i))) { 3657 verifyDominatesUse(I, i); 3658 } else if (isa<InlineAsm>(I.getOperand(i))) { 3659 Assert((i + 1 == e && isa<CallInst>(I)) || 3660 (i + 3 == e && isa<InvokeInst>(I)), 3661 "Cannot take the address of an inline asm!", &I); 3662 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) { 3663 if (CE->getType()->isPtrOrPtrVectorTy()) { 3664 // If we have a ConstantExpr pointer, we need 
to see if it came from an 3665 // illegal bitcast (inttoptr <constant int> ) 3666 visitConstantExprsRecursively(CE); 3667 } 3668 } 3669 } 3670 3671 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) { 3672 Assert(I.getType()->isFPOrFPVectorTy(), 3673 "fpmath requires a floating point result!", &I); 3674 Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I); 3675 if (ConstantFP *CFP0 = 3676 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) { 3677 APFloat Accuracy = CFP0->getValueAPF(); 3678 Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(), 3679 "fpmath accuracy not a positive number!", &I); 3680 } else { 3681 Assert(false, "invalid fpmath accuracy!", &I); 3682 } 3683 } 3684 3685 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) { 3686 Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I), 3687 "Ranges are only for loads, calls and invokes!", &I); 3688 visitRangeMetadata(I, Range, I.getType()); 3689 } 3690 3691 if (I.getMetadata(LLVMContext::MD_nonnull)) { 3692 Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types", 3693 &I); 3694 Assert(isa<LoadInst>(I), 3695 "nonnull applies only to load instructions, use attributes" 3696 " for calls or invokes", 3697 &I); 3698 } 3699 3700 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable)) 3701 visitDereferenceableMetadata(I, MD); 3702 3703 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null)) 3704 visitDereferenceableMetadata(I, MD); 3705 3706 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) { 3707 Assert(I.getType()->isPointerTy(), "align applies only to pointer types", 3708 &I); 3709 Assert(isa<LoadInst>(I), "align applies only to load instructions, " 3710 "use attributes for calls or invokes", &I); 3711 Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I); 3712 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0)); 3713 Assert(CI && CI->getType()->isIntegerTy(64), 
3714 "align metadata value must be an i64!", &I); 3715 uint64_t Align = CI->getZExtValue(); 3716 Assert(isPowerOf2_64(Align), 3717 "align metadata value must be a power of 2!", &I); 3718 Assert(Align <= Value::MaximumAlignment, 3719 "alignment is larger that implementation defined limit", &I); 3720 } 3721 3722 if (MDNode *N = I.getDebugLoc().getAsMDNode()) { 3723 AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N); 3724 visitMDNode(*N); 3725 } 3726 3727 if (auto *DII = dyn_cast<DbgInfoIntrinsic>(&I)) 3728 verifyBitPieceExpression(*DII); 3729 3730 InstsInThisBlock.insert(&I); 3731 } 3732 3733 /// Verify that the specified type (which comes from an intrinsic argument or 3734 /// return value) matches the type constraints specified by the .td file (e.g. 3735 /// an "any integer" argument really is an integer). 3736 /// 3737 /// This returns true on error but does not print a message. 3738 bool Verifier::verifyIntrinsicType(Type *Ty, 3739 ArrayRef<Intrinsic::IITDescriptor> &Infos, 3740 SmallVectorImpl<Type*> &ArgTys) { 3741 using namespace Intrinsic; 3742 3743 // If we ran out of descriptors, there are too many arguments. 
  if (Infos.empty()) return true;
  IITDescriptor D = Infos.front();
  Infos = Infos.slice(1);

  // Each case returns true on mismatch; compound descriptors recurse and so
  // consume additional entries from Infos.
  switch (D.Kind) {
  case IITDescriptor::Void: return !Ty->isVoidTy();
  case IITDescriptor::VarArg: return true;
  case IITDescriptor::MMX: return !Ty->isX86_MMXTy();
  case IITDescriptor::Token: return !Ty->isTokenTy();
  case IITDescriptor::Metadata: return !Ty->isMetadataTy();
  case IITDescriptor::Half: return !Ty->isHalfTy();
  case IITDescriptor::Float: return !Ty->isFloatTy();
  case IITDescriptor::Double: return !Ty->isDoubleTy();
  case IITDescriptor::Integer: return !Ty->isIntegerTy(D.Integer_Width);
  case IITDescriptor::Vector: {
    VectorType *VT = dyn_cast<VectorType>(Ty);
    return !VT || VT->getNumElements() != D.Vector_Width ||
           verifyIntrinsicType(VT->getElementType(), Infos, ArgTys);
  }
  case IITDescriptor::Pointer: {
    PointerType *PT = dyn_cast<PointerType>(Ty);
    return !PT || PT->getAddressSpace() != D.Pointer_AddressSpace ||
           verifyIntrinsicType(PT->getElementType(), Infos, ArgTys);
  }

  case IITDescriptor::Struct: {
    StructType *ST = dyn_cast<StructType>(Ty);
    if (!ST || ST->getNumElements() != D.Struct_NumElements)
      return true;

    for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
      if (verifyIntrinsicType(ST->getElementType(i), Infos, ArgTys))
        return true;
    return false;
  }

  case IITDescriptor::Argument:
    // Two cases here - If this is the second occurrence of an argument, verify
    // that the later instance matches the previous instance.
    if (D.getArgumentNumber() < ArgTys.size())
      return Ty != ArgTys[D.getArgumentNumber()];

    // Otherwise, if this is the first instance of an argument, record it and
    // verify the "Any" kind.
    assert(D.getArgumentNumber() == ArgTys.size() && "Table consistency error");
    ArgTys.push_back(Ty);

    switch (D.getArgumentKind()) {
    case IITDescriptor::AK_Any: return false; // Success
    case IITDescriptor::AK_AnyInteger: return !Ty->isIntOrIntVectorTy();
    case IITDescriptor::AK_AnyFloat: return !Ty->isFPOrFPVectorTy();
    case IITDescriptor::AK_AnyVector: return !isa<VectorType>(Ty);
    case IITDescriptor::AK_AnyPointer: return !isa<PointerType>(Ty);
    }
    llvm_unreachable("all argument kinds not covered");

  case IITDescriptor::ExtendArgument: {
    // This may only be used when referring to a previous vector argument.
    if (D.getArgumentNumber() >= ArgTys.size())
      return true;

    Type *NewTy = ArgTys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
      NewTy = VectorType::getExtendedElementVectorType(VTy);
    else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
      NewTy = IntegerType::get(ITy->getContext(), 2 * ITy->getBitWidth());
    else
      return true;

    return Ty != NewTy;
  }
  case IITDescriptor::TruncArgument: {
    // This may only be used when referring to a previous vector argument.
    if (D.getArgumentNumber() >= ArgTys.size())
      return true;

    Type *NewTy = ArgTys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
      NewTy = VectorType::getTruncatedElementVectorType(VTy);
    else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
      NewTy = IntegerType::get(ITy->getContext(), ITy->getBitWidth() / 2);
    else
      return true;

    return Ty != NewTy;
  }
  case IITDescriptor::HalfVecArgument:
    // This may only be used when referring to a previous vector argument.
    return D.getArgumentNumber() >= ArgTys.size() ||
           !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
           VectorType::getHalfElementsVectorType(
               cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
  case IITDescriptor::SameVecWidthArgument: {
    if (D.getArgumentNumber() >= ArgTys.size())
      return true;
    VectorType * ReferenceType =
        dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
    VectorType *ThisArgType = dyn_cast<VectorType>(Ty);
    if (!ThisArgType || !ReferenceType ||
        (ReferenceType->getVectorNumElements() !=
         ThisArgType->getVectorNumElements()))
      return true;
    return verifyIntrinsicType(ThisArgType->getVectorElementType(),
                               Infos, ArgTys);
  }
  case IITDescriptor::PtrToArgument: {
    if (D.getArgumentNumber() >= ArgTys.size())
      return true;
    Type * ReferenceType = ArgTys[D.getArgumentNumber()];
    PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
    return (!ThisArgType || ThisArgType->getElementType() != ReferenceType);
  }
  case IITDescriptor::VecOfPtrsToElt: {
    if (D.getArgumentNumber() >= ArgTys.size())
      return true;
    VectorType * ReferenceType =
        dyn_cast<VectorType> (ArgTys[D.getArgumentNumber()]);
    VectorType *ThisArgVecTy = dyn_cast<VectorType>(Ty);
    if (!ThisArgVecTy || !ReferenceType ||
        (ReferenceType->getVectorNumElements() !=
         ThisArgVecTy->getVectorNumElements()))
      return true;
    PointerType *ThisArgEltTy =
        dyn_cast<PointerType>(ThisArgVecTy->getVectorElementType());
    if (!ThisArgEltTy)
      return true;
    return ThisArgEltTy->getElementType() !=
           ReferenceType->getVectorElementType();
  }
  }
  llvm_unreachable("unhandled");
}

/// Verify if the intrinsic has variable arguments. This method is intended to
/// be called after all the fixed arguments have been verified first.
///
/// This method returns true on error and does not print an error message.
3881 bool 3882 Verifier::verifyIntrinsicIsVarArg(bool isVarArg, 3883 ArrayRef<Intrinsic::IITDescriptor> &Infos) { 3884 using namespace Intrinsic; 3885 3886 // If there are no descriptors left, then it can't be a vararg. 3887 if (Infos.empty()) 3888 return isVarArg; 3889 3890 // There should be only one descriptor remaining at this point. 3891 if (Infos.size() != 1) 3892 return true; 3893 3894 // Check and verify the descriptor. 3895 IITDescriptor D = Infos.front(); 3896 Infos = Infos.slice(1); 3897 if (D.Kind == IITDescriptor::VarArg) 3898 return !isVarArg; 3899 3900 return true; 3901 } 3902 3903 /// Allow intrinsics to be verified in different ways. 3904 void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) { 3905 Function *IF = CS.getCalledFunction(); 3906 Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!", 3907 IF); 3908 3909 // Verify that the intrinsic prototype lines up with what the .td files 3910 // describe. 3911 FunctionType *IFTy = IF->getFunctionType(); 3912 bool IsVarArg = IFTy->isVarArg(); 3913 3914 SmallVector<Intrinsic::IITDescriptor, 8> Table; 3915 getIntrinsicInfoTableEntries(ID, Table); 3916 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table; 3917 3918 SmallVector<Type *, 4> ArgTys; 3919 Assert(!verifyIntrinsicType(IFTy->getReturnType(), TableRef, ArgTys), 3920 "Intrinsic has incorrect return type!", IF); 3921 for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i) 3922 Assert(!verifyIntrinsicType(IFTy->getParamType(i), TableRef, ArgTys), 3923 "Intrinsic has incorrect argument type!", IF); 3924 3925 // Verify if the intrinsic call matches the vararg property. 3926 if (IsVarArg) 3927 Assert(!verifyIntrinsicIsVarArg(IsVarArg, TableRef), 3928 "Intrinsic was not defined with variable arguments!", IF); 3929 else 3930 Assert(!verifyIntrinsicIsVarArg(IsVarArg, TableRef), 3931 "Callsite was not defined with variable arguments!", IF); 3932 3933 // All descriptors should be absorbed by now. 
3934 Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF); 3935 3936 // Now that we have the intrinsic ID and the actual argument types (and we 3937 // know they are legal for the intrinsic!) get the intrinsic name through the 3938 // usual means. This allows us to verify the mangling of argument types into 3939 // the name. 3940 const std::string ExpectedName = Intrinsic::getName(ID, ArgTys); 3941 Assert(ExpectedName == IF->getName(), 3942 "Intrinsic name not mangled correctly for type arguments! " 3943 "Should be: " + 3944 ExpectedName, 3945 IF); 3946 3947 // If the intrinsic takes MDNode arguments, verify that they are either global 3948 // or are local to *this* function. 3949 for (Value *V : CS.args()) 3950 if (auto *MD = dyn_cast<MetadataAsValue>(V)) 3951 visitMetadataAsValue(*MD, CS.getCaller()); 3952 3953 switch (ID) { 3954 default: 3955 break; 3956 case Intrinsic::ctlz: // llvm.ctlz 3957 case Intrinsic::cttz: // llvm.cttz 3958 Assert(isa<ConstantInt>(CS.getArgOperand(1)), 3959 "is_zero_undef argument of bit counting intrinsics must be a " 3960 "constant int", 3961 CS); 3962 break; 3963 case Intrinsic::dbg_declare: // llvm.dbg.declare 3964 Assert(isa<MetadataAsValue>(CS.getArgOperand(0)), 3965 "invalid llvm.dbg.declare intrinsic call 1", CS); 3966 visitDbgIntrinsic("declare", cast<DbgDeclareInst>(*CS.getInstruction())); 3967 break; 3968 case Intrinsic::dbg_value: // llvm.dbg.value 3969 visitDbgIntrinsic("value", cast<DbgValueInst>(*CS.getInstruction())); 3970 break; 3971 case Intrinsic::memcpy: 3972 case Intrinsic::memmove: 3973 case Intrinsic::memset: { 3974 ConstantInt *AlignCI = dyn_cast<ConstantInt>(CS.getArgOperand(3)); 3975 Assert(AlignCI, 3976 "alignment argument of memory intrinsics must be a constant int", 3977 CS); 3978 const APInt &AlignVal = AlignCI->getValue(); 3979 Assert(AlignCI->isZero() || AlignVal.isPowerOf2(), 3980 "alignment argument of memory intrinsics must be a power of 2", CS); 3981 
Assert(isa<ConstantInt>(CS.getArgOperand(4)), 3982 "isvolatile argument of memory intrinsics must be a constant int", 3983 CS); 3984 break; 3985 } 3986 case Intrinsic::gcroot: 3987 case Intrinsic::gcwrite: 3988 case Intrinsic::gcread: 3989 if (ID == Intrinsic::gcroot) { 3990 AllocaInst *AI = 3991 dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts()); 3992 Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS); 3993 Assert(isa<Constant>(CS.getArgOperand(1)), 3994 "llvm.gcroot parameter #2 must be a constant.", CS); 3995 if (!AI->getAllocatedType()->isPointerTy()) { 3996 Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)), 3997 "llvm.gcroot parameter #1 must either be a pointer alloca, " 3998 "or argument #2 must be a non-null constant.", 3999 CS); 4000 } 4001 } 4002 4003 Assert(CS.getParent()->getParent()->hasGC(), 4004 "Enclosing function does not use GC.", CS); 4005 break; 4006 case Intrinsic::init_trampoline: 4007 Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()), 4008 "llvm.init_trampoline parameter #2 must resolve to a function.", 4009 CS); 4010 break; 4011 case Intrinsic::prefetch: 4012 Assert(isa<ConstantInt>(CS.getArgOperand(1)) && 4013 isa<ConstantInt>(CS.getArgOperand(2)) && 4014 cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 && 4015 cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4, 4016 "invalid arguments to llvm.prefetch", CS); 4017 break; 4018 case Intrinsic::stackprotector: 4019 Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()), 4020 "llvm.stackprotector parameter #2 must resolve to an alloca.", CS); 4021 break; 4022 case Intrinsic::lifetime_start: 4023 case Intrinsic::lifetime_end: 4024 case Intrinsic::invariant_start: 4025 Assert(isa<ConstantInt>(CS.getArgOperand(0)), 4026 "size argument of memory use markers must be a constant integer", 4027 CS); 4028 break; 4029 case Intrinsic::invariant_end: 4030 Assert(isa<ConstantInt>(CS.getArgOperand(1)), 4031 "llvm.invariant.end parameter #2 
must be a constant integer", CS); 4032 break; 4033 4034 case Intrinsic::localescape: { 4035 BasicBlock *BB = CS.getParent(); 4036 Assert(BB == &BB->getParent()->front(), 4037 "llvm.localescape used outside of entry block", CS); 4038 Assert(!SawFrameEscape, 4039 "multiple calls to llvm.localescape in one function", CS); 4040 for (Value *Arg : CS.args()) { 4041 if (isa<ConstantPointerNull>(Arg)) 4042 continue; // Null values are allowed as placeholders. 4043 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts()); 4044 Assert(AI && AI->isStaticAlloca(), 4045 "llvm.localescape only accepts static allocas", CS); 4046 } 4047 FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands(); 4048 SawFrameEscape = true; 4049 break; 4050 } 4051 case Intrinsic::localrecover: { 4052 Value *FnArg = CS.getArgOperand(0)->stripPointerCasts(); 4053 Function *Fn = dyn_cast<Function>(FnArg); 4054 Assert(Fn && !Fn->isDeclaration(), 4055 "llvm.localrecover first " 4056 "argument must be function defined in this module", 4057 CS); 4058 auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2)); 4059 Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int", 4060 CS); 4061 auto &Entry = FrameEscapeInfo[Fn]; 4062 Entry.second = unsigned( 4063 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1)); 4064 break; 4065 } 4066 4067 case Intrinsic::experimental_gc_statepoint: 4068 Assert(!CS.isInlineAsm(), 4069 "gc.statepoint support for inline assembly unimplemented", CS); 4070 Assert(CS.getParent()->getParent()->hasGC(), 4071 "Enclosing function does not use GC.", CS); 4072 4073 verifyStatepoint(CS); 4074 break; 4075 case Intrinsic::experimental_gc_result: { 4076 Assert(CS.getParent()->getParent()->hasGC(), 4077 "Enclosing function does not use GC.", CS); 4078 // Are we tied to a statepoint properly? 4079 CallSite StatepointCS(CS.getArgOperand(0)); 4080 const Function *StatepointFn = 4081 StatepointCS.getInstruction() ? 
StatepointCS.getCalledFunction() : nullptr; 4082 Assert(StatepointFn && StatepointFn->isDeclaration() && 4083 StatepointFn->getIntrinsicID() == 4084 Intrinsic::experimental_gc_statepoint, 4085 "gc.result operand #1 must be from a statepoint", CS, 4086 CS.getArgOperand(0)); 4087 4088 // Assert that result type matches wrapped callee. 4089 const Value *Target = StatepointCS.getArgument(2); 4090 auto *PT = cast<PointerType>(Target->getType()); 4091 auto *TargetFuncType = cast<FunctionType>(PT->getElementType()); 4092 Assert(CS.getType() == TargetFuncType->getReturnType(), 4093 "gc.result result type does not match wrapped callee", CS); 4094 break; 4095 } 4096 case Intrinsic::experimental_gc_relocate: { 4097 Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS); 4098 4099 Assert(isa<PointerType>(CS.getType()->getScalarType()), 4100 "gc.relocate must return a pointer or a vector of pointers", CS); 4101 4102 // Check that this relocate is correctly tied to the statepoint 4103 4104 // This is case for relocate on the unwinding path of an invoke statepoint 4105 if (LandingPadInst *LandingPad = 4106 dyn_cast<LandingPadInst>(CS.getArgOperand(0))) { 4107 4108 const BasicBlock *InvokeBB = 4109 LandingPad->getParent()->getUniquePredecessor(); 4110 4111 // Landingpad relocates should have only one predecessor with invoke 4112 // statepoint terminator 4113 Assert(InvokeBB, "safepoints should have unique landingpads", 4114 LandingPad->getParent()); 4115 Assert(InvokeBB->getTerminator(), "safepoint block should be well formed", 4116 InvokeBB); 4117 Assert(isStatepoint(InvokeBB->getTerminator()), 4118 "gc relocate should be linked to a statepoint", InvokeBB); 4119 } 4120 else { 4121 // In all other cases relocate should be tied to the statepoint directly. 4122 // This covers relocates on a normal return path of invoke statepoint and 4123 // relocates of a call statepoint. 
4124 auto Token = CS.getArgOperand(0); 4125 Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)), 4126 "gc relocate is incorrectly tied to the statepoint", CS, Token); 4127 } 4128 4129 // Verify rest of the relocate arguments. 4130 4131 ImmutableCallSite StatepointCS( 4132 cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint()); 4133 4134 // Both the base and derived must be piped through the safepoint. 4135 Value* Base = CS.getArgOperand(1); 4136 Assert(isa<ConstantInt>(Base), 4137 "gc.relocate operand #2 must be integer offset", CS); 4138 4139 Value* Derived = CS.getArgOperand(2); 4140 Assert(isa<ConstantInt>(Derived), 4141 "gc.relocate operand #3 must be integer offset", CS); 4142 4143 const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue(); 4144 const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue(); 4145 // Check the bounds 4146 Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(), 4147 "gc.relocate: statepoint base index out of bounds", CS); 4148 Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(), 4149 "gc.relocate: statepoint derived index out of bounds", CS); 4150 4151 // Check that BaseIndex and DerivedIndex fall within the 'gc parameters' 4152 // section of the statepoint's argument. 
4153 Assert(StatepointCS.arg_size() > 0, 4154 "gc.statepoint: insufficient arguments"); 4155 Assert(isa<ConstantInt>(StatepointCS.getArgument(3)), 4156 "gc.statement: number of call arguments must be constant integer"); 4157 const unsigned NumCallArgs = 4158 cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue(); 4159 Assert(StatepointCS.arg_size() > NumCallArgs + 5, 4160 "gc.statepoint: mismatch in number of call arguments"); 4161 Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)), 4162 "gc.statepoint: number of transition arguments must be " 4163 "a constant integer"); 4164 const int NumTransitionArgs = 4165 cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)) 4166 ->getZExtValue(); 4167 const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1; 4168 Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)), 4169 "gc.statepoint: number of deoptimization arguments must be " 4170 "a constant integer"); 4171 const int NumDeoptArgs = 4172 cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)) 4173 ->getZExtValue(); 4174 const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs; 4175 const int GCParamArgsEnd = StatepointCS.arg_size(); 4176 Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd, 4177 "gc.relocate: statepoint base index doesn't fall within the " 4178 "'gc parameters' section of the statepoint call", 4179 CS); 4180 Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd, 4181 "gc.relocate: statepoint derived index doesn't fall within the " 4182 "'gc parameters' section of the statepoint call", 4183 CS); 4184 4185 // Relocated value must be either a pointer type or vector-of-pointer type, 4186 // but gc_relocate does not need to return the same pointer type as the 4187 // relocated pointer. It can be casted to the correct type later if it's 4188 // desired. 
However, they must have the same address space and 'vectorness' 4189 GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction()); 4190 Assert(Relocate.getDerivedPtr()->getType()->getScalarType()->isPointerTy(), 4191 "gc.relocate: relocated value must be a gc pointer", CS); 4192 4193 auto ResultType = CS.getType(); 4194 auto DerivedType = Relocate.getDerivedPtr()->getType(); 4195 Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(), 4196 "gc.relocate: vector relocates to vector and pointer to pointer", 4197 CS); 4198 Assert( 4199 ResultType->getPointerAddressSpace() == 4200 DerivedType->getPointerAddressSpace(), 4201 "gc.relocate: relocating a pointer shouldn't change its address space", 4202 CS); 4203 break; 4204 } 4205 case Intrinsic::eh_exceptioncode: 4206 case Intrinsic::eh_exceptionpointer: { 4207 Assert(isa<CatchPadInst>(CS.getArgOperand(0)), 4208 "eh.exceptionpointer argument must be a catchpad", CS); 4209 break; 4210 } 4211 case Intrinsic::masked_load: { 4212 Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS); 4213 4214 Value *Ptr = CS.getArgOperand(0); 4215 //Value *Alignment = CS.getArgOperand(1); 4216 Value *Mask = CS.getArgOperand(2); 4217 Value *PassThru = CS.getArgOperand(3); 4218 Assert(Mask->getType()->isVectorTy(), 4219 "masked_load: mask must be vector", CS); 4220 4221 // DataTy is the overloaded type 4222 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); 4223 Assert(DataTy == CS.getType(), 4224 "masked_load: return must match pointer type", CS); 4225 Assert(PassThru->getType() == DataTy, 4226 "masked_load: pass through and data type must match", CS); 4227 Assert(Mask->getType()->getVectorNumElements() == 4228 DataTy->getVectorNumElements(), 4229 "masked_load: vector mask must be same length as data", CS); 4230 break; 4231 } 4232 case Intrinsic::masked_store: { 4233 Value *Val = CS.getArgOperand(0); 4234 Value *Ptr = CS.getArgOperand(1); 4235 //Value *Alignment = CS.getArgOperand(2); 4236 
Value *Mask = CS.getArgOperand(3); 4237 Assert(Mask->getType()->isVectorTy(), 4238 "masked_store: mask must be vector", CS); 4239 4240 // DataTy is the overloaded type 4241 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); 4242 Assert(DataTy == Val->getType(), 4243 "masked_store: storee must match pointer type", CS); 4244 Assert(Mask->getType()->getVectorNumElements() == 4245 DataTy->getVectorNumElements(), 4246 "masked_store: vector mask must be same length as data", CS); 4247 break; 4248 } 4249 4250 case Intrinsic::experimental_guard: { 4251 Assert(CS.isCall(), "experimental_guard cannot be invoked", CS); 4252 Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1, 4253 "experimental_guard must have exactly one " 4254 "\"deopt\" operand bundle"); 4255 break; 4256 } 4257 4258 case Intrinsic::experimental_deoptimize: { 4259 Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS); 4260 Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1, 4261 "experimental_deoptimize must have exactly one " 4262 "\"deopt\" operand bundle"); 4263 Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(), 4264 "experimental_deoptimize return type must match caller return type"); 4265 4266 if (CS.isCall()) { 4267 auto *DeoptCI = CS.getInstruction(); 4268 auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode()); 4269 Assert(RI, 4270 "calls to experimental_deoptimize must be followed by a return"); 4271 4272 if (!CS.getType()->isVoidTy() && RI) 4273 Assert(RI->getReturnValue() == DeoptCI, 4274 "calls to experimental_deoptimize must be followed by a return " 4275 "of the value computed by experimental_deoptimize"); 4276 } 4277 4278 break; 4279 } 4280 }; 4281 } 4282 4283 /// \brief Carefully grab the subprogram from a local scope. 4284 /// 4285 /// This carefully grabs the subprogram from a local scope, avoiding the 4286 /// built-in assertions that would typically fire. 
static DISubprogram *getSubprogram(Metadata *LocalScope) {
  if (!LocalScope)
    return nullptr;

  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
    return SP;

  // Lexical blocks nest inside a subprogram; recurse through the raw scope
  // links until we bottom out at the subprogram (or a broken link).
  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
    return getSubprogram(LB->getRawScope());

  // Just return null; broken scope chains are checked elsewhere.
  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
  return nullptr;
}

/// Verify the common requirements of llvm.dbg.* intrinsics: a well-formed
/// address/value operand, a DILocalVariable, a DIExpression, and agreement
/// between the variable's scope and the intrinsic's !dbg attachment.
///
/// \param Kind  Intrinsic suffix ("declare" or "value") used in messages.
/// \param DII   The debug-info intrinsic being verified.
template <class DbgIntrinsicTy>
void Verifier::visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII) {
  // Operand 0 must wrap either value metadata or an empty MDNode (the
  // accepted "undef" form for a dropped address/value).
  auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
  AssertDI(isa<ValueAsMetadata>(MD) ||
               (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
           "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
  AssertDI(isa<DILocalVariable>(DII.getRawVariable()),
           "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
           DII.getRawVariable());
  AssertDI(isa<DIExpression>(DII.getRawExpression()),
           "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
           DII.getRawExpression());

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DII.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(N))
      return;

  // BB/F may be null for an unparented intrinsic; they are only used to
  // enrich the diagnostic output below.
  BasicBlock *BB = DII.getParent();
  Function *F = BB ? BB->getParent() : nullptr;

  // The scopes for variables and !dbg attachments must agree.
  DILocalVariable *Var = DII.getVariable();
  DILocation *Loc = DII.getDebugLoc();
  Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
         &DII, BB, F);

  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  Assert(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
                             " variable and !dbg attachment",
         &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
         Loc->getScope()->getSubprogram());
}

/// Determine the size in bits of a local variable's type, walking through
/// derived types (typedefs, qualifiers, etc.) to their base type when the
/// size is not recorded directly. Returns 0 when no size can be determined.
static uint64_t getVariableSize(const DILocalVariable &V) {
  // Be careful of broken types (checked elsewhere).
  const Metadata *RawType = V.getRawType();
  while (RawType) {
    // Try to get the size directly.
    if (auto *T = dyn_cast<DIType>(RawType))
      if (uint64_t Size = T->getSizeInBits())
        return Size;

    if (auto *DT = dyn_cast<DIDerivedType>(RawType)) {
      // Look at the base type.
      RawType = DT->getRawBaseType();
      continue;
    }

    // Missing type or size.
    break;
  }

  // Fail gracefully.
  return 0;
}

/// Verify that a DW_OP_bit_piece expression attached to a debug intrinsic
/// describes a strict sub-range of its variable: the piece must fit inside
/// the variable's size and must not cover the whole variable.
void Verifier::verifyBitPieceExpression(const DbgInfoIntrinsic &I) {
  DILocalVariable *V;
  DIExpression *E;
  // The variable/expression operands live at different positions on
  // dbg.value vs. dbg.declare; extract them accordingly.
  if (auto *DVI = dyn_cast<DbgValueInst>(&I)) {
    V = dyn_cast_or_null<DILocalVariable>(DVI->getRawVariable());
    E = dyn_cast_or_null<DIExpression>(DVI->getRawExpression());
  } else {
    auto *DDI = cast<DbgDeclareInst>(&I);
    V = dyn_cast_or_null<DILocalVariable>(DDI->getRawVariable());
    E = dyn_cast_or_null<DIExpression>(DDI->getRawExpression());
  }

  // We don't know whether this intrinsic verified correctly.
  if (!V || !E || !E->isValid())
    return;

  // Nothing to do if this isn't a bit piece expression.
  if (!E->isBitPiece())
    return;

  // The frontend helps out GDB by emitting the members of local anonymous
  // unions as artificial local variables with shared storage. When SROA splits
  // the storage for artificial local variables that are smaller than the entire
  // union, the overhang piece will be outside of the allotted space for the
  // variable and this check fails.
  // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
  if (V->isArtificial())
    return;

  // If there's no size, the type is broken, but that should be checked
  // elsewhere.
  uint64_t VarSize = getVariableSize(*V);
  if (!VarSize)
    return;

  unsigned PieceSize = E->getBitPieceSize();
  unsigned PieceOffset = E->getBitPieceOffset();
  // NOTE(review): PieceSize + PieceOffset is an unsigned sum that could wrap
  // for pathological inputs and sneak past this bound — confirm the piece
  // fields are range-checked upstream.
  Assert(PieceSize + PieceOffset <= VarSize,
         "piece is larger than or outside of variable", &I, V, E);
  Assert(PieceSize != VarSize, "piece covers entire variable", &I, V, E);
}

/// Verify that every DICompileUnit reached during verification is listed in
/// the module's !llvm.dbg.cu named metadata, then reset the visited set for
/// the next run.
void Verifier::verifyCompileUnits() {
  auto *CUs = M->getNamedMetadata("llvm.dbg.cu");
  SmallPtrSet<const Metadata *, 2> Listed;
  if (CUs)
    Listed.insert(CUs->op_begin(), CUs->op_end());
  Assert(
      std::all_of(CUVisited.begin(), CUVisited.end(),
                  [&Listed](const Metadata *CU) { return Listed.count(CU); }),
      "All DICompileUnits must be listed in llvm.dbg.cu");
  CUVisited.clear();
}

/// Verify that every collected llvm.experimental.deoptimize declaration in
/// the module uses one common calling convention.
void Verifier::verifyDeoptimizeCallingConvs() {
  if (DeoptimizeDeclarations.empty())
    return;

  // Compare every remaining declaration against the first one.
  const Function *First = DeoptimizeDeclarations[0];
  for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
    Assert(First->getCallingConv() == F->getCallingConv(),
           "All llvm.experimental.deoptimize declarations must have the same "
           "calling convention",
           First, F);
  }
}

//===----------------------------------------------------------------------===//
//  Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//

/// Check a single defined function for structural errors.
///
/// \param f   Function to check; must be a definition, not a declaration.
/// \param OS  Optional stream to print diagnostics to (null = silent).
/// \returns true when the function is BROKEN (inverted from the name).
bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
  // The verifier needs a mutable reference internally but does not change
  // the function's semantics; cast away constness for the call.
  Function &F = const_cast<Function &>(f);
  assert(!F.isDeclaration() && "Cannot verify external functions");

  // Don't use a raw_null_ostream. Printing IR is expensive.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true);

  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return !V.verify(F);
}

/// Check an entire module (every materialized function definition plus
/// module-level properties) for structural errors.
///
/// \param M               Module to check.
/// \param OS              Optional diagnostics stream (null = silent).
/// \param BrokenDebugInfo If non-null, receives whether debug info was
///                        broken; in that mode broken debug info is NOT
///                        treated as a verification error.
/// \returns true when the module is BROKEN (inverted from the name).
bool llvm::verifyModule(const Module &M, raw_ostream *OS,
                        bool *BrokenDebugInfo) {
  // Don't use a raw_null_ostream. Printing IR is expensive.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo);

  bool Broken = false;
  // Only verify bodies that actually exist in memory; declarations and
  // not-yet-materialized functions are skipped.
  for (const Function &F : M)
    if (!F.isDeclaration() && !F.isMaterializable())
      Broken |= !V.verify(F);

  Broken |= !V.verify(M);
  if (BrokenDebugInfo)
    *BrokenDebugInfo = V.hasBrokenDebugInfo();
  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return Broken;
}

namespace {
/// Legacy pass manager wrapper around the Verifier: verifies each function
/// as it runs, and the whole module at finalization.
struct VerifierLegacyPass : public FunctionPass {
  static char ID;

  Verifier V;
  // When true (the default), any verification failure aborts compilation.
  bool FatalErrors = true;

  VerifierLegacyPass()
      : FunctionPass(ID),
        V(&dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false) {
    initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  explicit VerifierLegacyPass(bool FatalErrors)
      : FunctionPass(ID),
        V(&dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false),
        FatalErrors(FatalErrors) {
    initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  // Never mutates the IR; returns false accordingly.
  bool runOnFunction(Function &F) override {
    if (!V.verify(F) && FatalErrors)
      report_fatal_error("Broken function found, compilation aborted!");

    return false;
  }

  bool doFinalization(Module &M) override {
    bool HasErrors = !V.verify(M);
    if (FatalErrors) {
      if (HasErrors)
        report_fatal_error("Broken module found, compilation aborted!");
      // In fatal mode broken debug info is an assertion failure (debug
      // builds); release builds fall through to the strip path below.
      assert(!V.hasBrokenDebugInfo() && "Module contains invalid debug info");
    }

    // Strip broken debug info.
    if (V.hasBrokenDebugInfo()) {
      // Diagnose first so the user learns the metadata was discarded.
      DiagnosticInfoIgnoringInvalidDebugMetadata DiagInvalid(M);
      M.getContext().diagnose(DiagInvalid);
      if (!StripDebugInfo(M))
        report_fatal_error("Failed to strip malformed debug info");
    }
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Pure analysis: preserves everything.
    AU.setPreservesAll();
  }
};
}

char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)

/// Create a legacy verifier pass.
/// \param FatalErrors abort compilation on a broken function/module.
FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
  return new VerifierLegacyPass(FatalErrors);
}

char VerifierAnalysis::PassID;

/// New-PM module analysis: run the full module verifier and capture both the
/// IR-broken and debug-info-broken bits in the result.
VerifierAnalysis::Result VerifierAnalysis::run(Module &M) {
  Result Res;
  Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
  return Res;
}

/// New-PM function analysis: per-function verification only; debug info
/// brokenness is not tracked at function granularity (always false here).
VerifierAnalysis::Result VerifierAnalysis::run(Function &F) {
  return { llvm::verifyFunction(F, &dbgs()), false };
}

/// New-PM module verifier pass: report (and, in fatal mode, abort on) broken
/// IR, and strip debug info that failed verification.
PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
  auto Res = AM.getResult<VerifierAnalysis>(M);
  if (FatalErrors) {
    if (Res.IRBroken)
      report_fatal_error("Broken module found, compilation aborted!");
    assert(!Res.DebugInfoBroken && "Module contains invalid debug info");
  }

  // Strip broken debug info.
  if (Res.DebugInfoBroken) {
    DiagnosticInfoIgnoringInvalidDebugMetadata DiagInvalid(M);
    M.getContext().diagnose(DiagInvalid);
    if (!StripDebugInfo(M))
      report_fatal_error("Failed to strip malformed debug info");
  }
  // Verification never mutates the IR (the debug-info strip is metadata
  // cleanup), so all analyses remain valid.
  return PreservedAnalyses::all();
}

/// New-PM function verifier pass; see the module overload above.
PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto res = AM.getResult<VerifierAnalysis>(F);
  if (res.IRBroken && FatalErrors)
    report_fatal_error("Broken function found, compilation aborted!");

  return PreservedAnalyses::all();
}