1 //===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the function verifier interface, that can be used for some
10 // basic correctness checking of input to the system.
11 //
12 // Note that this does not provide full `Java style' security and verifications,
13 // instead it just tries to ensure that code is well-formed.
14 //
15 // * Both of a binary operator's parameters are of the same type
16 // * Verify that the indices of mem access instructions match other operands
17 // * Verify that arithmetic and other things are only performed on first-class
18 // types. Verify that shifts & logicals only happen on integrals f.e.
19 // * All of the constants in a switch statement are of the correct type
20 // * The code is in valid SSA form
21 // * It should be illegal to put a label into any other type (like a structure)
22 // or to return one. [except constant arrays!]
23 // * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24 // * PHI nodes must have an entry for each predecessor, with no extras.
25 // * PHI nodes must be the first thing in a basic block, all grouped together
26 // * PHI nodes must have at least one entry
27 // * All basic blocks should only end with terminator insts, not contain them
28 // * The entry node to a function must not have predecessors
29 // * All Instructions must be embedded into a basic block
30 // * Functions cannot take a void-typed parameter
// * Verify that a function's argument list agrees with its declared type.
32 // * It is illegal to specify a name for a void value.
// * It is illegal to have an internal global value with no initializer
34 // * It is illegal to have a ret instruction that returns a value that does not
35 // agree with the function return value type.
36 // * Function call argument types match the function prototype
37 // * A landing pad is defined by a landingpad instruction, and can be jumped to
38 // only by the unwind edge of an invoke instruction.
39 // * A landingpad instruction must be the first non-PHI instruction in the
40 // block.
41 // * Landingpad instructions must be in a function with a personality function.
42 // * All other things that are tested by asserts spread about the code...
43 //
44 //===----------------------------------------------------------------------===//
45
46 #include "llvm/IR/Verifier.h"
47 #include "llvm/ADT/APFloat.h"
48 #include "llvm/ADT/APInt.h"
49 #include "llvm/ADT/ArrayRef.h"
50 #include "llvm/ADT/DenseMap.h"
51 #include "llvm/ADT/MapVector.h"
52 #include "llvm/ADT/Optional.h"
53 #include "llvm/ADT/STLExtras.h"
54 #include "llvm/ADT/SmallPtrSet.h"
55 #include "llvm/ADT/SmallSet.h"
56 #include "llvm/ADT/SmallVector.h"
57 #include "llvm/ADT/StringExtras.h"
58 #include "llvm/ADT/StringMap.h"
59 #include "llvm/ADT/StringRef.h"
60 #include "llvm/ADT/Twine.h"
61 #include "llvm/BinaryFormat/Dwarf.h"
62 #include "llvm/IR/Argument.h"
63 #include "llvm/IR/Attributes.h"
64 #include "llvm/IR/BasicBlock.h"
65 #include "llvm/IR/CFG.h"
66 #include "llvm/IR/CallingConv.h"
67 #include "llvm/IR/Comdat.h"
68 #include "llvm/IR/Constant.h"
69 #include "llvm/IR/ConstantRange.h"
70 #include "llvm/IR/Constants.h"
71 #include "llvm/IR/DataLayout.h"
72 #include "llvm/IR/DebugInfoMetadata.h"
73 #include "llvm/IR/DebugLoc.h"
74 #include "llvm/IR/DerivedTypes.h"
75 #include "llvm/IR/Dominators.h"
76 #include "llvm/IR/Function.h"
77 #include "llvm/IR/GlobalAlias.h"
78 #include "llvm/IR/GlobalValue.h"
79 #include "llvm/IR/GlobalVariable.h"
80 #include "llvm/IR/InlineAsm.h"
81 #include "llvm/IR/InstVisitor.h"
82 #include "llvm/IR/InstrTypes.h"
83 #include "llvm/IR/Instruction.h"
84 #include "llvm/IR/Instructions.h"
85 #include "llvm/IR/IntrinsicInst.h"
86 #include "llvm/IR/Intrinsics.h"
87 #include "llvm/IR/IntrinsicsAArch64.h"
88 #include "llvm/IR/IntrinsicsARM.h"
89 #include "llvm/IR/IntrinsicsWebAssembly.h"
90 #include "llvm/IR/LLVMContext.h"
91 #include "llvm/IR/Metadata.h"
92 #include "llvm/IR/Module.h"
93 #include "llvm/IR/ModuleSlotTracker.h"
94 #include "llvm/IR/PassManager.h"
95 #include "llvm/IR/Statepoint.h"
96 #include "llvm/IR/Type.h"
97 #include "llvm/IR/Use.h"
98 #include "llvm/IR/User.h"
99 #include "llvm/IR/Value.h"
100 #include "llvm/InitializePasses.h"
101 #include "llvm/Pass.h"
102 #include "llvm/Support/AtomicOrdering.h"
103 #include "llvm/Support/Casting.h"
104 #include "llvm/Support/CommandLine.h"
105 #include "llvm/Support/ErrorHandling.h"
106 #include "llvm/Support/MathExtras.h"
107 #include "llvm/Support/raw_ostream.h"
108 #include <algorithm>
109 #include <cassert>
110 #include <cstdint>
111 #include <memory>
112 #include <string>
113 #include <utility>
114
using namespace llvm;

// Hidden command-line switch, off by default. When enabled, the verifier
// additionally rejects llvm.experimental.noalias.scope.decl intrinsics for
// identical scopes where one declaration dominates the other (checked by
// Verifier::verifyNoAliasScopeDecl).
static cl::opt<bool> VerifyNoAliasScopeDomination(
    "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
    cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
             "scopes are not dominating"));
121
122 namespace llvm {
123
124 struct VerifierSupport {
125 raw_ostream *OS;
126 const Module &M;
127 ModuleSlotTracker MST;
128 Triple TT;
129 const DataLayout &DL;
130 LLVMContext &Context;
131
132 /// Track the brokenness of the module while recursively visiting.
133 bool Broken = false;
134 /// Broken debug info can be "recovered" from by stripping the debug info.
135 bool BrokenDebugInfo = false;
136 /// Whether to treat broken debug info as an error.
137 bool TreatBrokenDebugInfoAsError = true;
138
VerifierSupportllvm::VerifierSupport139 explicit VerifierSupport(raw_ostream *OS, const Module &M)
140 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
141 Context(M.getContext()) {}
142
143 private:
Writellvm::VerifierSupport144 void Write(const Module *M) {
145 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
146 }
147
Writellvm::VerifierSupport148 void Write(const Value *V) {
149 if (V)
150 Write(*V);
151 }
152
Writellvm::VerifierSupport153 void Write(const Value &V) {
154 if (isa<Instruction>(V)) {
155 V.print(*OS, MST);
156 *OS << '\n';
157 } else {
158 V.printAsOperand(*OS, true, MST);
159 *OS << '\n';
160 }
161 }
162
Writellvm::VerifierSupport163 void Write(const Metadata *MD) {
164 if (!MD)
165 return;
166 MD->print(*OS, MST, &M);
167 *OS << '\n';
168 }
169
Writellvm::VerifierSupport170 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
171 Write(MD.get());
172 }
173
Writellvm::VerifierSupport174 void Write(const NamedMDNode *NMD) {
175 if (!NMD)
176 return;
177 NMD->print(*OS, MST);
178 *OS << '\n';
179 }
180
Writellvm::VerifierSupport181 void Write(Type *T) {
182 if (!T)
183 return;
184 *OS << ' ' << *T;
185 }
186
Writellvm::VerifierSupport187 void Write(const Comdat *C) {
188 if (!C)
189 return;
190 *OS << *C;
191 }
192
Writellvm::VerifierSupport193 void Write(const APInt *AI) {
194 if (!AI)
195 return;
196 *OS << *AI << '\n';
197 }
198
Writellvm::VerifierSupport199 void Write(const unsigned i) { *OS << i << '\n'; }
200
201 // NOLINTNEXTLINE(readability-identifier-naming)
Writellvm::VerifierSupport202 void Write(const Attribute *A) {
203 if (!A)
204 return;
205 *OS << A->getAsString() << '\n';
206 }
207
208 // NOLINTNEXTLINE(readability-identifier-naming)
Writellvm::VerifierSupport209 void Write(const AttributeSet *AS) {
210 if (!AS)
211 return;
212 *OS << AS->getAsString() << '\n';
213 }
214
215 // NOLINTNEXTLINE(readability-identifier-naming)
Writellvm::VerifierSupport216 void Write(const AttributeList *AL) {
217 if (!AL)
218 return;
219 AL->print(*OS);
220 }
221
Writellvm::VerifierSupport222 template <typename T> void Write(ArrayRef<T> Vs) {
223 for (const T &V : Vs)
224 Write(V);
225 }
226
227 template <typename T1, typename... Ts>
WriteTsllvm::VerifierSupport228 void WriteTs(const T1 &V1, const Ts &... Vs) {
229 Write(V1);
230 WriteTs(Vs...);
231 }
232
WriteTsllvm::VerifierSupport233 template <typename... Ts> void WriteTs() {}
234
235 public:
236 /// A check failed, so printout out the condition and the message.
237 ///
238 /// This provides a nice place to put a breakpoint if you want to see why
239 /// something is not correct.
CheckFailedllvm::VerifierSupport240 void CheckFailed(const Twine &Message) {
241 if (OS)
242 *OS << Message << '\n';
243 Broken = true;
244 }
245
246 /// A check failed (with values to print).
247 ///
248 /// This calls the Message-only version so that the above is easier to set a
249 /// breakpoint on.
250 template <typename T1, typename... Ts>
CheckFailedllvm::VerifierSupport251 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
252 CheckFailed(Message);
253 if (OS)
254 WriteTs(V1, Vs...);
255 }
256
257 /// A debug info check failed.
DebugInfoCheckFailedllvm::VerifierSupport258 void DebugInfoCheckFailed(const Twine &Message) {
259 if (OS)
260 *OS << Message << '\n';
261 Broken |= TreatBrokenDebugInfoAsError;
262 BrokenDebugInfo = true;
263 }
264
265 /// A debug info check failed (with values to print).
266 template <typename T1, typename... Ts>
DebugInfoCheckFailedllvm::VerifierSupport267 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
268 const Ts &... Vs) {
269 DebugInfoCheckFailed(Message);
270 if (OS)
271 WriteTs(V1, Vs...);
272 }
273 };
274
275 } // namespace llvm
276
277 namespace {
278
279 class Verifier : public InstVisitor<Verifier>, VerifierSupport {
280 friend class InstVisitor<Verifier>;
281
282 // ISD::ArgFlagsTy::MemAlign only have 4 bits for alignment, so
283 // the alignment size should not exceed 2^15. Since encode(Align)
284 // would plus the shift value by 1, the alignment size should
285 // not exceed 2^14, otherwise it can NOT be properly lowered
286 // in backend.
287 static constexpr unsigned ParamMaxAlignment = 1 << 14;
288 DominatorTree DT;
289
290 /// When verifying a basic block, keep track of all of the
291 /// instructions we have seen so far.
292 ///
293 /// This allows us to do efficient dominance checks for the case when an
294 /// instruction has an operand that is an instruction in the same block.
295 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
296
297 /// Keep track of the metadata nodes that have been checked already.
298 SmallPtrSet<const Metadata *, 32> MDNodes;
299
300 /// Keep track which DISubprogram is attached to which function.
301 DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
302
303 /// Track all DICompileUnits visited.
304 SmallPtrSet<const Metadata *, 2> CUVisited;
305
306 /// The result type for a landingpad.
307 Type *LandingPadResultTy;
308
309 /// Whether we've seen a call to @llvm.localescape in this function
310 /// already.
311 bool SawFrameEscape;
312
313 /// Whether the current function has a DISubprogram attached to it.
314 bool HasDebugInfo = false;
315
316 /// The current source language.
317 dwarf::SourceLanguage CurrentSourceLang = dwarf::DW_LANG_lo_user;
318
319 /// Whether source was present on the first DIFile encountered in each CU.
320 DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
321
322 /// Stores the count of how many objects were passed to llvm.localescape for a
323 /// given function and the largest index passed to llvm.localrecover.
324 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
325
326 // Maps catchswitches and cleanuppads that unwind to siblings to the
327 // terminators that indicate the unwind, used to detect cycles therein.
328 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
329
330 /// Cache of constants visited in search of ConstantExprs.
331 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
332
333 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
334 SmallVector<const Function *, 4> DeoptimizeDeclarations;
335
336 /// Cache of attribute lists verified.
337 SmallPtrSet<const void *, 32> AttributeListsVisited;
338
339 // Verify that this GlobalValue is only used in this module.
340 // This map is used to avoid visiting uses twice. We can arrive at a user
341 // twice, if they have multiple operands. In particular for very large
342 // constant expressions, we can arrive at a particular user many times.
343 SmallPtrSet<const Value *, 32> GlobalValueVisited;
344
345 // Keeps track of duplicate function argument debug info.
346 SmallVector<const DILocalVariable *, 16> DebugFnArgs;
347
348 TBAAVerifier TBAAVerifyHelper;
349
350 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
351
352 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
353
354 public:
Verifier(raw_ostream * OS,bool ShouldTreatBrokenDebugInfoAsError,const Module & M)355 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
356 const Module &M)
357 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
358 SawFrameEscape(false), TBAAVerifyHelper(this) {
359 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
360 }
361
hasBrokenDebugInfo() const362 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
363
verify(const Function & F)364 bool verify(const Function &F) {
365 assert(F.getParent() == &M &&
366 "An instance of this class only works with a specific module!");
367
368 // First ensure the function is well-enough formed to compute dominance
369 // information, and directly compute a dominance tree. We don't rely on the
370 // pass manager to provide this as it isolates us from a potentially
371 // out-of-date dominator tree and makes it significantly more complex to run
372 // this code outside of a pass manager.
373 // FIXME: It's really gross that we have to cast away constness here.
374 if (!F.empty())
375 DT.recalculate(const_cast<Function &>(F));
376
377 for (const BasicBlock &BB : F) {
378 if (!BB.empty() && BB.back().isTerminator())
379 continue;
380
381 if (OS) {
382 *OS << "Basic Block in function '" << F.getName()
383 << "' does not have terminator!\n";
384 BB.printAsOperand(*OS, true, MST);
385 *OS << "\n";
386 }
387 return false;
388 }
389
390 Broken = false;
391 // FIXME: We strip const here because the inst visitor strips const.
392 visit(const_cast<Function &>(F));
393 verifySiblingFuncletUnwinds();
394 InstsInThisBlock.clear();
395 DebugFnArgs.clear();
396 LandingPadResultTy = nullptr;
397 SawFrameEscape = false;
398 SiblingFuncletInfo.clear();
399 verifyNoAliasScopeDecl();
400 NoAliasScopeDecls.clear();
401
402 return !Broken;
403 }
404
405 /// Verify the module that this instance of \c Verifier was initialized with.
verify()406 bool verify() {
407 Broken = false;
408
409 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
410 for (const Function &F : M)
411 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
412 DeoptimizeDeclarations.push_back(&F);
413
414 // Now that we've visited every function, verify that we never asked to
415 // recover a frame index that wasn't escaped.
416 verifyFrameRecoverIndices();
417 for (const GlobalVariable &GV : M.globals())
418 visitGlobalVariable(GV);
419
420 for (const GlobalAlias &GA : M.aliases())
421 visitGlobalAlias(GA);
422
423 for (const GlobalIFunc &GI : M.ifuncs())
424 visitGlobalIFunc(GI);
425
426 for (const NamedMDNode &NMD : M.named_metadata())
427 visitNamedMDNode(NMD);
428
429 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
430 visitComdat(SMEC.getValue());
431
432 visitModuleFlags();
433 visitModuleIdents();
434 visitModuleCommandLines();
435
436 verifyCompileUnits();
437
438 verifyDeoptimizeCallingConvs();
439 DISubprogramAttachments.clear();
440 return !Broken;
441 }
442
443 private:
444 /// Whether a metadata node is allowed to be, or contain, a DILocation.
445 enum class AreDebugLocsAllowed { No, Yes };
446
447 // Verification methods...
448 void visitGlobalValue(const GlobalValue &GV);
449 void visitGlobalVariable(const GlobalVariable &GV);
450 void visitGlobalAlias(const GlobalAlias &GA);
451 void visitGlobalIFunc(const GlobalIFunc &GI);
452 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
453 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
454 const GlobalAlias &A, const Constant &C);
455 void visitNamedMDNode(const NamedMDNode &NMD);
456 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
457 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
458 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
459 void visitComdat(const Comdat &C);
460 void visitModuleIdents();
461 void visitModuleCommandLines();
462 void visitModuleFlags();
463 void visitModuleFlag(const MDNode *Op,
464 DenseMap<const MDString *, const MDNode *> &SeenIDs,
465 SmallVectorImpl<const MDNode *> &Requirements);
466 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
467 void visitFunction(const Function &F);
468 void visitBasicBlock(BasicBlock &BB);
469 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
470 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
471 void visitProfMetadata(Instruction &I, MDNode *MD);
472 void visitCallStackMetadata(MDNode *MD);
473 void visitMemProfMetadata(Instruction &I, MDNode *MD);
474 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
475 void visitAnnotationMetadata(MDNode *Annotation);
476 void visitAliasScopeMetadata(const MDNode *MD);
477 void visitAliasScopeListMetadata(const MDNode *MD);
478 void visitAccessGroupMetadata(const MDNode *MD);
479
480 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
481 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
482 #include "llvm/IR/Metadata.def"
483 void visitDIScope(const DIScope &N);
484 void visitDIVariable(const DIVariable &N);
485 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
486 void visitDITemplateParameter(const DITemplateParameter &N);
487
488 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
489
490 // InstVisitor overrides...
491 using InstVisitor<Verifier>::visit;
492 void visit(Instruction &I);
493
494 void visitTruncInst(TruncInst &I);
495 void visitZExtInst(ZExtInst &I);
496 void visitSExtInst(SExtInst &I);
497 void visitFPTruncInst(FPTruncInst &I);
498 void visitFPExtInst(FPExtInst &I);
499 void visitFPToUIInst(FPToUIInst &I);
500 void visitFPToSIInst(FPToSIInst &I);
501 void visitUIToFPInst(UIToFPInst &I);
502 void visitSIToFPInst(SIToFPInst &I);
503 void visitIntToPtrInst(IntToPtrInst &I);
504 void visitPtrToIntInst(PtrToIntInst &I);
505 void visitBitCastInst(BitCastInst &I);
506 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
507 void visitPHINode(PHINode &PN);
508 void visitCallBase(CallBase &Call);
509 void visitUnaryOperator(UnaryOperator &U);
510 void visitBinaryOperator(BinaryOperator &B);
511 void visitICmpInst(ICmpInst &IC);
512 void visitFCmpInst(FCmpInst &FC);
513 void visitExtractElementInst(ExtractElementInst &EI);
514 void visitInsertElementInst(InsertElementInst &EI);
515 void visitShuffleVectorInst(ShuffleVectorInst &EI);
visitVAArgInst(VAArgInst & VAA)516 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
517 void visitCallInst(CallInst &CI);
518 void visitInvokeInst(InvokeInst &II);
519 void visitGetElementPtrInst(GetElementPtrInst &GEP);
520 void visitLoadInst(LoadInst &LI);
521 void visitStoreInst(StoreInst &SI);
522 void verifyDominatesUse(Instruction &I, unsigned i);
523 void visitInstruction(Instruction &I);
524 void visitTerminator(Instruction &I);
525 void visitBranchInst(BranchInst &BI);
526 void visitReturnInst(ReturnInst &RI);
527 void visitSwitchInst(SwitchInst &SI);
528 void visitIndirectBrInst(IndirectBrInst &BI);
529 void visitCallBrInst(CallBrInst &CBI);
530 void visitSelectInst(SelectInst &SI);
531 void visitUserOp1(Instruction &I);
visitUserOp2(Instruction & I)532 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
533 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
534 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
535 void visitVPIntrinsic(VPIntrinsic &VPI);
536 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
537 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
538 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
539 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
540 void visitFenceInst(FenceInst &FI);
541 void visitAllocaInst(AllocaInst &AI);
542 void visitExtractValueInst(ExtractValueInst &EVI);
543 void visitInsertValueInst(InsertValueInst &IVI);
544 void visitEHPadPredecessors(Instruction &I);
545 void visitLandingPadInst(LandingPadInst &LPI);
546 void visitResumeInst(ResumeInst &RI);
547 void visitCatchPadInst(CatchPadInst &CPI);
548 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
549 void visitCleanupPadInst(CleanupPadInst &CPI);
550 void visitFuncletPadInst(FuncletPadInst &FPI);
551 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
552 void visitCleanupReturnInst(CleanupReturnInst &CRI);
553
554 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
555 void verifySwiftErrorValue(const Value *SwiftErrorVal);
556 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
557 void verifyMustTailCall(CallInst &CI);
558 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
559 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
560 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
561 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
562 const Value *V);
563 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
564 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
565 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
566
567 void visitConstantExprsRecursively(const Constant *EntryC);
568 void visitConstantExpr(const ConstantExpr *CE);
569 void verifyInlineAsmCall(const CallBase &Call);
570 void verifyStatepoint(const CallBase &Call);
571 void verifyFrameRecoverIndices();
572 void verifySiblingFuncletUnwinds();
573
574 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
575 template <typename ValueOrMetadata>
576 void verifyFragmentExpression(const DIVariable &V,
577 DIExpression::FragmentInfo Fragment,
578 ValueOrMetadata *Desc);
579 void verifyFnArgs(const DbgVariableIntrinsic &I);
580 void verifyNotEntryValue(const DbgVariableIntrinsic &I);
581
582 /// Module-level debug info verification...
583 void verifyCompileUnits();
584
585 /// Module-level verification that all @llvm.experimental.deoptimize
586 /// declarations share the same calling convention.
587 void verifyDeoptimizeCallingConvs();
588
589 void verifyAttachedCallBundle(const CallBase &Call,
590 const OperandBundleUse &BU);
591
592 /// Verify all-or-nothing property of DIFile source attribute within a CU.
593 void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
594
595 /// Verify the llvm.experimental.noalias.scope.decl declarations
596 void verifyNoAliasScopeDecl();
597 };
598
599 } // end anonymous namespace
600
/// We know that cond should be true, if not print an error message.
/// Note: on failure these macros execute `return;`, so they may only be used
/// inside functions returning void (such as the Verifier visit methods).
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)

/// We know that a debug info condition should be true, if not print
/// an error message.
#define CheckDI(C, ...)                                                        \
  do {                                                                         \
    if (!(C)) {                                                                \
      DebugInfoCheckFailed(__VA_ARGS__);                                       \
      return;                                                                  \
    }                                                                          \
  } while (false)
619
visit(Instruction & I)620 void Verifier::visit(Instruction &I) {
621 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
622 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
623 InstVisitor<Verifier>::visit(I);
624 }
625
626 // Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
forEachUser(const Value * User,SmallPtrSet<const Value *,32> & Visited,llvm::function_ref<bool (const Value *)> Callback)627 static void forEachUser(const Value *User,
628 SmallPtrSet<const Value *, 32> &Visited,
629 llvm::function_ref<bool(const Value *)> Callback) {
630 if (!Visited.insert(User).second)
631 return;
632
633 SmallVector<const Value *> WorkList;
634 append_range(WorkList, User->materialized_users());
635 while (!WorkList.empty()) {
636 const Value *Cur = WorkList.pop_back_val();
637 if (!Visited.insert(Cur).second)
638 continue;
639 if (Callback(Cur))
640 append_range(WorkList, Cur->materialized_users());
641 }
642 }
643
visitGlobalValue(const GlobalValue & GV)644 void Verifier::visitGlobalValue(const GlobalValue &GV) {
645 Check(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
646 "Global is external, but doesn't have external or weak linkage!", &GV);
647
648 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
649
650 if (MaybeAlign A = GO->getAlign()) {
651 Check(A->value() <= Value::MaximumAlignment,
652 "huge alignment values are unsupported", GO);
653 }
654 }
655 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
656 "Only global variables can have appending linkage!", &GV);
657
658 if (GV.hasAppendingLinkage()) {
659 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
660 Check(GVar && GVar->getValueType()->isArrayTy(),
661 "Only global arrays can have appending linkage!", GVar);
662 }
663
664 if (GV.isDeclarationForLinker())
665 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
666
667 if (GV.hasDLLImportStorageClass()) {
668 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
669 &GV);
670
671 Check((GV.isDeclaration() &&
672 (GV.hasExternalLinkage() || GV.hasExternalWeakLinkage())) ||
673 GV.hasAvailableExternallyLinkage(),
674 "Global is marked as dllimport, but not external", &GV);
675 }
676
677 if (GV.isImplicitDSOLocal())
678 Check(GV.isDSOLocal(),
679 "GlobalValue with local linkage or non-default "
680 "visibility must be dso_local!",
681 &GV);
682
683 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
684 if (const Instruction *I = dyn_cast<Instruction>(V)) {
685 if (!I->getParent() || !I->getParent()->getParent())
686 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
687 I);
688 else if (I->getParent()->getParent()->getParent() != &M)
689 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
690 I->getParent()->getParent(),
691 I->getParent()->getParent()->getParent());
692 return false;
693 } else if (const Function *F = dyn_cast<Function>(V)) {
694 if (F->getParent() != &M)
695 CheckFailed("Global is used by function in a different module", &GV, &M,
696 F, F->getParent());
697 return false;
698 }
699 return true;
700 });
701 }
702
visitGlobalVariable(const GlobalVariable & GV)703 void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
704 if (GV.hasInitializer()) {
705 Check(GV.getInitializer()->getType() == GV.getValueType(),
706 "Global variable initializer type does not match global "
707 "variable type!",
708 &GV);
709 // If the global has common linkage, it must have a zero initializer and
710 // cannot be constant.
711 if (GV.hasCommonLinkage()) {
712 Check(GV.getInitializer()->isNullValue(),
713 "'common' global must have a zero initializer!", &GV);
714 Check(!GV.isConstant(), "'common' global may not be marked constant!",
715 &GV);
716 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
717 }
718 }
719
720 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
721 GV.getName() == "llvm.global_dtors")) {
722 Check(!GV.hasInitializer() || GV.hasAppendingLinkage(),
723 "invalid linkage for intrinsic global variable", &GV);
724 // Don't worry about emitting an error for it not being an array,
725 // visitGlobalValue will complain on appending non-array.
726 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
727 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
728 PointerType *FuncPtrTy =
729 FunctionType::get(Type::getVoidTy(Context), false)->
730 getPointerTo(DL.getProgramAddressSpace());
731 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
732 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
733 STy->getTypeAtIndex(1) == FuncPtrTy,
734 "wrong type for intrinsic global variable", &GV);
735 Check(STy->getNumElements() == 3,
736 "the third field of the element type is mandatory, "
737 "specify i8* null to migrate from the obsoleted 2-field form");
738 Type *ETy = STy->getTypeAtIndex(2);
739 Type *Int8Ty = Type::getInt8Ty(ETy->getContext());
740 Check(ETy->isPointerTy() &&
741 cast<PointerType>(ETy)->isOpaqueOrPointeeTypeMatches(Int8Ty),
742 "wrong type for intrinsic global variable", &GV);
743 }
744 }
745
746 if (GV.hasName() && (GV.getName() == "llvm.used" ||
747 GV.getName() == "llvm.compiler.used")) {
748 Check(!GV.hasInitializer() || GV.hasAppendingLinkage(),
749 "invalid linkage for intrinsic global variable", &GV);
750 Type *GVType = GV.getValueType();
751 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
752 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
753 Check(PTy, "wrong type for intrinsic global variable", &GV);
754 if (GV.hasInitializer()) {
755 const Constant *Init = GV.getInitializer();
756 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
757 Check(InitArray, "wrong initalizer for intrinsic global variable",
758 Init);
759 for (Value *Op : InitArray->operands()) {
760 Value *V = Op->stripPointerCasts();
761 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
762 isa<GlobalAlias>(V),
763 Twine("invalid ") + GV.getName() + " member", V);
764 Check(V->hasName(),
765 Twine("members of ") + GV.getName() + " must be named", V);
766 }
767 }
768 }
769 }
770
771 // Visit any debug info attachments.
772 SmallVector<MDNode *, 1> MDs;
773 GV.getMetadata(LLVMContext::MD_dbg, MDs);
774 for (auto *MD : MDs) {
775 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
776 visitDIGlobalVariableExpression(*GVE);
777 else
778 CheckDI(false, "!dbg attachment of global variable must be a "
779 "DIGlobalVariableExpression");
780 }
781
782 // Scalable vectors cannot be global variables, since we don't know
783 // the runtime size. If the global is an array containing scalable vectors,
784 // that will be caught by the isValidElementType methods in StructType or
785 // ArrayType instead.
786 Check(!isa<ScalableVectorType>(GV.getValueType()),
787 "Globals cannot contain scalable vectors", &GV);
788
789 if (auto *STy = dyn_cast<StructType>(GV.getValueType()))
790 Check(!STy->containsScalableVectorType(),
791 "Globals cannot contain scalable vectors", &GV);
792
793 if (!GV.hasInitializer()) {
794 visitGlobalValue(GV);
795 return;
796 }
797
798 // Walk any aggregate initializers looking for bitcasts between address spaces
799 visitConstantExprsRecursively(GV.getInitializer());
800
801 visitGlobalValue(GV);
802 }
803
visitAliaseeSubExpr(const GlobalAlias & GA,const Constant & C)804 void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
805 SmallPtrSet<const GlobalAlias*, 4> Visited;
806 Visited.insert(&GA);
807 visitAliaseeSubExpr(Visited, GA, C);
808 }
809
// Recursively verify the aliasee expression of GA. Visited records every
// GlobalAlias already seen on this chain so that alias cycles are diagnosed
// instead of recursed into forever. Check failures return from this function
// immediately, so later checks only run when earlier ones pass.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
    Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
          &GA);

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // insert() returns false when GA2 was already seen on this chain.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Walk the operands: nested aliases are followed through their aliasee,
  // other constants are verified directly.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
839
// Verify a global alias: restricted linkage, a non-null aliasee whose type
// matches the alias, and the aliasee expression itself (cycle and
// interposability checks happen in visitAliaseeSubExpr).
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
  Check(GlobalAlias::isValidLinkage(GA.getLinkage()),
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  visitAliaseeSubExpr(GA, *Aliasee);

  // Checks common to all global values.
  visitGlobalValue(GA);
}
857
// Verify a global ifunc: restricted linkage and a resolver that is a defined
// Function whose immediate operand has the expected pointer type.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  Check(GlobalIFunc::isValidLinkage(GI.getLinkage()),
        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!",
        &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();
  const Type *ResolverFuncTy =
      GlobalIFunc::getResolverFunctionType(GI.getValueType());
  Check(ResolverTy == ResolverFuncTy->getPointerTo(),
        "IFunc resolver has incorrect type", &GI);
}
878
// Verify a named (module-level) metadata node. The llvm.dbg.* namespace is
// reserved: only llvm.dbg.cu is recognized there, and its operands must all
// be DICompileUnits.
void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
  // There used to be various other llvm.dbg.* nodes, but we don't support
  // upgrading them and we want to reserve the namespace for future uses.
  if (NMD.getName().startswith("llvm.dbg."))
    CheckDI(NMD.getName() == "llvm.dbg.cu",
            "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
  for (const MDNode *MD : NMD.operands()) {
    if (NMD.getName() == "llvm.dbg.cu")
      CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);

    // Null operands are tolerated here; there is nothing further to verify.
    if (!MD)
      continue;

    visitMDNode(*MD, AreDebugLocsAllowed::Yes);
  }
}
895
// Verify an MDNode: dispatch to the specialized visitor for its concrete
// subclass (the switch is generated from Metadata.def), then recursively
// verify its operands. Temporary/unresolved nodes are rejected last so that
// operand problems are diagnosed first.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    // Function-local metadata may not appear in global metadata graphs.
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    // DILocations are only legal where the caller explicitly allows them.
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
938
// Verify metadata that wraps an IR value. For function-local metadata
// (LocalAsMetadata), additionally verify that the wrapped instruction, basic
// block, or argument belongs to the function F from which it is used.
void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
  Check(MD.getValue(), "Expected valid value", &MD);
  Check(!MD.getValue()->getType()->isMetadataTy(),
        "Unexpected metadata round-trip through values", &MD, MD.getValue());

  // Only function-local metadata needs the ownership checks below.
  auto *L = dyn_cast<LocalAsMetadata>(&MD);
  if (!L)
    return;

  Check(F, "function-local metadata used outside a function", L);

  // If this was an instruction, bb, or argument, verify that it is in the
  // function that we expect.
  Function *ActualF = nullptr;
  if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
    Check(I->getParent(), "function-local metadata not in basic block", L, I);
    ActualF = I->getParent()->getParent();
  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
    ActualF = BB->getParent();
  else if (Argument *A = dyn_cast<Argument>(L->getValue()))
    ActualF = A->getParent();
  assert(ActualF && "Unimplemented function local metadata case!");

  Check(ActualF == F, "function-local metadata used in wrong function", L);
}
964
visitMetadataAsValue(const MetadataAsValue & MDV,Function * F)965 void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
966 Metadata *MD = MDV.getMetadata();
967 if (auto *N = dyn_cast<MDNode>(MD)) {
968 visitMDNode(*N, AreDebugLocsAllowed::No);
969 return;
970 }
971
972 // Only visit each node once. Metadata can be mutually recursive, so this
973 // avoids infinite recursion here, as well as being an optimization.
974 if (!MDNodes.insert(MD).second)
975 return;
976
977 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
978 visitValueAsMetadata(*V, F);
979 }
980
isType(const Metadata * MD)981 static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
isScope(const Metadata * MD)982 static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
isDINode(const Metadata * MD)983 static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
984
// Verify a source location: it needs a DILocalScope, an optional inlined-at
// chain of DILocations, and — when the scope is a subprogram — that the
// subprogram is a definition (declarations live in the type hierarchy).
void Verifier::visitDILocation(const DILocation &N) {
  // CheckDI bails out of this function on failure, so the dyn_cast below
  // only runs with a non-null scope.
  CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
          "location requires a valid scope", &N, N.getRawScope());
  if (auto *IA = N.getRawInlinedAt())
    CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
    CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
}
993
visitGenericDINode(const GenericDINode & N)994 void Verifier::visitGenericDINode(const GenericDINode &N) {
995 CheckDI(N.getTag(), "invalid tag", &N);
996 }
997
visitDIScope(const DIScope & N)998 void Verifier::visitDIScope(const DIScope &N) {
999 if (auto *F = N.getRawFile())
1000 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1001 }
1002
// Verify a DISubrange (one array dimension). At most one of count/upperBound
// may be present, and at least one is required unless the source language
// supports assumed-size arrays (Fortran). Each bound may be a constant,
// DIVariable, or DIExpression.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // CurrentSourceLang is recorded while visiting the enclosing DICompileUnit.
  bool HasAssumedSizedArraySupport = dwarf::isFortran(CurrentSourceLang);
  CheckDI(HasAssumedSizedArraySupport || N.getRawCountNode() ||
              N.getRawUpperBound(),
          "Subrange must contain count or upperBound", &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  // Constant counts must be >= -1; smaller values are rejected.
  auto Count = N.getCount();
  CheckDI(!Count || !Count.is<ConstantInt *>() ||
              Count.get<ConstantInt *>()->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1034
// Verify a DIGenericSubrange. Unlike DISubrange, lowerBound and stride are
// mandatory, and bounds may only be DIVariable or DIExpression (no constant
// metadata). CheckDI bails out on failure, so the isa<> checks below only
// run on non-null nodes.
void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
  CheckDI(N.getRawCountNode() || N.getRawUpperBound(),
          "GenericSubrange must contain count or upperBound", &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "GenericSubrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  // NOTE(review): the diagnostics below say "signed constant" but, unlike
  // DISubrange, ConstantAsMetadata is not accepted here — confirm intended.
  CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
  CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(Stride, "GenericSubrange must contain stride", &N);
  CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1058
visitDIEnumerator(const DIEnumerator & N)1059 void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1060 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1061 }
1062
visitDIBasicType(const DIBasicType & N)1063 void Verifier::visitDIBasicType(const DIBasicType &N) {
1064 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1065 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1066 N.getTag() == dwarf::DW_TAG_string_type,
1067 "invalid tag", &N);
1068 }
1069
visitDIStringType(const DIStringType & N)1070 void Verifier::visitDIStringType(const DIStringType &N) {
1071 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1072 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1073 &N);
1074 }
1075
// Verify a DIDerivedType (typedefs, pointers, qualifiers, members, ...):
// allowed tags, tag-specific operands (ptr-to-member extra data, set base
// type, DWARF address space), and the common scope/base-type references.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type,
          "invalid tag", &N);
  // For pointer-to-member, the extra data holds the class type.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  }

  // Set types (Pascal-style) may only be built over enumerations or a small
  // set of basic integer-like encodings.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      auto *Enum = dyn_cast_or_null<DICompositeType>(T);
      auto *Basic = dyn_cast_or_null<DIBasicType>(T);
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // DWARF address spaces are only meaningful on pointer/reference kinds.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }
}
1127
1128 /// Detect mutually exclusive flags.
hasConflictingReferenceFlags(unsigned Flags)1129 static bool hasConflictingReferenceFlags(unsigned Flags) {
1130 return ((Flags & DINode::FlagLValueReference) &&
1131 (Flags & DINode::FlagRValueReference)) ||
1132 ((Flags & DINode::FlagTypePassByValue) &&
1133 (Flags & DINode::FlagTypePassByReference));
1134 }
1135
// Verify a template parameter list: it must be an MDTuple whose operands are
// all DITemplateParameters. CheckDI returns from this function on failure,
// so the loop never dereferences a null Params.
void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
  auto *Params = dyn_cast<MDTuple>(&RawParams);
  CheckDI(Params, "invalid template params", &N, &RawParams);
  for (Metadata *Op : Params->operands()) {
    CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
            &N, Params, Op);
  }
}
1144
// Verify a DICompositeType (arrays, structs, unions, enums, classes, variant
// parts, namelists): allowed tags, well-formed references, reference-flag
// consistency, and the array-only Fortran fields (dataLocation, associated,
// allocated, rank).
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
  CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
          "invalid reference flags", &N);
  // Bit 4 was the retired block-by-reference flag; reject it explicitly.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);

  // Vector types must describe exactly one subrange dimension.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }
}
1207
// Verify a subroutine type: its type array (return type + parameter types)
// must be an MDTuple of type references, and its reference flags must be
// consistent.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
  CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
          "invalid reference flags", &N);
}
1219
// Verify a DIFile. When a checksum is attached, its kind must be one of the
// known algorithms and its value must be a hex string of the exact digest
// length for that algorithm.
void Verifier::visitDIFile(const DIFile &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
  Optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
  if (Checksum) {
    // CheckDI returns on failure, so the switch below only sees valid kinds
    // and Size is always initialized.
    CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
            "invalid checksum kind", &N);
    size_t Size;
    switch (Checksum->Kind) {
    case DIFile::CSK_MD5:
      Size = 32;
      break;
    case DIFile::CSK_SHA1:
      Size = 40;
      break;
    case DIFile::CSK_SHA256:
      Size = 64;
      break;
    }
    CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
    CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
            "invalid checksum", &N);
  }
}
1243
// Verify a DICompileUnit: distinctness, a valid file, emission kind, and the
// shape of each attached list (enums, retained types, globals, imported
// entities, macros). Also records the unit's source language in
// CurrentSourceLang (consumed by e.g. visitDISubrange) and remembers the
// unit in CUVisited.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CurrentSourceLang = (dwarf::SourceLanguage)N.getSourceLanguage();

  verifySourceDebugInfo(N, *N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained subprograms must be declarations; definitions are reached
      // through their own unit link instead.
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
      CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  CUVisited.insert(&N);
}
1301
// Verify a DISubprogram. Definitions must be distinct and linked to a
// compile unit; declarations must not be. Also validates the file/line
// pairing, the subroutine type, template parameters, retained nodes, thrown
// types, and the DIFlagAllCallsDescribed restriction.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration reference must itself be a non-definition subprogram.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op)),
              "invalid retained nodes, expected DILocalVariable or DILabel", &N,
              Node, Op);
    }
  }
  CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    if (N.getFile())
      verifySourceDebugInfo(*N.getUnit(), *N.getFile());
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1355
// Shared checks for lexical block nodes: a DILocalScope parent is required,
// and a subprogram parent must be a definition. CheckDI returns on failure,
// so the dyn_cast below only sees a non-null scope.
void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
  CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
          "invalid local scope", &N, N.getRawScope());
  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
    CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
}
1363
visitDILexicalBlock(const DILexicalBlock & N)1364 void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1365 visitDILexicalBlockBase(N);
1366
1367 CheckDI(N.getLine() || !N.getColumn(),
1368 "cannot have column info without line info", &N);
1369 }
1370
// A lexical block file node only needs the shared lexical-block checks.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
1374
visitDICommonBlock(const DICommonBlock & N)1375 void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1376 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1377 if (auto *S = N.getRawScope())
1378 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1379 if (auto *S = N.getRawDecl())
1380 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1381 }
1382
visitDINamespace(const DINamespace & N)1383 void Verifier::visitDINamespace(const DINamespace &N) {
1384 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1385 if (auto *S = N.getRawScope())
1386 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1387 }
1388
// Verify a macro node: it must be a #define or #undef, must be named, and —
// as an internal invariant only (assert, not CheckDI) — its value must not
// start with a space.
void Verifier::visitDIMacro(const DIMacro &N) {
  CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
              N.getMacinfoType() == dwarf::DW_MACINFO_undef,
          "invalid macinfo type", &N);
  CheckDI(!N.getName().empty(), "anonymous macro", &N);
  if (!N.getValue().empty()) {
    assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
  }
}
1398
// Verify a macro file node: correct macinfo type, an optional DIFile, and an
// element list that, when present, is an MDTuple of DIMacroNodes.
void Verifier::visitDIMacroFile(const DIMacroFile &N) {
  CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
          "invalid macinfo type", &N);
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);

  if (auto *Array = N.getRawElements()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getElements()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
}
1412
visitDIArgList(const DIArgList & N)1413 void Verifier::visitDIArgList(const DIArgList &N) {
1414 CheckDI(!N.getNumOperands(),
1415 "DIArgList should have no operands other than a list of "
1416 "ValueAsMetadata",
1417 &N);
1418 }
1419
visitDIModule(const DIModule & N)1420 void Verifier::visitDIModule(const DIModule &N) {
1421 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1422 CheckDI(!N.getName().empty(), "anonymous module", &N);
1423 }
1424
// Shared check for template parameter nodes: the (optional) type reference
// must be a DIType.
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}
1428
visitDITemplateTypeParameter(const DITemplateTypeParameter & N)1429 void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1430 visitDITemplateParameter(N);
1431
1432 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1433 &N);
1434 }
1435
visitDITemplateValueParameter(const DITemplateValueParameter & N)1436 void Verifier::visitDITemplateValueParameter(
1437 const DITemplateValueParameter &N) {
1438 visitDITemplateParameter(N);
1439
1440 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1441 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1442 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1443 "invalid tag", &N);
1444 }
1445
visitDIVariable(const DIVariable & N)1446 void Verifier::visitDIVariable(const DIVariable &N) {
1447 if (auto *S = N.getRawScope())
1448 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1449 if (auto *F = N.getRawFile())
1450 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1451 }
1452
// Verify a global variable node: shared variable checks, correct tag, a
// valid type ref (required for definitions, optional for externs), and an
// optional static-data-member declaration that must be a DIDerivedType.
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
    CheckDI(isa<DIDerivedType>(Member),
            "invalid static data member declaration", &N, Member);
  }
}
1467
// Verify a local variable node: shared variable checks, correct tag, a
// mandatory DILocalScope, and a type that must not itself be a subroutine
// type.
void Verifier::visitDILocalVariable(const DILocalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
          "local variable requires a valid scope", &N, N.getRawScope());
  if (auto Ty = N.getType())
    CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
}
1479
// Verify a label node: optional scope/file kind checks, correct tag, and a
// mandatory DILocalScope.
void Verifier::visitDILabel(const DILabel &N) {
  if (auto *S = N.getRawScope())
    CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);

  CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
  CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
          "label requires a valid scope", &N, N.getRawScope());
}
1490
// Verify a DIExpression; validity of the opcode stream is delegated to the
// node's own isValid().
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1494
// Verify a (variable, expression) pair: the variable is mandatory, and when
// the expression carries a fragment, the fragment must fit the variable.
void Verifier::visitDIGlobalVariableExpression(
    const DIGlobalVariableExpression &GVE) {
  CheckDI(GVE.getVariable(), "missing variable");
  if (auto *Var = GVE.getVariable())
    visitDIGlobalVariable(*Var);
  if (auto *Expr = GVE.getExpression()) {
    visitDIExpression(*Expr);
    if (auto Fragment = Expr->getFragmentInfo())
      verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
  }
}
1506
visitDIObjCProperty(const DIObjCProperty & N)1507 void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1508 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1509 if (auto *T = N.getRawType())
1510 CheckDI(isType(T), "invalid type ref", &N, T);
1511 if (auto *F = N.getRawFile())
1512 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1513 }
1514
// Verify an imported entity (using-directive/-declaration): one of the two
// accepted tags, an optional DIScope, and an entity that must be a DINode.
void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
              N.getTag() == dwarf::DW_TAG_imported_declaration,
          "invalid tag", &N);
  if (auto *S = N.getRawScope())
    CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
  CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
          N.getRawEntity());
}
1524
visitComdat(const Comdat & C)1525 void Verifier::visitComdat(const Comdat &C) {
1526 // In COFF the Module is invalid if the GlobalValue has private linkage.
1527 // Entities with private linkage don't have entries in the symbol table.
1528 if (TT.isOSBinFormatCOFF())
1529 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1530 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1531 GV);
1532 }
1533
visitModuleIdents()1534 void Verifier::visitModuleIdents() {
1535 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1536 if (!Idents)
1537 return;
1538
1539 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1540 // Scan each llvm.ident entry and make sure that this requirement is met.
1541 for (const MDNode *N : Idents->operands()) {
1542 Check(N->getNumOperands() == 1,
1543 "incorrect number of operands in llvm.ident metadata", N);
1544 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1545 ("invalid value for llvm.ident metadata entry operand"
1546 "(the operand should be a string)"),
1547 N->getOperand(0));
1548 }
1549 }
1550
visitModuleCommandLines()1551 void Verifier::visitModuleCommandLines() {
1552 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1553 if (!CommandLines)
1554 return;
1555
1556 // llvm.commandline takes a list of metadata entry. Each entry has only one
1557 // string. Scan each llvm.commandline entry and make sure that this
1558 // requirement is met.
1559 for (const MDNode *N : CommandLines->operands()) {
1560 Check(N->getNumOperands() == 1,
1561 "incorrect number of operands in llvm.commandline metadata", N);
1562 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1563 ("invalid value for llvm.commandline metadata entry operand"
1564 "(the operand should be a string)"),
1565 N->getOperand(0));
1566 }
1567 }
1568
// Verify module flags metadata. First pass checks each flag individually
// (via visitModuleFlag) while collecting seen IDs and any "requirement"
// flags; second pass checks that every requirement names a present flag
// with the required value.
void Verifier::visitModuleFlags() {
  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
  if (!Flags) return;

  // Scan each flag, and track the flags and requirements.
  DenseMap<const MDString*, const MDNode*> SeenIDs;
  SmallVector<const MDNode*, 16> Requirements;
  for (const MDNode *MDN : Flags->operands())
    visitModuleFlag(MDN, SeenIDs, Requirements);

  // Validate that the requirements in the module are valid.
  for (const MDNode *Requirement : Requirements) {
    const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
    const Metadata *ReqValue = Requirement->getOperand(1);

    const MDNode *Op = SeenIDs.lookup(Flag);
    if (!Op) {
      CheckFailed("invalid requirement on flag, flag is not present in module",
                  Flag);
      continue;
    }

    // Operand 2 of a flag entry is its value.
    if (Op->getOperand(2) != ReqValue) {
      CheckFailed(("invalid requirement on flag, "
                   "flag does not have the required value"),
                  Flag);
      continue;
    }
  }
}
1599
/// Verify a single !llvm.module.flags entry.
///
/// \param Op           the flag node; must be a {behavior, ID, value} triple.
/// \param SeenIDs      IDs seen so far; used to enforce uniqueness of
///                     non-'require' flags.
/// \param Requirements out-parameter collecting 'require' values for the
///                     second pass performed by visitModuleFlags().
void
Verifier::visitModuleFlag(const MDNode *Op,
                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments, the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Check(Op->getNumOperands() == 3,
        "incorrect number of operands in module flag", Op);
  Module::ModFlagBehavior MFB;
  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
    // Distinguish two failure modes: operand 0 is not a constant integer at
    // all, vs. it is a constant integer but not a known behavior value. The
    // second Check(false, ...) only fires when the first one passed.
    Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
          "invalid behavior operand in module flag (expected constant integer)",
          Op->getOperand(0));
    Check(false,
          "invalid behavior operand in module flag (unexpected constant)",
          Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
  Check(ID, "invalid ID operand in module flag (expected metadata string)",
        Op->getOperand(1));

  // Check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Min: {
    // 'min' merges by taking the smaller value, so it must be a
    // non-negative constant integer.
    auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Check(V && V->getValue().isNonNegative(),
          "invalid value for 'min' module flag (expected constant non-negative "
          "integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Max: {
    Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
          "invalid value for 'max' module flag (expected constant integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
    Check(Value && Value->getNumOperands() == 2,
          "invalid value for 'require' module flag (expected metadata pair)",
          Op->getOperand(2));
    Check(isa<MDString>(Value->getOperand(0)),
          ("invalid value for 'require' module flag "
           "(first value operand should be a string)"),
          Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags are
    // scanned.
    Requirements.push_back(Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Check(isa<MDNode>(Op->getOperand(2)),
          "invalid value for 'append'-type module flag "
          "(expected a metadata node)",
          Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
    Check(Inserted,
          "module flag identifiers must be unique (or of 'require' type)", ID);
  }

  // Well-known flag IDs with additional value constraints follow.
  if (ID->getString() == "wchar_size") {
    ConstantInt *Value
      = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Check(Value, "wchar_size metadata requires constant integer argument");
  }

  if (ID->getString() == "Linker Options") {
    // If the llvm.linker.options named metadata exists, we assume that the
    // bitcode reader has upgraded the module flag. Otherwise the flag might
    // have been created by a client directly.
    Check(M.getNamedMetadata("llvm.linker.options"),
          "'Linker Options' named metadata no longer supported");
  }

  if (ID->getString() == "SemanticInterposition") {
    ConstantInt *Value =
        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Check(Value,
          "SemanticInterposition metadata requires constant integer argument");
  }

  if (ID->getString() == "CG Profile") {
    // Each operand of the 'CG Profile' node is verified individually.
    for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
      visitModuleFlagCGProfileEntry(MDO);
  }
}
1707
visitModuleFlagCGProfileEntry(const MDOperand & MDO)1708 void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1709 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1710 if (!FuncMDO)
1711 return;
1712 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1713 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1714 "expected a Function or null", FuncMDO);
1715 };
1716 auto Node = dyn_cast_or_null<MDNode>(MDO);
1717 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1718 CheckFunction(Node->getOperand(0));
1719 CheckFunction(Node->getOperand(1));
1720 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1721 Check(Count && Count->getType()->isIntegerTy(),
1722 "expected an integer constant", Node->getOperand(2));
1723 }
1724
/// Verify that each attribute in \p Attrs carries the payload its kind
/// requires. String "strbool" attributes must be empty, "true", or "false";
/// enum/int attributes must have an argument exactly when their kind takes
/// one. \p V is only used for error reporting.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
      // Expand a validity check for every ATTRIBUTE_STRBOOL entry in
      // Attributes.inc; ATTRIBUTE_ENUM entries expand to nothing here.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }
#include "llvm/IR/Attributes.inc"
      // String attributes need no further kind/argument checking.
      continue;
    }

    // Non-string attributes: the presence of an integer argument must agree
    // with whether the attribute kind is an int-valued kind.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
1750
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
//
// Verifies: per-attribute payloads, applicability to parameters, mutual
// exclusion groups (ABI-classifying attributes, memory-effect pairs, etc.),
// type compatibility, and — for non-opaque pointer types — agreement between
// attribute-carried types and the pointee type.
void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
                                    const Value *V) {
  if (!Attrs.hasAttributes())
    return;

  verifyAttributeTypes(Attrs, V);

  // Every non-string attribute must be usable as a parameter attribute.
  for (Attribute Attr : Attrs)
    Check(Attr.isStringAttribute() ||
              Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
          "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
          V);

  // 'immarg' must be the only attribute on its parameter.
  if (Attrs.hasAttribute(Attribute::ImmArg)) {
    Check(Attrs.getNumAttributes() == 1,
          "Attribute 'immarg' is incompatible with other attributes", V);
  }

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret. (sret and inreg are counted as a single group below.)
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
               Attrs.hasAttribute(Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Attribute::Nest);
  AttrCount += Attrs.hasAttribute(Attribute::ByRef);
  Check(AttrCount <= 1,
        "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
        "'byref', and 'sret' are incompatible!",
        V);

  // Pairwise-incompatible attribute combinations.
  Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'inalloca and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
          Attrs.hasAttribute(Attribute::Returned)),
        "Attributes "
        "'sret and returned' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
          Attrs.hasAttribute(Attribute::SExt)),
        "Attributes "
        "'zeroext and signext' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'readnone and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readnone and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readonly and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
          Attrs.hasAttribute(Attribute::AlwaysInline)),
        "Attributes "
        "'noinline and alwaysinline' are incompatible!",
        V);

  // Reject attributes incompatible with the parameter's type (e.g. pointer
  // attributes on a non-pointer).
  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
  for (Attribute Attr : Attrs) {
    if (!Attr.isStringAttribute() &&
        IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
      CheckFailed("Attribute '" + Attr.getAsString() +
                  "' applied to incompatible type!", V);
      return;
    }
  }

  // Pointer-specific checks.
  if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
    if (Attrs.hasAttribute(Attribute::ByVal)) {
      // 'byval' alignment is capped at ParamMaxAlignment (2^14).
      if (Attrs.hasAttribute(Attribute::Alignment)) {
        Align AttrAlign = Attrs.getAlignment().valueOrOne();
        Align MaxAlign(ParamMaxAlignment);
        Check(AttrAlign <= MaxAlign,
              "Attribute 'align' exceed the max size 2^14", V);
      }
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getByValType()->isSized(&Visited),
            "Attribute 'byval' does not support unsized types!", V);
    }
    if (Attrs.hasAttribute(Attribute::ByRef)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getByRefType()->isSized(&Visited),
            "Attribute 'byref' does not support unsized types!", V);
    }
    if (Attrs.hasAttribute(Attribute::InAlloca)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getInAllocaType()->isSized(&Visited),
            "Attribute 'inalloca' does not support unsized types!", V);
    }
    if (Attrs.hasAttribute(Attribute::Preallocated)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getPreallocatedType()->isSized(&Visited),
            "Attribute 'preallocated' does not support unsized types!", V);
    }
    // With typed (non-opaque) pointers, the types carried by these
    // attributes must match the pointee type exactly.
    if (!PTy->isOpaque()) {
      if (!isa<PointerType>(PTy->getNonOpaquePointerElementType()))
        Check(!Attrs.hasAttribute(Attribute::SwiftError),
              "Attribute 'swifterror' only applies to parameters "
              "with pointer to pointer type!",
              V);
      if (Attrs.hasAttribute(Attribute::ByRef)) {
        Check(Attrs.getByRefType() == PTy->getNonOpaquePointerElementType(),
              "Attribute 'byref' type does not match parameter!", V);
      }

      if (Attrs.hasAttribute(Attribute::ByVal) && Attrs.getByValType()) {
        Check(Attrs.getByValType() == PTy->getNonOpaquePointerElementType(),
              "Attribute 'byval' type does not match parameter!", V);
      }

      if (Attrs.hasAttribute(Attribute::Preallocated)) {
        Check(Attrs.getPreallocatedType() ==
                  PTy->getNonOpaquePointerElementType(),
              "Attribute 'preallocated' type does not match parameter!", V);
      }

      if (Attrs.hasAttribute(Attribute::InAlloca)) {
        Check(Attrs.getInAllocaType() == PTy->getNonOpaquePointerElementType(),
              "Attribute 'inalloca' type does not match parameter!", V);
      }

      if (Attrs.hasAttribute(Attribute::ElementType)) {
        Check(Attrs.getElementType() == PTy->getNonOpaquePointerElementType(),
              "Attribute 'elementtype' type does not match parameter!", V);
      }
    }
  }
}
1899
checkUnsignedBaseTenFuncAttr(AttributeList Attrs,StringRef Attr,const Value * V)1900 void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
1901 const Value *V) {
1902 if (Attrs.hasFnAttr(Attr)) {
1903 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
1904 unsigned N;
1905 if (S.getAsInteger(10, N))
1906 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
1907 }
1908 }
1909
// Check parameter attributes against a function type.
// The value V is printed in error messages.
//
// Verifies the whole AttributeList of a function or call site: context
// ownership, return/parameter attribute applicability (delegating to
// verifyParameterAttrs), per-function uniqueness constraints (nest, returned,
// sret, swift*), and function-level attributes including mutually exclusive
// pairs and the value constraints of allocsize/allockind/vscale_range and
// several string attributes.
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                                   const Value *V, bool IsIntrinsic,
                                   bool IsInlineAsm) {
  if (Attrs.isEmpty())
    return;

  // Only run the context checks once per distinct attribute list.
  if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
    Check(Attrs.hasParentContext(Context),
          "Attribute list does not match Module context!", &Attrs, V);
    for (const auto &AttrSet : Attrs) {
      Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
            "Attribute set does not match Module context!", &AttrSet, V);
      for (const auto &A : AttrSet) {
        Check(A.hasParentContext(Context),
              "Attribute does not match Module context!", &A, V);
      }
    }
  }

  // Attributes that may appear on at most one parameter.
  bool SawNest = false;
  bool SawReturned = false;
  bool SawSRet = false;
  bool SawSwiftSelf = false;
  bool SawSwiftAsync = false;
  bool SawSwiftError = false;

  // Verify return value attributes.
  AttributeSet RetAttrs = Attrs.getRetAttrs();
  for (Attribute RetAttr : RetAttrs)
    Check(RetAttr.isStringAttribute() ||
              Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
          "Attribute '" + RetAttr.getAsString() +
              "' does not apply to function return values",
          V);

  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);

  // Verify parameter attributes.
  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
    Type *Ty = FT->getParamType(i);
    AttributeSet ArgAttrs = Attrs.getParamAttrs(i);

    // immarg/elementtype are restricted to intrinsics (and inline asm for
    // elementtype).
    if (!IsIntrinsic) {
      Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
            "immarg attribute only applies to intrinsics", V);
      if (!IsInlineAsm)
        Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
              "Attribute 'elementtype' can only be applied to intrinsics"
              " and inline asm.",
              V);
    }

    verifyParameterAttrs(ArgAttrs, Ty, V);

    if (ArgAttrs.hasAttribute(Attribute::Nest)) {
      Check(!SawNest, "More than one parameter has attribute nest!", V);
      SawNest = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::Returned)) {
      Check(!SawReturned, "More than one parameter has attribute returned!", V);
      Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
            "Incompatible argument and return types for 'returned' attribute",
            V);
      SawReturned = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
      Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
      Check(i == 0 || i == 1,
            "Attribute 'sret' is not on first or second parameter!", V);
      SawSRet = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
      Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
      SawSwiftSelf = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
      Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
      SawSwiftAsync = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
      Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
      SawSwiftError = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
      Check(i == FT->getNumParams() - 1,
            "inalloca isn't on the last parameter!", V);
    }
  }

  if (!Attrs.hasFnAttrs())
    return;

  // Function-level attributes.
  verifyAttributeTypes(Attrs.getFnAttrs(), V);
  for (Attribute FnAttr : Attrs.getFnAttrs())
    Check(FnAttr.isStringAttribute() ||
              Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
          "Attribute '" + FnAttr.getAsString() +
              "' does not apply to functions!",
          V);

  // Mutually exclusive memory-effect and inlining attribute pairs.
  Check(!(Attrs.hasFnAttr(Attribute::ReadNone) &&
          Attrs.hasFnAttr(Attribute::ReadOnly)),
        "Attributes 'readnone and readonly' are incompatible!", V);

  Check(!(Attrs.hasFnAttr(Attribute::ReadNone) &&
          Attrs.hasFnAttr(Attribute::WriteOnly)),
        "Attributes 'readnone and writeonly' are incompatible!", V);

  Check(!(Attrs.hasFnAttr(Attribute::ReadOnly) &&
          Attrs.hasFnAttr(Attribute::WriteOnly)),
        "Attributes 'readonly and writeonly' are incompatible!", V);

  Check(!(Attrs.hasFnAttr(Attribute::ReadNone) &&
          Attrs.hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly)),
        "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
        "incompatible!",
        V);

  Check(!(Attrs.hasFnAttr(Attribute::ReadNone) &&
          Attrs.hasFnAttr(Attribute::InaccessibleMemOnly)),
        "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);

  Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
          Attrs.hasFnAttr(Attribute::AlwaysInline)),
        "Attributes 'noinline and alwaysinline' are incompatible!", V);

  // 'optnone' requires 'noinline' and forbids size optimizations.
  if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
    Check(Attrs.hasFnAttr(Attribute::NoInline),
          "Attribute 'optnone' requires 'noinline'!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optnone' are incompatible!", V);
  }

  if (Attrs.hasFnAttr(Attribute::JumpTable)) {
    const GlobalValue *GV = cast<GlobalValue>(V);
    Check(GV->hasGlobalUnnamedAddr(),
          "Attribute 'jumptable' requires 'unnamed_addr'", V);
  }

  // allocsize arguments must name integer-typed parameters of FT.
  if (Attrs.hasFnAttr(Attribute::AllocSize)) {
    std::pair<unsigned, Optional<unsigned>> Args =
        Attrs.getFnAttrs().getAllocSizeArgs();

    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
      if (ParamNo >= FT->getNumParams()) {
        CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
        return false;
      }

      if (!FT->getParamType(ParamNo)->isIntegerTy()) {
        CheckFailed("'allocsize' " + Name +
                        " argument must refer to an integer parameter",
                    V);
        return false;
      }

      return true;
    };

    if (!CheckParam("element size", Args.first))
      return;

    if (Args.second && !CheckParam("number of elements", *Args.second))
      return;
  }

  // allockind must carry exactly one primary kind, and the modifier bits must
  // be internally consistent.
  if (Attrs.hasFnAttr(Attribute::AllocKind)) {
    AllocFnKind K = Attrs.getAllocKind();
    AllocFnKind Type =
        K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
    if (!is_contained(
            {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
            Type))
      CheckFailed(
          "'allockind()' requires exactly one of alloc, realloc, and free");
    if ((Type == AllocFnKind::Free) &&
        ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
               AllocFnKind::Aligned)) != AllocFnKind::Unknown))
      CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
                  "or aligned modifiers.");
    AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
    if ((K & ZeroedUninit) == ZeroedUninit)
      CheckFailed("'allockind()' can't be both zeroed and uninitialized");
  }

  // vscale_range: min must be positive and not exceed max (when present).
  if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
    unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
    if (VScaleMin == 0)
      CheckFailed("'vscale_range' minimum must be greater than 0", V);

    Optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
    if (VScaleMax && VScaleMin > VScaleMax)
      CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
  }

  // String attributes with restricted value sets.
  if (Attrs.hasFnAttr("frame-pointer")) {
    StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
    if (FP != "all" && FP != "non-leaf" && FP != "none")
      CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
  }

  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
  checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
}
2127
verifyFunctionMetadata(ArrayRef<std::pair<unsigned,MDNode * >> MDs)2128 void Verifier::verifyFunctionMetadata(
2129 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2130 for (const auto &Pair : MDs) {
2131 if (Pair.first == LLVMContext::MD_prof) {
2132 MDNode *MD = Pair.second;
2133 Check(MD->getNumOperands() >= 2,
2134 "!prof annotations should have no less than 2 operands", MD);
2135
2136 // Check first operand.
2137 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2138 MD);
2139 Check(isa<MDString>(MD->getOperand(0)),
2140 "expected string with name of the !prof annotation", MD);
2141 MDString *MDS = cast<MDString>(MD->getOperand(0));
2142 StringRef ProfName = MDS->getString();
2143 Check(ProfName.equals("function_entry_count") ||
2144 ProfName.equals("synthetic_function_entry_count"),
2145 "first operand should be 'function_entry_count'"
2146 " or 'synthetic_function_entry_count'",
2147 MD);
2148
2149 // Check second operand.
2150 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2151 MD);
2152 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2153 "expected integer argument to function_entry_count", MD);
2154 }
2155 }
2156 }
2157
visitConstantExprsRecursively(const Constant * EntryC)2158 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2159 if (!ConstantExprVisited.insert(EntryC).second)
2160 return;
2161
2162 SmallVector<const Constant *, 16> Stack;
2163 Stack.push_back(EntryC);
2164
2165 while (!Stack.empty()) {
2166 const Constant *C = Stack.pop_back_val();
2167
2168 // Check this constant expression.
2169 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2170 visitConstantExpr(CE);
2171
2172 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2173 // Global Values get visited separately, but we do need to make sure
2174 // that the global value is in the correct module
2175 Check(GV->getParent() == &M, "Referencing global in another module!",
2176 EntryC, &M, GV, GV->getParent());
2177 continue;
2178 }
2179
2180 // Visit all sub-expressions.
2181 for (const Use &U : C->operands()) {
2182 const auto *OpC = dyn_cast<Constant>(U);
2183 if (!OpC)
2184 continue;
2185 if (!ConstantExprVisited.insert(OpC).second)
2186 continue;
2187 Stack.push_back(OpC);
2188 }
2189 }
2190 }
2191
visitConstantExpr(const ConstantExpr * CE)2192 void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2193 if (CE->getOpcode() == Instruction::BitCast)
2194 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2195 CE->getType()),
2196 "Invalid bitcast", CE);
2197 }
2198
verifyAttributeCount(AttributeList Attrs,unsigned Params)2199 bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2200 // There shouldn't be more attribute sets than there are parameters plus the
2201 // function and return value.
2202 return Attrs.getNumAttrSets() <= Params + 2;
2203 }
2204
verifyInlineAsmCall(const CallBase & Call)2205 void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2206 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2207 unsigned ArgNo = 0;
2208 unsigned LabelNo = 0;
2209 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2210 if (CI.Type == InlineAsm::isLabel) {
2211 ++LabelNo;
2212 continue;
2213 }
2214
2215 // Only deal with constraints that correspond to call arguments.
2216 if (!CI.hasArg())
2217 continue;
2218
2219 if (CI.isIndirect) {
2220 const Value *Arg = Call.getArgOperand(ArgNo);
2221 Check(Arg->getType()->isPointerTy(),
2222 "Operand for indirect constraint must have pointer type", &Call);
2223
2224 Check(Call.getParamElementType(ArgNo),
2225 "Operand for indirect constraint must have elementtype attribute",
2226 &Call);
2227 } else {
2228 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2229 "Elementtype attribute can only be applied for indirect "
2230 "constraints",
2231 &Call);
2232 }
2233
2234 ArgNo++;
2235 }
2236
2237 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2238 Check(LabelNo == CallBr->getNumIndirectDests(),
2239 "Number of label constraints does not match number of callbr dests",
2240 &Call);
2241 } else {
2242 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2243 &Call);
2244 }
2245 }
2246
2247 /// Verify that statepoint intrinsic is well formed.
verifyStatepoint(const CallBase & Call)2248 void Verifier::verifyStatepoint(const CallBase &Call) {
2249 assert(Call.getCalledFunction() &&
2250 Call.getCalledFunction()->getIntrinsicID() ==
2251 Intrinsic::experimental_gc_statepoint);
2252
2253 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2254 !Call.onlyAccessesArgMemory(),
2255 "gc.statepoint must read and write all memory to preserve "
2256 "reordering restrictions required by safepoint semantics",
2257 Call);
2258
2259 const int64_t NumPatchBytes =
2260 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2261 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2262 Check(NumPatchBytes >= 0,
2263 "gc.statepoint number of patchable bytes must be "
2264 "positive",
2265 Call);
2266
2267 Type *TargetElemType = Call.getParamElementType(2);
2268 Check(TargetElemType,
2269 "gc.statepoint callee argument must have elementtype attribute", Call);
2270 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2271 Check(TargetFuncType,
2272 "gc.statepoint callee elementtype must be function type", Call);
2273
2274 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2275 Check(NumCallArgs >= 0,
2276 "gc.statepoint number of arguments to underlying call "
2277 "must be positive",
2278 Call);
2279 const int NumParams = (int)TargetFuncType->getNumParams();
2280 if (TargetFuncType->isVarArg()) {
2281 Check(NumCallArgs >= NumParams,
2282 "gc.statepoint mismatch in number of vararg call args", Call);
2283
2284 // TODO: Remove this limitation
2285 Check(TargetFuncType->getReturnType()->isVoidTy(),
2286 "gc.statepoint doesn't support wrapping non-void "
2287 "vararg functions yet",
2288 Call);
2289 } else
2290 Check(NumCallArgs == NumParams,
2291 "gc.statepoint mismatch in number of call args", Call);
2292
2293 const uint64_t Flags
2294 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2295 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2296 "unknown flag used in gc.statepoint flags argument", Call);
2297
2298 // Verify that the types of the call parameter arguments match
2299 // the type of the wrapped callee.
2300 AttributeList Attrs = Call.getAttributes();
2301 for (int i = 0; i < NumParams; i++) {
2302 Type *ParamType = TargetFuncType->getParamType(i);
2303 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2304 Check(ArgType == ParamType,
2305 "gc.statepoint call argument does not match wrapped "
2306 "function type",
2307 Call);
2308
2309 if (TargetFuncType->isVarArg()) {
2310 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2311 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2312 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2313 }
2314 }
2315
2316 const int EndCallArgsInx = 4 + NumCallArgs;
2317
2318 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2319 Check(isa<ConstantInt>(NumTransitionArgsV),
2320 "gc.statepoint number of transition arguments "
2321 "must be constant integer",
2322 Call);
2323 const int NumTransitionArgs =
2324 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2325 Check(NumTransitionArgs == 0,
2326 "gc.statepoint w/inline transition bundle is deprecated", Call);
2327 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2328
2329 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2330 Check(isa<ConstantInt>(NumDeoptArgsV),
2331 "gc.statepoint number of deoptimization arguments "
2332 "must be constant integer",
2333 Call);
2334 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2335 Check(NumDeoptArgs == 0,
2336 "gc.statepoint w/inline deopt operands is deprecated", Call);
2337
2338 const int ExpectedNumArgs = 7 + NumCallArgs;
2339 Check(ExpectedNumArgs == (int)Call.arg_size(),
2340 "gc.statepoint too many arguments", Call);
2341
2342 // Check that the only uses of this gc.statepoint are gc.result or
2343 // gc.relocate calls which are tied to this statepoint and thus part
2344 // of the same statepoint sequence
2345 for (const User *U : Call.users()) {
2346 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2347 Check(UserCall, "illegal use of statepoint token", Call, U);
2348 if (!UserCall)
2349 continue;
2350 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2351 "gc.result or gc.relocate are the only value uses "
2352 "of a gc.statepoint",
2353 Call, U);
2354 if (isa<GCResultInst>(UserCall)) {
2355 Check(UserCall->getArgOperand(0) == &Call,
2356 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2357 } else if (isa<GCRelocateInst>(Call)) {
2358 Check(UserCall->getArgOperand(0) == &Call,
2359 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2360 }
2361 }
2362
2363 // Note: It is legal for a single derived pointer to be listed multiple
2364 // times. It's non-optimal, but it is legal. It can also happen after
2365 // insertion if we strip a bitcast away.
2366 // Note: It is really tempting to check that each base is relocated and
2367 // that a derived pointer is never reused as a base pointer. This turns
2368 // out to be problematic since optimizations run after safepoint insertion
2369 // can recognize equality properties that the insertion logic doesn't know
2370 // about. See example statepoint.ll in the verifier subdirectory
2371 }
2372
verifyFrameRecoverIndices()2373 void Verifier::verifyFrameRecoverIndices() {
2374 for (auto &Counts : FrameEscapeInfo) {
2375 Function *F = Counts.first;
2376 unsigned EscapedObjectCount = Counts.second.first;
2377 unsigned MaxRecoveredIndex = Counts.second.second;
2378 Check(MaxRecoveredIndex <= EscapedObjectCount,
2379 "all indices passed to llvm.localrecover must be less than the "
2380 "number of arguments passed to llvm.localescape in the parent "
2381 "function",
2382 F);
2383 }
2384 }
2385
getSuccPad(Instruction * Terminator)2386 static Instruction *getSuccPad(Instruction *Terminator) {
2387 BasicBlock *UnwindDest;
2388 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2389 UnwindDest = II->getUnwindDest();
2390 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2391 UnwindDest = CSI->getUnwindDest();
2392 else
2393 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2394 return UnwindDest->getFirstNonPHI();
2395 }
2396
/// Detect cycles among sibling EH funclets: walk the unwind-successor chain
/// starting at each pad in SiblingFuncletInfo (which maps a funclet pad to
/// the terminator that unwinds out of it). Since each node has exactly one
/// successor in this map, a simple visited/active marking suffices.
void Verifier::verifySiblingFuncletUnwinds() {
  // Visited: pads fully processed on any previous walk.
  // Active:  pads on the current walk's path; revisiting one means a cycle.
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        // Re-walk the cycle to collect its pads (and their distinct
        // terminators) for the diagnostic.
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2438
2439 // visitFunction - Verify that a function is ok.
2440 //
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  // sret implies the callee writes the result through a pointer argument,
  // so the declared return type must be void.
  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    // x86 interrupt handlers receive the interrupt frame by value.
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    LLVM_FALLTHROUGH;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      // AMDGPU entry points cannot take stack-allocated aggregate arguments.
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    LLVM_FALLTHROUGH;
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // metadata/token/x86_amx are special types reserved for intrinsics.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // The metadata rules differ for materializable functions, pure
  // declarations, and definitions; handle the three cases separately.
  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        // A DISubprogram may describe at most one function.
        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if (SP && ((Scope != SP) && !Seen.insert(SP).second))
      return;

    // NOTE(review): SP is dereferenced unconditionally below; this relies on
    // the scope chain always yielding a subprogram here — confirm for
    // malformed debug info.
    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      if (BrokenDebugInfo)
        return;
    }
}
2742
2743 // verifyBasicBlock - Verify that a basic block is well formed...
2744 //
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    // Sorting the predecessor list and each PHI's (block, value) entries lets
    // us compare the two lists pairwise below.
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }
}
2797
visitTerminator(Instruction & I)2798 void Verifier::visitTerminator(Instruction &I) {
2799 // Ensure that terminators only exist at the end of the basic block.
2800 Check(&I == I.getParent()->getTerminator(),
2801 "Terminator found in the middle of a basic block!", I.getParent());
2802 visitInstruction(I);
2803 }
2804
visitBranchInst(BranchInst & BI)2805 void Verifier::visitBranchInst(BranchInst &BI) {
2806 if (BI.isConditional()) {
2807 Check(BI.getCondition()->getType()->isIntegerTy(1),
2808 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
2809 }
2810 visitTerminator(BI);
2811 }
2812
visitReturnInst(ReturnInst & RI)2813 void Verifier::visitReturnInst(ReturnInst &RI) {
2814 Function *F = RI.getParent()->getParent();
2815 unsigned N = RI.getNumOperands();
2816 if (F->getReturnType()->isVoidTy())
2817 Check(N == 0,
2818 "Found return instr that returns non-void in Function of void "
2819 "return type!",
2820 &RI, F->getReturnType());
2821 else
2822 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
2823 "Function return type does not match operand "
2824 "type of return inst!",
2825 &RI, F->getReturnType());
2826
2827 // Check to make sure that the return value has necessary properties for
2828 // terminators...
2829 visitTerminator(RI);
2830 }
2831
visitSwitchInst(SwitchInst & SI)2832 void Verifier::visitSwitchInst(SwitchInst &SI) {
2833 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
2834 // Check to make sure that all of the constants in the switch instruction
2835 // have the same type as the switched-on value.
2836 Type *SwitchTy = SI.getCondition()->getType();
2837 SmallPtrSet<ConstantInt*, 32> Constants;
2838 for (auto &Case : SI.cases()) {
2839 Check(Case.getCaseValue()->getType() == SwitchTy,
2840 "Switch constants must all be same type as switch value!", &SI);
2841 Check(Constants.insert(Case.getCaseValue()).second,
2842 "Duplicate integer as switch case", &SI, Case.getCaseValue());
2843 }
2844
2845 visitTerminator(SI);
2846 }
2847
visitIndirectBrInst(IndirectBrInst & BI)2848 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
2849 Check(BI.getAddress()->getType()->isPointerTy(),
2850 "Indirectbr operand must have pointer type!", &BI);
2851 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
2852 Check(BI.getDestination(i)->getType()->isLabelTy(),
2853 "Indirectbr destinations must all have pointer type!", &BI);
2854
2855 visitTerminator(BI);
2856 }
2857
visitCallBrInst(CallBrInst & CBI)2858 void Verifier::visitCallBrInst(CallBrInst &CBI) {
2859 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
2860 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
2861 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
2862
2863 verifyInlineAsmCall(CBI);
2864 visitTerminator(CBI);
2865 }
2866
visitSelectInst(SelectInst & SI)2867 void Verifier::visitSelectInst(SelectInst &SI) {
2868 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
2869 SI.getOperand(2)),
2870 "Invalid operands for select instruction!", &SI);
2871
2872 Check(SI.getTrueValue()->getType() == SI.getType(),
2873 "Select values must have same type as select instruction!", &SI);
2874 visitInstruction(SI);
2875 }
2876
2877 /// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
2878 /// a pass, if any exist, it's an error.
2879 ///
void Verifier::visitUserOp1(Instruction &I) {
  // Any UserOp instruction reaching the verifier means a pass leaked its
  // temporary placeholder instructions; report unconditionally.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
2883
visitTruncInst(TruncInst & I)2884 void Verifier::visitTruncInst(TruncInst &I) {
2885 // Get the source and destination types
2886 Type *SrcTy = I.getOperand(0)->getType();
2887 Type *DestTy = I.getType();
2888
2889 // Get the size of the types in bits, we'll need this later
2890 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2891 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2892
2893 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
2894 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
2895 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2896 "trunc source and destination must both be a vector or neither", &I);
2897 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
2898
2899 visitInstruction(I);
2900 }
2901
visitZExtInst(ZExtInst & I)2902 void Verifier::visitZExtInst(ZExtInst &I) {
2903 // Get the source and destination types
2904 Type *SrcTy = I.getOperand(0)->getType();
2905 Type *DestTy = I.getType();
2906
2907 // Get the size of the types in bits, we'll need this later
2908 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
2909 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
2910 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2911 "zext source and destination must both be a vector or neither", &I);
2912 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2913 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2914
2915 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
2916
2917 visitInstruction(I);
2918 }
2919
visitSExtInst(SExtInst & I)2920 void Verifier::visitSExtInst(SExtInst &I) {
2921 // Get the source and destination types
2922 Type *SrcTy = I.getOperand(0)->getType();
2923 Type *DestTy = I.getType();
2924
2925 // Get the size of the types in bits, we'll need this later
2926 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2927 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2928
2929 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
2930 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
2931 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2932 "sext source and destination must both be a vector or neither", &I);
2933 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
2934
2935 visitInstruction(I);
2936 }
2937
visitFPTruncInst(FPTruncInst & I)2938 void Verifier::visitFPTruncInst(FPTruncInst &I) {
2939 // Get the source and destination types
2940 Type *SrcTy = I.getOperand(0)->getType();
2941 Type *DestTy = I.getType();
2942 // Get the size of the types in bits, we'll need this later
2943 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2944 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2945
2946 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
2947 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
2948 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2949 "fptrunc source and destination must both be a vector or neither", &I);
2950 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
2951
2952 visitInstruction(I);
2953 }
2954
visitFPExtInst(FPExtInst & I)2955 void Verifier::visitFPExtInst(FPExtInst &I) {
2956 // Get the source and destination types
2957 Type *SrcTy = I.getOperand(0)->getType();
2958 Type *DestTy = I.getType();
2959
2960 // Get the size of the types in bits, we'll need this later
2961 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2962 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2963
2964 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
2965 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
2966 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2967 "fpext source and destination must both be a vector or neither", &I);
2968 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
2969
2970 visitInstruction(I);
2971 }
2972
visitUIToFPInst(UIToFPInst & I)2973 void Verifier::visitUIToFPInst(UIToFPInst &I) {
2974 // Get the source and destination types
2975 Type *SrcTy = I.getOperand(0)->getType();
2976 Type *DestTy = I.getType();
2977
2978 bool SrcVec = SrcTy->isVectorTy();
2979 bool DstVec = DestTy->isVectorTy();
2980
2981 Check(SrcVec == DstVec,
2982 "UIToFP source and dest must both be vector or scalar", &I);
2983 Check(SrcTy->isIntOrIntVectorTy(),
2984 "UIToFP source must be integer or integer vector", &I);
2985 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
2986 &I);
2987
2988 if (SrcVec && DstVec)
2989 Check(cast<VectorType>(SrcTy)->getElementCount() ==
2990 cast<VectorType>(DestTy)->getElementCount(),
2991 "UIToFP source and dest vector length mismatch", &I);
2992
2993 visitInstruction(I);
2994 }
2995
visitSIToFPInst(SIToFPInst & I)2996 void Verifier::visitSIToFPInst(SIToFPInst &I) {
2997 // Get the source and destination types
2998 Type *SrcTy = I.getOperand(0)->getType();
2999 Type *DestTy = I.getType();
3000
3001 bool SrcVec = SrcTy->isVectorTy();
3002 bool DstVec = DestTy->isVectorTy();
3003
3004 Check(SrcVec == DstVec,
3005 "SIToFP source and dest must both be vector or scalar", &I);
3006 Check(SrcTy->isIntOrIntVectorTy(),
3007 "SIToFP source must be integer or integer vector", &I);
3008 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3009 &I);
3010
3011 if (SrcVec && DstVec)
3012 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3013 cast<VectorType>(DestTy)->getElementCount(),
3014 "SIToFP source and dest vector length mismatch", &I);
3015
3016 visitInstruction(I);
3017 }
3018
visitFPToUIInst(FPToUIInst & I)3019 void Verifier::visitFPToUIInst(FPToUIInst &I) {
3020 // Get the source and destination types
3021 Type *SrcTy = I.getOperand(0)->getType();
3022 Type *DestTy = I.getType();
3023
3024 bool SrcVec = SrcTy->isVectorTy();
3025 bool DstVec = DestTy->isVectorTy();
3026
3027 Check(SrcVec == DstVec,
3028 "FPToUI source and dest must both be vector or scalar", &I);
3029 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3030 Check(DestTy->isIntOrIntVectorTy(),
3031 "FPToUI result must be integer or integer vector", &I);
3032
3033 if (SrcVec && DstVec)
3034 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3035 cast<VectorType>(DestTy)->getElementCount(),
3036 "FPToUI source and dest vector length mismatch", &I);
3037
3038 visitInstruction(I);
3039 }
3040
visitFPToSIInst(FPToSIInst & I)3041 void Verifier::visitFPToSIInst(FPToSIInst &I) {
3042 // Get the source and destination types
3043 Type *SrcTy = I.getOperand(0)->getType();
3044 Type *DestTy = I.getType();
3045
3046 bool SrcVec = SrcTy->isVectorTy();
3047 bool DstVec = DestTy->isVectorTy();
3048
3049 Check(SrcVec == DstVec,
3050 "FPToSI source and dest must both be vector or scalar", &I);
3051 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3052 Check(DestTy->isIntOrIntVectorTy(),
3053 "FPToSI result must be integer or integer vector", &I);
3054
3055 if (SrcVec && DstVec)
3056 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3057 cast<VectorType>(DestTy)->getElementCount(),
3058 "FPToSI source and dest vector length mismatch", &I);
3059
3060 visitInstruction(I);
3061 }
3062
visitPtrToIntInst(PtrToIntInst & I)3063 void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3064 // Get the source and destination types
3065 Type *SrcTy = I.getOperand(0)->getType();
3066 Type *DestTy = I.getType();
3067
3068 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3069
3070 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3071 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3072 &I);
3073
3074 if (SrcTy->isVectorTy()) {
3075 auto *VSrc = cast<VectorType>(SrcTy);
3076 auto *VDest = cast<VectorType>(DestTy);
3077 Check(VSrc->getElementCount() == VDest->getElementCount(),
3078 "PtrToInt Vector width mismatch", &I);
3079 }
3080
3081 visitInstruction(I);
3082 }
3083
visitIntToPtrInst(IntToPtrInst & I)3084 void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3085 // Get the source and destination types
3086 Type *SrcTy = I.getOperand(0)->getType();
3087 Type *DestTy = I.getType();
3088
3089 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3090 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3091
3092 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3093 &I);
3094 if (SrcTy->isVectorTy()) {
3095 auto *VSrc = cast<VectorType>(SrcTy);
3096 auto *VDest = cast<VectorType>(DestTy);
3097 Check(VSrc->getElementCount() == VDest->getElementCount(),
3098 "IntToPtr Vector width mismatch", &I);
3099 }
3100 visitInstruction(I);
3101 }
3102
visitBitCastInst(BitCastInst & I)3103 void Verifier::visitBitCastInst(BitCastInst &I) {
3104 Check(
3105 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3106 "Invalid bitcast", &I);
3107 visitInstruction(I);
3108 }
3109
visitAddrSpaceCastInst(AddrSpaceCastInst & I)3110 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3111 Type *SrcTy = I.getOperand(0)->getType();
3112 Type *DestTy = I.getType();
3113
3114 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3115 &I);
3116 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3117 &I);
3118 Check(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
3119 "AddrSpaceCast must be between different address spaces", &I);
3120 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3121 Check(SrcVTy->getElementCount() ==
3122 cast<VectorType>(DestTy)->getElementCount(),
3123 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3124 visitInstruction(I);
3125 }
3126
3127 /// visitPHINode - Ensure that a PHI node is well formed.
3128 ///
visitPHINode(PHINode & PN)3129 void Verifier::visitPHINode(PHINode &PN) {
3130 // Ensure that the PHI nodes are all grouped together at the top of the block.
3131 // This can be tested by checking whether the instruction before this is
3132 // either nonexistent (because this is begin()) or is a PHI node. If not,
3133 // then there is some other instruction before a PHI.
3134 Check(&PN == &PN.getParent()->front() ||
3135 isa<PHINode>(--BasicBlock::iterator(&PN)),
3136 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3137
3138 // Check that a PHI doesn't yield a Token.
3139 Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
3140
3141 // Check that all of the values of the PHI node have the same type as the
3142 // result, and that the incoming blocks are really basic blocks.
3143 for (Value *IncValue : PN.incoming_values()) {
3144 Check(PN.getType() == IncValue->getType(),
3145 "PHI node operands are not the same type as the result!", &PN);
3146 }
3147
3148 // All other PHI node constraints are checked in the visitBasicBlock method.
3149
3150 visitInstruction(PN);
3151 }
3152
visitCallBase(CallBase & Call)3153 void Verifier::visitCallBase(CallBase &Call) {
3154 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3155 "Called function must be a pointer!", Call);
3156 PointerType *FPTy = cast<PointerType>(Call.getCalledOperand()->getType());
3157
3158 Check(FPTy->isOpaqueOrPointeeTypeMatches(Call.getFunctionType()),
3159 "Called function is not the same type as the call!", Call);
3160
3161 FunctionType *FTy = Call.getFunctionType();
3162
3163 // Verify that the correct number of arguments are being passed
3164 if (FTy->isVarArg())
3165 Check(Call.arg_size() >= FTy->getNumParams(),
3166 "Called function requires more parameters than were provided!", Call);
3167 else
3168 Check(Call.arg_size() == FTy->getNumParams(),
3169 "Incorrect number of arguments passed to called function!", Call);
3170
3171 // Verify that all arguments to the call match the function type.
3172 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3173 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3174 "Call parameter type does not match function signature!",
3175 Call.getArgOperand(i), FTy->getParamType(i), Call);
3176
3177 AttributeList Attrs = Call.getAttributes();
3178
3179 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3180 "Attribute after last parameter!", Call);
3181
3182 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3183 if (!Ty->isSized())
3184 return;
3185 Align ABIAlign = DL.getABITypeAlign(Ty);
3186 Align MaxAlign(ParamMaxAlignment);
3187 Check(ABIAlign <= MaxAlign,
3188 "Incorrect alignment of " + Message + " to called function!", Call);
3189 };
3190
3191 VerifyTypeAlign(FTy->getReturnType(), "return type");
3192 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3193 Type *Ty = FTy->getParamType(i);
3194 VerifyTypeAlign(Ty, "argument passed");
3195 }
3196
3197 Function *Callee =
3198 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3199 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3200 if (IsIntrinsic)
3201 Check(Callee->getValueType() == FTy,
3202 "Intrinsic called with incompatible signature", Call);
3203
3204 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3205 // Don't allow speculatable on call sites, unless the underlying function
3206 // declaration is also speculatable.
3207 Check(Callee && Callee->isSpeculatable(),
3208 "speculatable attribute may not apply to call sites", Call);
3209 }
3210
3211 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3212 Check(Call.getCalledFunction()->getIntrinsicID() ==
3213 Intrinsic::call_preallocated_arg,
3214 "preallocated as a call site attribute can only be on "
3215 "llvm.call.preallocated.arg");
3216 }
3217
3218 // Verify call attributes.
3219 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3220
3221 // Conservatively check the inalloca argument.
3222 // We have a bug if we can find that there is an underlying alloca without
3223 // inalloca.
3224 if (Call.hasInAllocaArgument()) {
3225 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3226 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3227 Check(AI->isUsedWithInAlloca(),
3228 "inalloca argument for call has mismatched alloca", AI, Call);
3229 }
3230
3231 // For each argument of the callsite, if it has the swifterror argument,
3232 // make sure the underlying alloca/parameter it comes from has a swifterror as
3233 // well.
3234 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3235 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3236 Value *SwiftErrorArg = Call.getArgOperand(i);
3237 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3238 Check(AI->isSwiftError(),
3239 "swifterror argument for call has mismatched alloca", AI, Call);
3240 continue;
3241 }
3242 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3243 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3244 SwiftErrorArg, Call);
3245 Check(ArgI->hasSwiftErrorAttr(),
3246 "swifterror argument for call has mismatched parameter", ArgI,
3247 Call);
3248 }
3249
3250 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3251 // Don't allow immarg on call sites, unless the underlying declaration
3252 // also has the matching immarg.
3253 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3254 "immarg may not apply only to call sites", Call.getArgOperand(i),
3255 Call);
3256 }
3257
3258 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3259 Value *ArgVal = Call.getArgOperand(i);
3260 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3261 "immarg operand has non-immediate parameter", ArgVal, Call);
3262 }
3263
3264 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3265 Value *ArgVal = Call.getArgOperand(i);
3266 bool hasOB =
3267 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3268 bool isMustTail = Call.isMustTailCall();
3269 Check(hasOB != isMustTail,
3270 "preallocated operand either requires a preallocated bundle or "
3271 "the call to be musttail (but not both)",
3272 ArgVal, Call);
3273 }
3274 }
3275
3276 if (FTy->isVarArg()) {
3277 // FIXME? is 'nest' even legal here?
3278 bool SawNest = false;
3279 bool SawReturned = false;
3280
3281 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3282 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3283 SawNest = true;
3284 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3285 SawReturned = true;
3286 }
3287
3288 // Check attributes on the varargs part.
3289 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3290 Type *Ty = Call.getArgOperand(Idx)->getType();
3291 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3292 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3293
3294 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3295 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3296 SawNest = true;
3297 }
3298
3299 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3300 Check(!SawReturned, "More than one parameter has attribute returned!",
3301 Call);
3302 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3303 "Incompatible argument and return types for 'returned' "
3304 "attribute",
3305 Call);
3306 SawReturned = true;
3307 }
3308
3309 // Statepoint intrinsic is vararg but the wrapped function may be not.
3310 // Allow sret here and check the wrapped function in verifyStatepoint.
3311 if (!Call.getCalledFunction() ||
3312 Call.getCalledFunction()->getIntrinsicID() !=
3313 Intrinsic::experimental_gc_statepoint)
3314 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3315 "Attribute 'sret' cannot be used for vararg call arguments!",
3316 Call);
3317
3318 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3319 Check(Idx == Call.arg_size() - 1,
3320 "inalloca isn't on the last argument!", Call);
3321 }
3322 }
3323
3324 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3325 if (!IsIntrinsic) {
3326 for (Type *ParamTy : FTy->params()) {
3327 Check(!ParamTy->isMetadataTy(),
3328 "Function has metadata parameter but isn't an intrinsic", Call);
3329 Check(!ParamTy->isTokenTy(),
3330 "Function has token parameter but isn't an intrinsic", Call);
3331 }
3332 }
3333
3334 // Verify that indirect calls don't return tokens.
3335 if (!Call.getCalledFunction()) {
3336 Check(!FTy->getReturnType()->isTokenTy(),
3337 "Return type cannot be token for indirect call!");
3338 Check(!FTy->getReturnType()->isX86_AMXTy(),
3339 "Return type cannot be x86_amx for indirect call!");
3340 }
3341
3342 if (Function *F = Call.getCalledFunction())
3343 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3344 visitIntrinsicCall(ID, Call);
3345
3346 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3347 // most one "gc-transition", at most one "cfguardtarget", at most one
3348 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3349 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3350 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3351 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3352 FoundPtrauthBundle = false,
3353 FoundAttachedCallBundle = false;
3354 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3355 OperandBundleUse BU = Call.getOperandBundleAt(i);
3356 uint32_t Tag = BU.getTagID();
3357 if (Tag == LLVMContext::OB_deopt) {
3358 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3359 FoundDeoptBundle = true;
3360 } else if (Tag == LLVMContext::OB_gc_transition) {
3361 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3362 Call);
3363 FoundGCTransitionBundle = true;
3364 } else if (Tag == LLVMContext::OB_funclet) {
3365 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3366 FoundFuncletBundle = true;
3367 Check(BU.Inputs.size() == 1,
3368 "Expected exactly one funclet bundle operand", Call);
3369 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3370 "Funclet bundle operands should correspond to a FuncletPadInst",
3371 Call);
3372 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3373 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3374 Call);
3375 FoundCFGuardTargetBundle = true;
3376 Check(BU.Inputs.size() == 1,
3377 "Expected exactly one cfguardtarget bundle operand", Call);
3378 } else if (Tag == LLVMContext::OB_ptrauth) {
3379 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3380 FoundPtrauthBundle = true;
3381 Check(BU.Inputs.size() == 2,
3382 "Expected exactly two ptrauth bundle operands", Call);
3383 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3384 BU.Inputs[0]->getType()->isIntegerTy(32),
3385 "Ptrauth bundle key operand must be an i32 constant", Call);
3386 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3387 "Ptrauth bundle discriminator operand must be an i64", Call);
3388 } else if (Tag == LLVMContext::OB_preallocated) {
3389 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3390 Call);
3391 FoundPreallocatedBundle = true;
3392 Check(BU.Inputs.size() == 1,
3393 "Expected exactly one preallocated bundle operand", Call);
3394 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3395 Check(Input &&
3396 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3397 "\"preallocated\" argument must be a token from "
3398 "llvm.call.preallocated.setup",
3399 Call);
3400 } else if (Tag == LLVMContext::OB_gc_live) {
3401 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3402 FoundGCLiveBundle = true;
3403 } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3404 Check(!FoundAttachedCallBundle,
3405 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3406 FoundAttachedCallBundle = true;
3407 verifyAttachedCallBundle(Call, BU);
3408 }
3409 }
3410
3411 // Verify that callee and callsite agree on whether to use pointer auth.
3412 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3413 "Direct call cannot have a ptrauth bundle", Call);
3414
3415 // Verify that each inlinable callsite of a debug-info-bearing function in a
3416 // debug-info-bearing function has a debug location attached to it. Failure to
3417 // do so causes assertion failures when the inliner sets up inline scope info.
3418 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3419 Call.getCalledFunction()->getSubprogram())
3420 CheckDI(Call.getDebugLoc(),
3421 "inlinable function call in a function with "
3422 "debug info must have a !dbg location",
3423 Call);
3424
3425 if (Call.isInlineAsm())
3426 verifyInlineAsmCall(Call);
3427
3428 visitInstruction(Call);
3429 }
3430
verifyTailCCMustTailAttrs(const AttrBuilder & Attrs,StringRef Context)3431 void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3432 StringRef Context) {
3433 Check(!Attrs.contains(Attribute::InAlloca),
3434 Twine("inalloca attribute not allowed in ") + Context);
3435 Check(!Attrs.contains(Attribute::InReg),
3436 Twine("inreg attribute not allowed in ") + Context);
3437 Check(!Attrs.contains(Attribute::SwiftError),
3438 Twine("swifterror attribute not allowed in ") + Context);
3439 Check(!Attrs.contains(Attribute::Preallocated),
3440 Twine("preallocated attribute not allowed in ") + Context);
3441 Check(!Attrs.contains(Attribute::ByRef),
3442 Twine("byref attribute not allowed in ") + Context);
3443 }
3444
3445 /// Two types are "congruent" if they are identical, or if they are both pointer
3446 /// types with different pointee types and the same address space.
isTypeCongruent(Type * L,Type * R)3447 static bool isTypeCongruent(Type *L, Type *R) {
3448 if (L == R)
3449 return true;
3450 PointerType *PL = dyn_cast<PointerType>(L);
3451 PointerType *PR = dyn_cast<PointerType>(R);
3452 if (!PL || !PR)
3453 return false;
3454 return PL->getAddressSpace() == PR->getAddressSpace();
3455 }
3456
getParameterABIAttributes(LLVMContext & C,unsigned I,AttributeList Attrs)3457 static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
3458 static const Attribute::AttrKind ABIAttrs[] = {
3459 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3460 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3461 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3462 Attribute::ByRef};
3463 AttrBuilder Copy(C);
3464 for (auto AK : ABIAttrs) {
3465 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3466 if (Attr.isValid())
3467 Copy.addAttribute(Attr);
3468 }
3469
3470 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3471 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3472 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3473 Attrs.hasParamAttr(I, Attribute::ByRef)))
3474 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3475 return Copy;
3476 }
3477
/// Verify the extra constraints on a 'musttail' call: the caller and callee
/// must agree on varargs-ness, calling convention, and (congruent) return
/// type; the call must be immediately followed by an optional bitcast and a
/// ret of its result; and ABI-impacting parameter attributes must be
/// compatible. tailcc/swifttailcc calls get a relaxed prototype check but a
/// stricter attribute check.
void Verifier::verifyMustTailCall(CallInst &CI) {
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;
  Instruction *Next = CI.getNextNode();

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    // The ret (checked next) may return the bitcast instead of the call.
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  // Returning undef is tolerated alongside returning the call's value or void.
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
            isa<UndefValue>(Ret->getReturnValue()),
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    // tailcc/swifttailcc skip the prototype/attribute-match checks below.
    return;
  }

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  // (Intrinsic callees are exempt; e.g. statepoints wrap another callee.)
  if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
3564
visitCallInst(CallInst & CI)3565 void Verifier::visitCallInst(CallInst &CI) {
3566 visitCallBase(CI);
3567
3568 if (CI.isMustTailCall())
3569 verifyMustTailCall(CI);
3570 }
3571
visitInvokeInst(InvokeInst & II)3572 void Verifier::visitInvokeInst(InvokeInst &II) {
3573 visitCallBase(II);
3574
3575 // Verify that the first non-PHI instruction of the unwind destination is an
3576 // exception handling instruction.
3577 Check(
3578 II.getUnwindDest()->isEHPad(),
3579 "The unwind destination does not have an exception handling instruction!",
3580 &II);
3581
3582 visitTerminator(II);
3583 }
3584
3585 /// visitUnaryOperator - Check the argument to the unary operator.
3586 ///
visitUnaryOperator(UnaryOperator & U)3587 void Verifier::visitUnaryOperator(UnaryOperator &U) {
3588 Check(U.getType() == U.getOperand(0)->getType(),
3589 "Unary operators must have same type for"
3590 "operands and result!",
3591 &U);
3592
3593 switch (U.getOpcode()) {
3594 // Check that floating-point arithmetic operators are only used with
3595 // floating-point operands.
3596 case Instruction::FNeg:
3597 Check(U.getType()->isFPOrFPVectorTy(),
3598 "FNeg operator only works with float types!", &U);
3599 break;
3600 default:
3601 llvm_unreachable("Unknown UnaryOperator opcode!");
3602 }
3603
3604 visitInstruction(U);
3605 }
3606
/// visitBinaryOperator - Check that both arguments to the binary operator are
/// of the same type!
///
/// Also checks, per opcode group, that the operand/result types are from the
/// legal class for that operator (integer, floating-point, or either's vector
/// form) and that the result type matches the operand type.
void Verifier::visitBinaryOperator(BinaryOperator &B) {
  Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
        "Both operands to a binary operator are not of the same type!", &B);

  switch (B.getOpcode()) {
  // Check that integer arithmetic operators are only used with
  // integral operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Integer arithmetic operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Integer arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that floating-point arithmetic operators are only used with
  // floating-point operands.
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    Check(B.getType()->isFPOrFPVectorTy(),
          "Floating-point arithmetic operators only work with "
          "floating-point types!",
          &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Floating-point arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that logical operators are only used with integral operands.
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Logical operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Logical operators must have same type for operands and result!", &B);
    break;
  // Shifts likewise only apply to integers and integer vectors.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Shifts only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Shift return type must be same as operands!", &B);
    break;
  default:
    llvm_unreachable("Unknown BinaryOperator opcode!");
  }

  visitInstruction(B);
}
3670
visitICmpInst(ICmpInst & IC)3671 void Verifier::visitICmpInst(ICmpInst &IC) {
3672 // Check that the operands are the same type
3673 Type *Op0Ty = IC.getOperand(0)->getType();
3674 Type *Op1Ty = IC.getOperand(1)->getType();
3675 Check(Op0Ty == Op1Ty,
3676 "Both operands to ICmp instruction are not of the same type!", &IC);
3677 // Check that the operands are the right type
3678 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3679 "Invalid operand types for ICmp instruction", &IC);
3680 // Check that the predicate is valid.
3681 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
3682
3683 visitInstruction(IC);
3684 }
3685
visitFCmpInst(FCmpInst & FC)3686 void Verifier::visitFCmpInst(FCmpInst &FC) {
3687 // Check that the operands are the same type
3688 Type *Op0Ty = FC.getOperand(0)->getType();
3689 Type *Op1Ty = FC.getOperand(1)->getType();
3690 Check(Op0Ty == Op1Ty,
3691 "Both operands to FCmp instruction are not of the same type!", &FC);
3692 // Check that the operands are the right type
3693 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
3694 &FC);
3695 // Check that the predicate is valid.
3696 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
3697
3698 visitInstruction(FC);
3699 }
3700
visitExtractElementInst(ExtractElementInst & EI)3701 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
3702 Check(ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
3703 "Invalid extractelement operands!", &EI);
3704 visitInstruction(EI);
3705 }
3706
visitInsertElementInst(InsertElementInst & IE)3707 void Verifier::visitInsertElementInst(InsertElementInst &IE) {
3708 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
3709 IE.getOperand(2)),
3710 "Invalid insertelement operands!", &IE);
3711 visitInstruction(IE);
3712 }
3713
visitShuffleVectorInst(ShuffleVectorInst & SV)3714 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3715 Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
3716 SV.getShuffleMask()),
3717 "Invalid shufflevector operands!", &SV);
3718 visitInstruction(SV);
3719 }
3720
/// Verify a 'getelementptr' instruction: pointer base, sized source element
/// type, integer indices, a result type consistent with the indexed type, and
/// the extra width-matching rules for vector GEPs.
void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  // For a vector-of-pointers base, check the scalar element type.
  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();

  Check(isa<PointerType>(TargetTy),
        "GEP base pointer is not a vector or a vector of pointers", &GEP);
  Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);

  SmallVector<Value *, 16> Idxs(GEP.indices());
  Check(
      all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
      "GEP indexes must be integers", &GEP);
  // Walk the source element type with the indices; a null result means the
  // index list does not describe a valid path through the type.
  Type *ElTy =
      GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
  Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);

  Check(GEP.getType()->isPtrOrPtrVectorTy() &&
            GEP.getResultElementType() == ElTy,
        "GEP is not of right type for indices!", &GEP, ElTy);

  if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
    // Additional checks for vector GEPs.
    ElementCount GEPWidth = GEPVTy->getElementCount();
    if (GEP.getPointerOperandType()->isVectorTy())
      Check(
          GEPWidth ==
              cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
          "Vector GEP result width doesn't match operand's", &GEP);
    // Scalar indices are allowed alongside vector ones, but every vector
    // index must match the result's element count.
    for (Value *Idx : Idxs) {
      Type *IndexTy = Idx->getType();
      if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
        ElementCount IndexWidth = IndexVTy->getElementCount();
        Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
      }
      Check(IndexTy->isIntOrIntVectorTy(),
            "All GEP indices should be of integer type");
    }
  }

  // The result pointer must live in the same address space as its type says.
  if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
    Check(GEP.getAddressSpace() == PTy->getAddressSpace(),
          "GEP address space doesn't match type", &GEP);
  }

  visitInstruction(GEP);
}
3766
isContiguous(const ConstantRange & A,const ConstantRange & B)3767 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
3768 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
3769 }
3770
/// Verify !range metadata attached to \p I: an even, nonzero number of
/// operands forming [Low, High) pairs of ConstantInts of type \p Ty, where the
/// ranges are non-empty, non-overlapping, in order, and not contiguous (which
/// would mean they should have been merged).
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");

  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  // Only read after the first iteration assigns it (guarded by i != 0 below).
  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);
    Check(High->getType() == Low->getType() && High->getType() == Ty,
          "Range types must match instruction type!", &I);

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();
    ConstantRange CurRange(LowV, HighV);
    Check(!CurRange.isEmptySet() && !CurRange.isFullSet(),
          "Range must not be empty!", Range);
    // Compare each range against its predecessor in the list.
    if (i != 0) {
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With more than two ranges the list can wrap around the signed domain, so
  // the first and last ranges must also be checked against each other.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
3818
checkAtomicMemAccessSize(Type * Ty,const Instruction * I)3819 void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
3820 unsigned Size = DL.getTypeSizeInBits(Ty);
3821 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
3822 Check(!(Size & (Size - 1)),
3823 "atomic memory access' operand must have a power-of-two size", Ty, I);
3824 }
3825
/// Verify a 'load' instruction: pointer operand, alignment limit, sized result
/// type, and the ordering/type/size constraints that apply to atomic loads.
void Verifier::visitLoadInst(LoadInst &LI) {
  PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
  Check(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = LI.getType();
  if (MaybeAlign A = LI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  }
  Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
  if (LI.isAtomic()) {
    // Release semantics apply to writes, so a load may not use them.
    Check(LI.getOrdering() != AtomicOrdering::Release &&
              LI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Load cannot have Release ordering", &LI);
    Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
          "atomic load operand must have integer, pointer, or floating point "
          "type!",
          ElTy, &LI);
    checkAtomicMemAccessSize(ElTy, &LI);
  } else {
    // A non-default sync scope is only meaningful on atomic accesses.
    Check(LI.getSyncScopeID() == SyncScope::System,
          "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}
3851
/// Verify a 'store' instruction: pointer operand matching the stored value's
/// type, alignment limit, sized value type, and the ordering/type/size
/// constraints that apply to atomic stores.
void Verifier::visitStoreInst(StoreInst &SI) {
  PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
  Check(PTy, "Store operand must be a pointer.", &SI);
  Type *ElTy = SI.getOperand(0)->getType();
  Check(PTy->isOpaqueOrPointeeTypeMatches(ElTy),
        "Stored value type does not match pointer operand type!", &SI, ElTy);
  if (MaybeAlign A = SI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &SI);
  }
  Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
  if (SI.isAtomic()) {
    // Acquire semantics apply to reads, so a store may not use them.
    Check(SI.getOrdering() != AtomicOrdering::Acquire &&
              SI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Store cannot have Acquire ordering", &SI);
    Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
          "atomic store operand must have integer, pointer, or floating point "
          "type!",
          ElTy, &SI);
    checkAtomicMemAccessSize(ElTy, &SI);
  } else {
    // A non-default sync scope is only meaningful on atomic accesses.
    Check(SI.getSyncScopeID() == SyncScope::System,
          "Non-atomic store cannot have SynchronizationScope specified", &SI);
  }
  visitInstruction(SI);
}
3878
3879 /// Check that SwiftErrorVal is used as a swifterror argument in CS.
verifySwiftErrorCall(CallBase & Call,const Value * SwiftErrorVal)3880 void Verifier::verifySwiftErrorCall(CallBase &Call,
3881 const Value *SwiftErrorVal) {
3882 for (const auto &I : llvm::enumerate(Call.args())) {
3883 if (I.value() == SwiftErrorVal) {
3884 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
3885 "swifterror value when used in a callsite should be marked "
3886 "with swifterror attribute",
3887 SwiftErrorVal, Call);
3888 }
3889 }
3890 }
3891
verifySwiftErrorValue(const Value * SwiftErrorVal)3892 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
3893 // Check that swifterror value is only used by loads, stores, or as
3894 // a swifterror argument.
3895 for (const User *U : SwiftErrorVal->users()) {
3896 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
3897 isa<InvokeInst>(U),
3898 "swifterror value can only be loaded and stored from, or "
3899 "as a swifterror argument!",
3900 SwiftErrorVal, U);
3901 // If it is used by a store, check it is the second operand.
3902 if (auto StoreI = dyn_cast<StoreInst>(U))
3903 Check(StoreI->getOperand(1) == SwiftErrorVal,
3904 "swifterror value should be the second operand when used "
3905 "by stores",
3906 SwiftErrorVal, U);
3907 if (auto *Call = dyn_cast<CallBase>(U))
3908 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
3909 }
3910 }
3911
/// Verify an 'alloca' instruction: sized allocated type, integer array size,
/// alignment limit, and the extra restrictions on swifterror slots.
void Verifier::visitAllocaInst(AllocaInst &AI) {
  // The Visited set lets isSized handle self-referential (recursive) types.
  SmallPtrSet<Type*, 4> Visited;
  Check(AI.getAllocatedType()->isSized(&Visited),
        "Cannot allocate unsized type", &AI);
  Check(AI.getArraySize()->getType()->isIntegerTy(),
        "Alloca array size must have integer type", &AI);
  if (MaybeAlign A = AI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &AI);
  }

  if (AI.isSwiftError()) {
    // A swifterror slot must be a single pointer, and all of its uses must
    // follow the swifterror rules (checked by verifySwiftErrorValue).
    Check(AI.getAllocatedType()->isPointerTy(),
          "swifterror alloca must have pointer type", &AI);
    Check(!AI.isArrayAllocation(),
          "swifterror alloca must not be array allocation", &AI);
    verifySwiftErrorValue(&AI);
  }

  visitInstruction(AI);
}
3933
visitAtomicCmpXchgInst(AtomicCmpXchgInst & CXI)3934 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
3935 Type *ElTy = CXI.getOperand(1)->getType();
3936 Check(ElTy->isIntOrPtrTy(),
3937 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
3938 checkAtomicMemAccessSize(ElTy, &CXI);
3939 visitInstruction(CXI);
3940 }
3941
/// Verify an 'atomicrmw' instruction: ordering, a legal operation, and an
/// operand type appropriate for that operation (xchg also allows FP and
/// pointers; the FP operations require FP; everything else requires integer).
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
        "atomicrmw instructions cannot be unordered.", &RMWI);
  auto Op = RMWI.getOperation();
  // Operand 1 is the value operand; its type dictates the access type.
  Type *ElTy = RMWI.getOperand(1)->getType();
  if (Op == AtomicRMWInst::Xchg) {
    Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
              ElTy->isPointerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer or floating point type!",
          &RMWI, ElTy);
  } else if (AtomicRMWInst::isFPOperation(Op)) {
    Check(ElTy->isFloatingPointTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have floating point type!",
          &RMWI, ElTy);
  } else {
    Check(ElTy->isIntegerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer type!",
          &RMWI, ElTy);
  }
  checkAtomicMemAccessSize(ElTy, &RMWI);
  Check(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
        "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}
3969
visitFenceInst(FenceInst & FI)3970 void Verifier::visitFenceInst(FenceInst &FI) {
3971 const AtomicOrdering Ordering = FI.getOrdering();
3972 Check(Ordering == AtomicOrdering::Acquire ||
3973 Ordering == AtomicOrdering::Release ||
3974 Ordering == AtomicOrdering::AcquireRelease ||
3975 Ordering == AtomicOrdering::SequentiallyConsistent,
3976 "fence instructions may only have acquire, release, acq_rel, or "
3977 "seq_cst ordering.",
3978 &FI);
3979 visitInstruction(FI);
3980 }
3981
visitExtractValueInst(ExtractValueInst & EVI)3982 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
3983 Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
3984 EVI.getIndices()) == EVI.getType(),
3985 "Invalid ExtractValueInst operands!", &EVI);
3986
3987 visitInstruction(EVI);
3988 }
3989
visitInsertValueInst(InsertValueInst & IVI)3990 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
3991 Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
3992 IVI.getIndices()) ==
3993 IVI.getOperand(1)->getType(),
3994 "Invalid InsertValueInst operands!", &IVI);
3995
3996 visitInstruction(IVI);
3997 }
3998
getParentPad(Value * EHPad)3999 static Value *getParentPad(Value *EHPad) {
4000 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4001 return FPI->getParentPad();
4002
4003 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4004 }
4005
visitEHPadPredecessors(Instruction & I)4006 void Verifier::visitEHPadPredecessors(Instruction &I) {
4007 assert(I.isEHPad());
4008
4009 BasicBlock *BB = I.getParent();
4010 Function *F = BB->getParent();
4011
4012 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4013
4014 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4015 // The landingpad instruction defines its parent as a landing pad block. The
4016 // landing pad block may be branched to only by the unwind edge of an
4017 // invoke.
4018 for (BasicBlock *PredBB : predecessors(BB)) {
4019 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4020 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4021 "Block containing LandingPadInst must be jumped to "
4022 "only by the unwind edge of an invoke.",
4023 LPI);
4024 }
4025 return;
4026 }
4027 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4028 if (!pred_empty(BB))
4029 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4030 "Block containg CatchPadInst must be jumped to "
4031 "only by its catchswitch.",
4032 CPI);
4033 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4034 "Catchswitch cannot unwind to one of its catchpads",
4035 CPI->getCatchSwitch(), CPI);
4036 return;
4037 }
4038
4039 // Verify that each pred has a legal terminator with a legal to/from EH
4040 // pad relationship.
4041 Instruction *ToPad = &I;
4042 Value *ToPadParent = getParentPad(ToPad);
4043 for (BasicBlock *PredBB : predecessors(BB)) {
4044 Instruction *TI = PredBB->getTerminator();
4045 Value *FromPad;
4046 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4047 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4048 "EH pad must be jumped to via an unwind edge", ToPad, II);
4049 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4050 FromPad = Bundle->Inputs[0];
4051 else
4052 FromPad = ConstantTokenNone::get(II->getContext());
4053 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4054 FromPad = CRI->getOperand(0);
4055 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4056 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4057 FromPad = CSI;
4058 } else {
4059 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4060 }
4061
4062 // The edge may exit from zero or more nested pads.
4063 SmallSet<Value *, 8> Seen;
4064 for (;; FromPad = getParentPad(FromPad)) {
4065 Check(FromPad != ToPad,
4066 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4067 if (FromPad == ToPadParent) {
4068 // This is a legal unwind edge.
4069 break;
4070 }
4071 Check(!isa<ConstantTokenNone>(FromPad),
4072 "A single unwind edge may only enter one EH pad", TI);
4073 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4074 FromPad);
4075
4076 // This will be diagnosed on the corresponding instruction already. We
4077 // need the extra check here to make sure getParentPad() works.
4078 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4079 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4080 }
4081 }
4082 }
4083
visitLandingPadInst(LandingPadInst & LPI)4084 void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4085 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4086 // isn't a cleanup.
4087 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4088 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4089
4090 visitEHPadPredecessors(LPI);
4091
4092 if (!LandingPadResultTy)
4093 LandingPadResultTy = LPI.getType();
4094 else
4095 Check(LandingPadResultTy == LPI.getType(),
4096 "The landingpad instruction should have a consistent result type "
4097 "inside a function.",
4098 &LPI);
4099
4100 Function *F = LPI.getParent()->getParent();
4101 Check(F->hasPersonalityFn(),
4102 "LandingPadInst needs to be in a function with a personality.", &LPI);
4103
4104 // The landingpad instruction must be the first non-PHI instruction in the
4105 // block.
4106 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4107 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4108
4109 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4110 Constant *Clause = LPI.getClause(i);
4111 if (LPI.isCatch(i)) {
4112 Check(isa<PointerType>(Clause->getType()),
4113 "Catch operand does not have pointer type!", &LPI);
4114 } else {
4115 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4116 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4117 "Filter operand is not an array of constants!", &LPI);
4118 }
4119 }
4120
4121 visitInstruction(LPI);
4122 }
4123
visitResumeInst(ResumeInst & RI)4124 void Verifier::visitResumeInst(ResumeInst &RI) {
4125 Check(RI.getFunction()->hasPersonalityFn(),
4126 "ResumeInst needs to be in a function with a personality.", &RI);
4127
4128 if (!LandingPadResultTy)
4129 LandingPadResultTy = RI.getValue()->getType();
4130 else
4131 Check(LandingPadResultTy == RI.getValue()->getType(),
4132 "The resume instruction should have a consistent result type "
4133 "inside a function.",
4134 &RI);
4135
4136 visitTerminator(RI);
4137 }
4138
visitCatchPadInst(CatchPadInst & CPI)4139 void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4140 BasicBlock *BB = CPI.getParent();
4141
4142 Function *F = BB->getParent();
4143 Check(F->hasPersonalityFn(),
4144 "CatchPadInst needs to be in a function with a personality.", &CPI);
4145
4146 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4147 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4148 CPI.getParentPad());
4149
4150 // The catchpad instruction must be the first non-PHI instruction in the
4151 // block.
4152 Check(BB->getFirstNonPHI() == &CPI,
4153 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4154
4155 visitEHPadPredecessors(CPI);
4156 visitFuncletPadInst(CPI);
4157 }
4158
visitCatchReturnInst(CatchReturnInst & CatchReturn)4159 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4160 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4161 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4162 CatchReturn.getOperand(0));
4163
4164 visitTerminator(CatchReturn);
4165 }
4166
visitCleanupPadInst(CleanupPadInst & CPI)4167 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4168 BasicBlock *BB = CPI.getParent();
4169
4170 Function *F = BB->getParent();
4171 Check(F->hasPersonalityFn(),
4172 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4173
4174 // The cleanuppad instruction must be the first non-PHI instruction in the
4175 // block.
4176 Check(BB->getFirstNonPHI() == &CPI,
4177 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4178
4179 auto *ParentPad = CPI.getParentPad();
4180 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4181 "CleanupPadInst has an invalid parent.", &CPI);
4182
4183 visitEHPadPredecessors(CPI);
4184 visitFuncletPadInst(CPI);
4185 }
4186
// Verify that all unwind edges leaving the funclet pad FPI agree on a single
// unwind destination, and (for catchpads) that this destination matches the
// parent catchswitch's unwind destination. Because a cleanuppad's unwind
// destination is only discoverable by following its uses (possibly through
// nested cleanuppads), this walks a worklist of pads nested under FPI,
// resolving each pad as soon as one exiting unwind edge is found.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First unwind edge found that exits FPI, and where it leads; all later
  // exiting edges must agree with it.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  // Guards against a pad appearing in its own ancestor chain.
  SmallSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    // First ancestor of CurrentPad that is NOT yet known to have a resolved
    // unwind destination after scanning this pad's uses (nullptr if none).
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      // Classify the use: only instructions that can carry an unwind edge
      // out of the pad matter here.
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      // Where this use unwinds to, and whether that edge leaves FPI itself
      // (as opposed to only leaving a nested pad).
      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = UnwindDest->getFirstNonPHI();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // For a catchpad, the unwind edges leaving it must also agree with its
  // parent catchswitch's unwind destination (or both unwind to caller).
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
4346
visitCatchSwitchInst(CatchSwitchInst & CatchSwitch)4347 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4348 BasicBlock *BB = CatchSwitch.getParent();
4349
4350 Function *F = BB->getParent();
4351 Check(F->hasPersonalityFn(),
4352 "CatchSwitchInst needs to be in a function with a personality.",
4353 &CatchSwitch);
4354
4355 // The catchswitch instruction must be the first non-PHI instruction in the
4356 // block.
4357 Check(BB->getFirstNonPHI() == &CatchSwitch,
4358 "CatchSwitchInst not the first non-PHI instruction in the block.",
4359 &CatchSwitch);
4360
4361 auto *ParentPad = CatchSwitch.getParentPad();
4362 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4363 "CatchSwitchInst has an invalid parent.", ParentPad);
4364
4365 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4366 Instruction *I = UnwindDest->getFirstNonPHI();
4367 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4368 "CatchSwitchInst must unwind to an EH block which is not a "
4369 "landingpad.",
4370 &CatchSwitch);
4371
4372 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4373 if (getParentPad(I) == ParentPad)
4374 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4375 }
4376
4377 Check(CatchSwitch.getNumHandlers() != 0,
4378 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4379
4380 for (BasicBlock *Handler : CatchSwitch.handlers()) {
4381 Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4382 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4383 }
4384
4385 visitEHPadPredecessors(CatchSwitch);
4386 visitTerminator(CatchSwitch);
4387 }
4388
visitCleanupReturnInst(CleanupReturnInst & CRI)4389 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4390 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4391 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4392 CRI.getOperand(0));
4393
4394 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4395 Instruction *I = UnwindDest->getFirstNonPHI();
4396 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4397 "CleanupReturnInst must unwind to an EH block which is not a "
4398 "landingpad.",
4399 &CRI);
4400 }
4401
4402 visitTerminator(CRI);
4403 }
4404
// Verify that the instruction defining operand i of I dominates the use.
// Called only when that operand is itself an Instruction.
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
  Instruction *Op = cast<Instruction>(I.getOperand(i));
  // If we have an invalid invoke, don't try to compute the dominance.
  // We already reject it in the invoke specific checks and the dominance
  // computation doesn't handle multiple edges.
  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
    if (II->getNormalDest() == II->getUnwindDest())
      return;
  }

  // Quick check whether the def has already been encountered in the same block.
  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
  // uses are defined to happen on the incoming edge, not at the instruction.
  //
  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
  // wrapping an SSA value, assert that we've already encountered it. See
  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
    return;

  // Fall back to the (more expensive) dominator-tree query.
  const Use &U = I.getOperandUse(i);
  Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
}
4428
visitDereferenceableMetadata(Instruction & I,MDNode * MD)4429 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4430 Check(I.getType()->isPointerTy(),
4431 "dereferenceable, dereferenceable_or_null "
4432 "apply only to pointer types",
4433 &I);
4434 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4435 "dereferenceable, dereferenceable_or_null apply only to load"
4436 " and inttoptr instructions, use attributes for calls or invokes",
4437 &I);
4438 Check(MD->getNumOperands() == 1,
4439 "dereferenceable, dereferenceable_or_null "
4440 "take one operand!",
4441 &I);
4442 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4443 Check(CI && CI->getType()->isIntegerTy(64),
4444 "dereferenceable, "
4445 "dereferenceable_or_null metadata value must be an i64!",
4446 &I);
4447 }
4448
visitProfMetadata(Instruction & I,MDNode * MD)4449 void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4450 Check(MD->getNumOperands() >= 2,
4451 "!prof annotations should have no less than 2 operands", MD);
4452
4453 // Check first operand.
4454 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4455 Check(isa<MDString>(MD->getOperand(0)),
4456 "expected string with name of the !prof annotation", MD);
4457 MDString *MDS = cast<MDString>(MD->getOperand(0));
4458 StringRef ProfName = MDS->getString();
4459
4460 // Check consistency of !prof branch_weights metadata.
4461 if (ProfName.equals("branch_weights")) {
4462 if (isa<InvokeInst>(&I)) {
4463 Check(MD->getNumOperands() == 2 || MD->getNumOperands() == 3,
4464 "Wrong number of InvokeInst branch_weights operands", MD);
4465 } else {
4466 unsigned ExpectedNumOperands = 0;
4467 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4468 ExpectedNumOperands = BI->getNumSuccessors();
4469 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4470 ExpectedNumOperands = SI->getNumSuccessors();
4471 else if (isa<CallInst>(&I))
4472 ExpectedNumOperands = 1;
4473 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4474 ExpectedNumOperands = IBI->getNumDestinations();
4475 else if (isa<SelectInst>(&I))
4476 ExpectedNumOperands = 2;
4477 else
4478 CheckFailed("!prof branch_weights are not allowed for this instruction",
4479 MD);
4480
4481 Check(MD->getNumOperands() == 1 + ExpectedNumOperands,
4482 "Wrong number of operands", MD);
4483 }
4484 for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
4485 auto &MDO = MD->getOperand(i);
4486 Check(MDO, "second operand should not be null", MD);
4487 Check(mdconst::dyn_extract<ConstantInt>(MDO),
4488 "!prof brunch_weights operand is not a const int");
4489 }
4490 }
4491 }
4492
visitCallStackMetadata(MDNode * MD)4493 void Verifier::visitCallStackMetadata(MDNode *MD) {
4494 // Call stack metadata should consist of a list of at least 1 constant int
4495 // (representing a hash of the location).
4496 Check(MD->getNumOperands() >= 1,
4497 "call stack metadata should have at least 1 operand", MD);
4498
4499 for (const auto &Op : MD->operands())
4500 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
4501 "call stack metadata operand should be constant integer", Op);
4502 }
4503
visitMemProfMetadata(Instruction & I,MDNode * MD)4504 void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
4505 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
4506 Check(MD->getNumOperands() >= 1,
4507 "!memprof annotations should have at least 1 metadata operand "
4508 "(MemInfoBlock)",
4509 MD);
4510
4511 // Check each MIB
4512 for (auto &MIBOp : MD->operands()) {
4513 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
4514 // The first operand of an MIB should be the call stack metadata.
4515 // There rest of the operands should be MDString tags, and there should be
4516 // at least one.
4517 Check(MIB->getNumOperands() >= 2,
4518 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
4519
4520 // Check call stack metadata (first operand).
4521 Check(MIB->getOperand(0) != nullptr,
4522 "!memprof MemInfoBlock first operand should not be null", MIB);
4523 Check(isa<MDNode>(MIB->getOperand(0)),
4524 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
4525 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
4526 visitCallStackMetadata(StackMD);
4527
4528 // Check that remaining operands are MDString.
4529 Check(std::all_of(MIB->op_begin() + 1, MIB->op_end(),
4530 [](const MDOperand &Op) { return isa<MDString>(Op); }),
4531 "Not all !memprof MemInfoBlock operands 1 to N are MDString", MIB);
4532 }
4533 }
4534
visitCallsiteMetadata(Instruction & I,MDNode * MD)4535 void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
4536 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
4537 // Verify the partial callstack annotated from memprof profiles. This callsite
4538 // is a part of a profiled allocation callstack.
4539 visitCallStackMetadata(MD);
4540 }
4541
visitAnnotationMetadata(MDNode * Annotation)4542 void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
4543 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
4544 Check(Annotation->getNumOperands() >= 1,
4545 "annotation must have at least one operand");
4546 for (const MDOperand &Op : Annotation->operands())
4547 Check(isa<MDString>(Op.get()), "operands must be strings");
4548 }
4549
visitAliasScopeMetadata(const MDNode * MD)4550 void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
4551 unsigned NumOps = MD->getNumOperands();
4552 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
4553 MD);
4554 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
4555 "first scope operand must be self-referential or string", MD);
4556 if (NumOps == 3)
4557 Check(isa<MDString>(MD->getOperand(2)),
4558 "third scope operand must be string (if used)", MD);
4559
4560 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
4561 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
4562
4563 unsigned NumDomainOps = Domain->getNumOperands();
4564 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
4565 "domain must have one or two operands", Domain);
4566 Check(Domain->getOperand(0).get() == Domain ||
4567 isa<MDString>(Domain->getOperand(0)),
4568 "first domain operand must be self-referential or string", Domain);
4569 if (NumDomainOps == 2)
4570 Check(isa<MDString>(Domain->getOperand(1)),
4571 "second domain operand must be string (if used)", Domain);
4572 }
4573
visitAliasScopeListMetadata(const MDNode * MD)4574 void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
4575 for (const MDOperand &Op : MD->operands()) {
4576 const MDNode *OpMD = dyn_cast<MDNode>(Op);
4577 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
4578 visitAliasScopeMetadata(OpMD);
4579 }
4580 }
4581
visitAccessGroupMetadata(const MDNode * MD)4582 void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
4583 auto IsValidAccessScope = [](const MDNode *MD) {
4584 return MD->getNumOperands() == 0 && MD->isDistinct();
4585 };
4586
4587 // It must be either an access scope itself...
4588 if (IsValidAccessScope(MD))
4589 return;
4590
4591 // ...or a list of access scopes.
4592 for (const MDOperand &Op : MD->operands()) {
4593 const MDNode *OpMD = dyn_cast<MDNode>(Op);
4594 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
4595 Check(IsValidAccessScope(OpMD),
4596 "Access scope list contains invalid access scope", MD);
4597 }
4598 }
4599
4600 /// verifyInstruction - Verify that an instruction is well formed.
4601 ///
visitInstruction(Instruction & I)4602 void Verifier::visitInstruction(Instruction &I) {
4603 BasicBlock *BB = I.getParent();
4604 Check(BB, "Instruction not embedded in basic block!", &I);
4605
4606 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
4607 for (User *U : I.users()) {
4608 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
4609 "Only PHI nodes may reference their own value!", &I);
4610 }
4611 }
4612
4613 // Check that void typed values don't have names
4614 Check(!I.getType()->isVoidTy() || !I.hasName(),
4615 "Instruction has a name, but provides a void value!", &I);
4616
4617 // Check that the return value of the instruction is either void or a legal
4618 // value type.
4619 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
4620 "Instruction returns a non-scalar type!", &I);
4621
4622 // Check that the instruction doesn't produce metadata. Calls are already
4623 // checked against the callee type.
4624 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
4625 "Invalid use of metadata!", &I);
4626
4627 // Check that all uses of the instruction, if they are instructions
4628 // themselves, actually have parent basic blocks. If the use is not an
4629 // instruction, it is an error!
4630 for (Use &U : I.uses()) {
4631 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
4632 Check(Used->getParent() != nullptr,
4633 "Instruction referencing"
4634 " instruction not embedded in a basic block!",
4635 &I, Used);
4636 else {
4637 CheckFailed("Use of instruction is not an instruction!", U);
4638 return;
4639 }
4640 }
4641
4642 // Get a pointer to the call base of the instruction if it is some form of
4643 // call.
4644 const CallBase *CBI = dyn_cast<CallBase>(&I);
4645
4646 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
4647 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
4648
4649 // Check to make sure that only first-class-values are operands to
4650 // instructions.
4651 if (!I.getOperand(i)->getType()->isFirstClassType()) {
4652 Check(false, "Instruction operands must be first-class values!", &I);
4653 }
4654
4655 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
4656 // This code checks whether the function is used as the operand of a
4657 // clang_arc_attachedcall operand bundle.
4658 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
4659 int Idx) {
4660 return CBI && CBI->isOperandBundleOfType(
4661 LLVMContext::OB_clang_arc_attachedcall, Idx);
4662 };
4663
4664 // Check to make sure that the "address of" an intrinsic function is never
4665 // taken. Ignore cases where the address of the intrinsic function is used
4666 // as the argument of operand bundle "clang.arc.attachedcall" as those
4667 // cases are handled in verifyAttachedCallBundle.
4668 Check((!F->isIntrinsic() ||
4669 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
4670 IsAttachedCallOperand(F, CBI, i)),
4671 "Cannot take the address of an intrinsic!", &I);
4672 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
4673 F->getIntrinsicID() == Intrinsic::donothing ||
4674 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
4675 F->getIntrinsicID() == Intrinsic::seh_try_end ||
4676 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
4677 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
4678 F->getIntrinsicID() == Intrinsic::coro_resume ||
4679 F->getIntrinsicID() == Intrinsic::coro_destroy ||
4680 F->getIntrinsicID() ==
4681 Intrinsic::experimental_patchpoint_void ||
4682 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
4683 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
4684 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
4685 IsAttachedCallOperand(F, CBI, i),
4686 "Cannot invoke an intrinsic other than donothing, patchpoint, "
4687 "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
4688 &I);
4689 Check(F->getParent() == &M, "Referencing function in another module!", &I,
4690 &M, F, F->getParent());
4691 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
4692 Check(OpBB->getParent() == BB->getParent(),
4693 "Referring to a basic block in another function!", &I);
4694 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
4695 Check(OpArg->getParent() == BB->getParent(),
4696 "Referring to an argument in another function!", &I);
4697 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
4698 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
4699 &M, GV, GV->getParent());
4700 } else if (isa<Instruction>(I.getOperand(i))) {
4701 verifyDominatesUse(I, i);
4702 } else if (isa<InlineAsm>(I.getOperand(i))) {
4703 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
4704 "Cannot take the address of an inline asm!", &I);
4705 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
4706 if (CE->getType()->isPtrOrPtrVectorTy()) {
4707 // If we have a ConstantExpr pointer, we need to see if it came from an
4708 // illegal bitcast.
4709 visitConstantExprsRecursively(CE);
4710 }
4711 }
4712 }
4713
4714 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
4715 Check(I.getType()->isFPOrFPVectorTy(),
4716 "fpmath requires a floating point result!", &I);
4717 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
4718 if (ConstantFP *CFP0 =
4719 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
4720 const APFloat &Accuracy = CFP0->getValueAPF();
4721 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
4722 "fpmath accuracy must have float type", &I);
4723 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
4724 "fpmath accuracy not a positive number!", &I);
4725 } else {
4726 Check(false, "invalid fpmath accuracy!", &I);
4727 }
4728 }
4729
4730 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
4731 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
4732 "Ranges are only for loads, calls and invokes!", &I);
4733 visitRangeMetadata(I, Range, I.getType());
4734 }
4735
4736 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
4737 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
4738 "invariant.group metadata is only for loads and stores", &I);
4739 }
4740
4741 if (I.getMetadata(LLVMContext::MD_nonnull)) {
4742 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
4743 &I);
4744 Check(isa<LoadInst>(I),
4745 "nonnull applies only to load instructions, use attributes"
4746 " for calls or invokes",
4747 &I);
4748 }
4749
4750 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
4751 visitDereferenceableMetadata(I, MD);
4752
4753 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
4754 visitDereferenceableMetadata(I, MD);
4755
4756 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
4757 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
4758
4759 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
4760 visitAliasScopeListMetadata(MD);
4761 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
4762 visitAliasScopeListMetadata(MD);
4763
4764 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
4765 visitAccessGroupMetadata(MD);
4766
4767 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
4768 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
4769 &I);
4770 Check(isa<LoadInst>(I),
4771 "align applies only to load instructions, "
4772 "use attributes for calls or invokes",
4773 &I);
4774 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
4775 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
4776 Check(CI && CI->getType()->isIntegerTy(64),
4777 "align metadata value must be an i64!", &I);
4778 uint64_t Align = CI->getZExtValue();
4779 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
4780 &I);
4781 Check(Align <= Value::MaximumAlignment,
4782 "alignment is larger that implementation defined limit", &I);
4783 }
4784
4785 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
4786 visitProfMetadata(I, MD);
4787
4788 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
4789 visitMemProfMetadata(I, MD);
4790
4791 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
4792 visitCallsiteMetadata(I, MD);
4793
4794 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
4795 visitAnnotationMetadata(Annotation);
4796
4797 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
4798 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
4799 visitMDNode(*N, AreDebugLocsAllowed::Yes);
4800 }
4801
4802 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
4803 verifyFragmentExpression(*DII);
4804 verifyNotEntryValue(*DII);
4805 }
4806
4807 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
4808 I.getAllMetadata(MDs);
4809 for (auto Attachment : MDs) {
4810 unsigned Kind = Attachment.first;
4811 auto AllowLocs =
4812 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
4813 ? AreDebugLocsAllowed::Yes
4814 : AreDebugLocsAllowed::No;
4815 visitMDNode(*Attachment.second, AllowLocs);
4816 }
4817
4818 InstsInThisBlock.insert(&I);
4819 }
4820
4821 /// Allow intrinsics to be verified in different ways.
visitIntrinsicCall(Intrinsic::ID ID,CallBase & Call)4822 void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
4823 Function *IF = Call.getCalledFunction();
4824 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
4825 IF);
4826
4827 // Verify that the intrinsic prototype lines up with what the .td files
4828 // describe.
4829 FunctionType *IFTy = IF->getFunctionType();
4830 bool IsVarArg = IFTy->isVarArg();
4831
4832 SmallVector<Intrinsic::IITDescriptor, 8> Table;
4833 getIntrinsicInfoTableEntries(ID, Table);
4834 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
4835
4836 // Walk the descriptors to extract overloaded types.
4837 SmallVector<Type *, 4> ArgTys;
4838 Intrinsic::MatchIntrinsicTypesResult Res =
4839 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
4840 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
4841 "Intrinsic has incorrect return type!", IF);
4842 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
4843 "Intrinsic has incorrect argument type!", IF);
4844
4845 // Verify if the intrinsic call matches the vararg property.
4846 if (IsVarArg)
4847 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4848 "Intrinsic was not defined with variable arguments!", IF);
4849 else
4850 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4851 "Callsite was not defined with variable arguments!", IF);
4852
4853 // All descriptors should be absorbed by now.
4854 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
4855
4856 // Now that we have the intrinsic ID and the actual argument types (and we
4857 // know they are legal for the intrinsic!) get the intrinsic name through the
4858 // usual means. This allows us to verify the mangling of argument types into
4859 // the name.
4860 const std::string ExpectedName =
4861 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
4862 Check(ExpectedName == IF->getName(),
4863 "Intrinsic name not mangled correctly for type arguments! "
4864 "Should be: " +
4865 ExpectedName,
4866 IF);
4867
4868 // If the intrinsic takes MDNode arguments, verify that they are either global
4869 // or are local to *this* function.
4870 for (Value *V : Call.args()) {
4871 if (auto *MD = dyn_cast<MetadataAsValue>(V))
4872 visitMetadataAsValue(*MD, Call.getCaller());
4873 if (auto *Const = dyn_cast<Constant>(V))
4874 Check(!Const->getType()->isX86_AMXTy(),
4875 "const x86_amx is not allowed in argument!");
4876 }
4877
4878 switch (ID) {
4879 default:
4880 break;
4881 case Intrinsic::assume: {
4882 for (auto &Elem : Call.bundle_op_infos()) {
4883 Check(Elem.Tag->getKey() == "ignore" ||
4884 Attribute::isExistingAttribute(Elem.Tag->getKey()),
4885 "tags must be valid attribute names", Call);
4886 Attribute::AttrKind Kind =
4887 Attribute::getAttrKindFromName(Elem.Tag->getKey());
4888 unsigned ArgCount = Elem.End - Elem.Begin;
4889 if (Kind == Attribute::Alignment) {
4890 Check(ArgCount <= 3 && ArgCount >= 2,
4891 "alignment assumptions should have 2 or 3 arguments", Call);
4892 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
4893 "first argument should be a pointer", Call);
4894 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
4895 "second argument should be an integer", Call);
4896 if (ArgCount == 3)
4897 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
4898 "third argument should be an integer if present", Call);
4899 return;
4900 }
4901 Check(ArgCount <= 2, "too many arguments", Call);
4902 if (Kind == Attribute::None)
4903 break;
4904 if (Attribute::isIntAttrKind(Kind)) {
4905 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
4906 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
4907 "the second argument should be a constant integral value", Call);
4908 } else if (Attribute::canUseAsParamAttr(Kind)) {
4909 Check((ArgCount) == 1, "this attribute should have one argument", Call);
4910 } else if (Attribute::canUseAsFnAttr(Kind)) {
4911 Check((ArgCount) == 0, "this attribute has no argument", Call);
4912 }
4913 }
4914 break;
4915 }
4916 case Intrinsic::coro_id: {
4917 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
4918 if (isa<ConstantPointerNull>(InfoArg))
4919 break;
4920 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
4921 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
4922 "info argument of llvm.coro.id must refer to an initialized "
4923 "constant");
4924 Constant *Init = GV->getInitializer();
4925 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
4926 "info argument of llvm.coro.id must refer to either a struct or "
4927 "an array");
4928 break;
4929 }
4930 case Intrinsic::fptrunc_round: {
4931 // Check the rounding mode
4932 Metadata *MD = nullptr;
4933 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
4934 if (MAV)
4935 MD = MAV->getMetadata();
4936
4937 Check(MD != nullptr, "missing rounding mode argument", Call);
4938
4939 Check(isa<MDString>(MD),
4940 ("invalid value for llvm.fptrunc.round metadata operand"
4941 " (the operand should be a string)"),
4942 MD);
4943
4944 Optional<RoundingMode> RoundMode =
4945 convertStrToRoundingMode(cast<MDString>(MD)->getString());
4946 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
4947 "unsupported rounding mode argument", Call);
4948 break;
4949 }
4950 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
4951 #include "llvm/IR/VPIntrinsics.def"
4952 visitVPIntrinsic(cast<VPIntrinsic>(Call));
4953 break;
4954 #define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
4955 case Intrinsic::INTRINSIC:
4956 #include "llvm/IR/ConstrainedOps.def"
4957 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
4958 break;
4959 case Intrinsic::dbg_declare: // llvm.dbg.declare
4960 Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
4961 "invalid llvm.dbg.declare intrinsic call 1", Call);
4962 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
4963 break;
4964 case Intrinsic::dbg_addr: // llvm.dbg.addr
4965 visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
4966 break;
4967 case Intrinsic::dbg_value: // llvm.dbg.value
4968 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
4969 break;
4970 case Intrinsic::dbg_label: // llvm.dbg.label
4971 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
4972 break;
4973 case Intrinsic::memcpy:
4974 case Intrinsic::memcpy_inline:
4975 case Intrinsic::memmove:
4976 case Intrinsic::memset:
4977 case Intrinsic::memset_inline: {
4978 const auto *MI = cast<MemIntrinsic>(&Call);
4979 auto IsValidAlignment = [&](unsigned Alignment) -> bool {
4980 return Alignment == 0 || isPowerOf2_32(Alignment);
4981 };
4982 Check(IsValidAlignment(MI->getDestAlignment()),
4983 "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
4984 Call);
4985 if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
4986 Check(IsValidAlignment(MTI->getSourceAlignment()),
4987 "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
4988 Call);
4989 }
4990
4991 break;
4992 }
4993 case Intrinsic::memcpy_element_unordered_atomic:
4994 case Intrinsic::memmove_element_unordered_atomic:
4995 case Intrinsic::memset_element_unordered_atomic: {
4996 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
4997
4998 ConstantInt *ElementSizeCI =
4999 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5000 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5001 Check(ElementSizeVal.isPowerOf2(),
5002 "element size of the element-wise atomic memory intrinsic "
5003 "must be a power of 2",
5004 Call);
5005
5006 auto IsValidAlignment = [&](uint64_t Alignment) {
5007 return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
5008 };
5009 uint64_t DstAlignment = AMI->getDestAlignment();
5010 Check(IsValidAlignment(DstAlignment),
5011 "incorrect alignment of the destination argument", Call);
5012 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5013 uint64_t SrcAlignment = AMT->getSourceAlignment();
5014 Check(IsValidAlignment(SrcAlignment),
5015 "incorrect alignment of the source argument", Call);
5016 }
5017 break;
5018 }
5019 case Intrinsic::call_preallocated_setup: {
5020 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5021 Check(NumArgs != nullptr,
5022 "llvm.call.preallocated.setup argument must be a constant");
5023 bool FoundCall = false;
5024 for (User *U : Call.users()) {
5025 auto *UseCall = dyn_cast<CallBase>(U);
5026 Check(UseCall != nullptr,
5027 "Uses of llvm.call.preallocated.setup must be calls");
5028 const Function *Fn = UseCall->getCalledFunction();
5029 if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
5030 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5031 Check(AllocArgIndex != nullptr,
5032 "llvm.call.preallocated.alloc arg index must be a constant");
5033 auto AllocArgIndexInt = AllocArgIndex->getValue();
5034 Check(AllocArgIndexInt.sge(0) &&
5035 AllocArgIndexInt.slt(NumArgs->getValue()),
5036 "llvm.call.preallocated.alloc arg index must be between 0 and "
5037 "corresponding "
5038 "llvm.call.preallocated.setup's argument count");
5039 } else if (Fn && Fn->getIntrinsicID() ==
5040 Intrinsic::call_preallocated_teardown) {
5041 // nothing to do
5042 } else {
5043 Check(!FoundCall, "Can have at most one call corresponding to a "
5044 "llvm.call.preallocated.setup");
5045 FoundCall = true;
5046 size_t NumPreallocatedArgs = 0;
5047 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5048 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5049 ++NumPreallocatedArgs;
5050 }
5051 }
5052 Check(NumPreallocatedArgs != 0,
5053 "cannot use preallocated intrinsics on a call without "
5054 "preallocated arguments");
5055 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5056 "llvm.call.preallocated.setup arg size must be equal to number "
5057 "of preallocated arguments "
5058 "at call site",
5059 Call, *UseCall);
5060 // getOperandBundle() cannot be called if more than one of the operand
5061 // bundle exists. There is already a check elsewhere for this, so skip
5062 // here if we see more than one.
5063 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5064 1) {
5065 return;
5066 }
5067 auto PreallocatedBundle =
5068 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5069 Check(PreallocatedBundle,
5070 "Use of llvm.call.preallocated.setup outside intrinsics "
5071 "must be in \"preallocated\" operand bundle");
5072 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5073 "preallocated bundle must have token from corresponding "
5074 "llvm.call.preallocated.setup");
5075 }
5076 }
5077 break;
5078 }
5079 case Intrinsic::call_preallocated_arg: {
5080 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5081 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5082 Intrinsic::call_preallocated_setup,
5083 "llvm.call.preallocated.arg token argument must be a "
5084 "llvm.call.preallocated.setup");
5085 Check(Call.hasFnAttr(Attribute::Preallocated),
5086 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5087 "call site attribute");
5088 break;
5089 }
5090 case Intrinsic::call_preallocated_teardown: {
5091 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5092 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5093 Intrinsic::call_preallocated_setup,
5094 "llvm.call.preallocated.teardown token argument must be a "
5095 "llvm.call.preallocated.setup");
5096 break;
5097 }
5098 case Intrinsic::gcroot:
5099 case Intrinsic::gcwrite:
5100 case Intrinsic::gcread:
5101 if (ID == Intrinsic::gcroot) {
5102 AllocaInst *AI =
5103 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5104 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5105 Check(isa<Constant>(Call.getArgOperand(1)),
5106 "llvm.gcroot parameter #2 must be a constant.", Call);
5107 if (!AI->getAllocatedType()->isPointerTy()) {
5108 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5109 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5110 "or argument #2 must be a non-null constant.",
5111 Call);
5112 }
5113 }
5114
5115 Check(Call.getParent()->getParent()->hasGC(),
5116 "Enclosing function does not use GC.", Call);
5117 break;
5118 case Intrinsic::init_trampoline:
5119 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5120 "llvm.init_trampoline parameter #2 must resolve to a function.",
5121 Call);
5122 break;
5123 case Intrinsic::prefetch:
5124 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
5125 cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5126 "invalid arguments to llvm.prefetch", Call);
5127 break;
5128 case Intrinsic::stackprotector:
5129 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5130 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5131 break;
5132 case Intrinsic::localescape: {
5133 BasicBlock *BB = Call.getParent();
5134 Check(BB == &BB->getParent()->front(),
5135 "llvm.localescape used outside of entry block", Call);
5136 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5137 Call);
5138 for (Value *Arg : Call.args()) {
5139 if (isa<ConstantPointerNull>(Arg))
5140 continue; // Null values are allowed as placeholders.
5141 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5142 Check(AI && AI->isStaticAlloca(),
5143 "llvm.localescape only accepts static allocas", Call);
5144 }
5145 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5146 SawFrameEscape = true;
5147 break;
5148 }
5149 case Intrinsic::localrecover: {
5150 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5151 Function *Fn = dyn_cast<Function>(FnArg);
5152 Check(Fn && !Fn->isDeclaration(),
5153 "llvm.localrecover first "
5154 "argument must be function defined in this module",
5155 Call);
5156 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5157 auto &Entry = FrameEscapeInfo[Fn];
5158 Entry.second = unsigned(
5159 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5160 break;
5161 }
5162
5163 case Intrinsic::experimental_gc_statepoint:
5164 if (auto *CI = dyn_cast<CallInst>(&Call))
5165 Check(!CI->isInlineAsm(),
5166 "gc.statepoint support for inline assembly unimplemented", CI);
5167 Check(Call.getParent()->getParent()->hasGC(),
5168 "Enclosing function does not use GC.", Call);
5169
5170 verifyStatepoint(Call);
5171 break;
5172 case Intrinsic::experimental_gc_result: {
5173 Check(Call.getParent()->getParent()->hasGC(),
5174 "Enclosing function does not use GC.", Call);
5175 // Are we tied to a statepoint properly?
5176 const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
5177 const Function *StatepointFn =
5178 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
5179 Check(StatepointFn && StatepointFn->isDeclaration() &&
5180 StatepointFn->getIntrinsicID() ==
5181 Intrinsic::experimental_gc_statepoint,
5182 "gc.result operand #1 must be from a statepoint", Call,
5183 Call.getArgOperand(0));
5184
5185 // Check that result type matches wrapped callee.
5186 auto *TargetFuncType =
5187 cast<FunctionType>(StatepointCall->getParamElementType(2));
5188 Check(Call.getType() == TargetFuncType->getReturnType(),
5189 "gc.result result type does not match wrapped callee", Call);
5190 break;
5191 }
5192 case Intrinsic::experimental_gc_relocate: {
5193 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5194
5195 Check(isa<PointerType>(Call.getType()->getScalarType()),
5196 "gc.relocate must return a pointer or a vector of pointers", Call);
5197
5198 // Check that this relocate is correctly tied to the statepoint
5199
5200 // This is case for relocate on the unwinding path of an invoke statepoint
5201 if (LandingPadInst *LandingPad =
5202 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
5203
5204 const BasicBlock *InvokeBB =
5205 LandingPad->getParent()->getUniquePredecessor();
5206
5207 // Landingpad relocates should have only one predecessor with invoke
5208 // statepoint terminator
5209 Check(InvokeBB, "safepoints should have unique landingpads",
5210 LandingPad->getParent());
5211 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
5212 InvokeBB);
5213 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
5214 "gc relocate should be linked to a statepoint", InvokeBB);
5215 } else {
5216 // In all other cases relocate should be tied to the statepoint directly.
5217 // This covers relocates on a normal return path of invoke statepoint and
5218 // relocates of a call statepoint.
5219 auto *Token = Call.getArgOperand(0);
5220 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
5221 "gc relocate is incorrectly tied to the statepoint", Call, Token);
5222 }
5223
5224 // Verify rest of the relocate arguments.
5225 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
5226
5227 // Both the base and derived must be piped through the safepoint.
5228 Value *Base = Call.getArgOperand(1);
5229 Check(isa<ConstantInt>(Base),
5230 "gc.relocate operand #2 must be integer offset", Call);
5231
5232 Value *Derived = Call.getArgOperand(2);
5233 Check(isa<ConstantInt>(Derived),
5234 "gc.relocate operand #3 must be integer offset", Call);
5235
5236 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
5237 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
5238
5239 // Check the bounds
5240 if (isa<UndefValue>(StatepointCall))
5241 break;
5242 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
5243 .getOperandBundle(LLVMContext::OB_gc_live)) {
5244 Check(BaseIndex < Opt->Inputs.size(),
5245 "gc.relocate: statepoint base index out of bounds", Call);
5246 Check(DerivedIndex < Opt->Inputs.size(),
5247 "gc.relocate: statepoint derived index out of bounds", Call);
5248 }
5249
5250 // Relocated value must be either a pointer type or vector-of-pointer type,
5251 // but gc_relocate does not need to return the same pointer type as the
5252 // relocated pointer. It can be casted to the correct type later if it's
5253 // desired. However, they must have the same address space and 'vectorness'
5254 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
5255 Check(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
5256 "gc.relocate: relocated value must be a gc pointer", Call);
5257
5258 auto ResultType = Call.getType();
5259 auto DerivedType = Relocate.getDerivedPtr()->getType();
5260 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
5261 "gc.relocate: vector relocates to vector and pointer to pointer",
5262 Call);
5263 Check(
5264 ResultType->getPointerAddressSpace() ==
5265 DerivedType->getPointerAddressSpace(),
5266 "gc.relocate: relocating a pointer shouldn't change its address space",
5267 Call);
5268 break;
5269 }
5270 case Intrinsic::eh_exceptioncode:
5271 case Intrinsic::eh_exceptionpointer: {
5272 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
5273 "eh.exceptionpointer argument must be a catchpad", Call);
5274 break;
5275 }
5276 case Intrinsic::get_active_lane_mask: {
5277 Check(Call.getType()->isVectorTy(),
5278 "get_active_lane_mask: must return a "
5279 "vector",
5280 Call);
5281 auto *ElemTy = Call.getType()->getScalarType();
5282 Check(ElemTy->isIntegerTy(1),
5283 "get_active_lane_mask: element type is not "
5284 "i1",
5285 Call);
5286 break;
5287 }
5288 case Intrinsic::masked_load: {
5289 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
5290 Call);
5291
5292 Value *Ptr = Call.getArgOperand(0);
5293 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
5294 Value *Mask = Call.getArgOperand(2);
5295 Value *PassThru = Call.getArgOperand(3);
5296 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
5297 Call);
5298 Check(Alignment->getValue().isPowerOf2(),
5299 "masked_load: alignment must be a power of 2", Call);
5300
5301 PointerType *PtrTy = cast<PointerType>(Ptr->getType());
5302 Check(PtrTy->isOpaqueOrPointeeTypeMatches(Call.getType()),
5303 "masked_load: return must match pointer type", Call);
5304 Check(PassThru->getType() == Call.getType(),
5305 "masked_load: pass through and return type must match", Call);
5306 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5307 cast<VectorType>(Call.getType())->getElementCount(),
5308 "masked_load: vector mask must be same length as return", Call);
5309 break;
5310 }
5311 case Intrinsic::masked_store: {
5312 Value *Val = Call.getArgOperand(0);
5313 Value *Ptr = Call.getArgOperand(1);
5314 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
5315 Value *Mask = Call.getArgOperand(3);
5316 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
5317 Call);
5318 Check(Alignment->getValue().isPowerOf2(),
5319 "masked_store: alignment must be a power of 2", Call);
5320
5321 PointerType *PtrTy = cast<PointerType>(Ptr->getType());
5322 Check(PtrTy->isOpaqueOrPointeeTypeMatches(Val->getType()),
5323 "masked_store: storee must match pointer type", Call);
5324 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5325 cast<VectorType>(Val->getType())->getElementCount(),
5326 "masked_store: vector mask must be same length as value", Call);
5327 break;
5328 }
5329
5330 case Intrinsic::masked_gather: {
5331 const APInt &Alignment =
5332 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
5333 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5334 "masked_gather: alignment must be 0 or a power of 2", Call);
5335 break;
5336 }
5337 case Intrinsic::masked_scatter: {
5338 const APInt &Alignment =
5339 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
5340 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5341 "masked_scatter: alignment must be 0 or a power of 2", Call);
5342 break;
5343 }
5344
5345 case Intrinsic::experimental_guard: {
5346 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
5347 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5348 "experimental_guard must have exactly one "
5349 "\"deopt\" operand bundle");
5350 break;
5351 }
5352
5353 case Intrinsic::experimental_deoptimize: {
5354 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
5355 Call);
5356 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5357 "experimental_deoptimize must have exactly one "
5358 "\"deopt\" operand bundle");
5359 Check(Call.getType() == Call.getFunction()->getReturnType(),
5360 "experimental_deoptimize return type must match caller return type");
5361
5362 if (isa<CallInst>(Call)) {
5363 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
5364 Check(RI,
5365 "calls to experimental_deoptimize must be followed by a return");
5366
5367 if (!Call.getType()->isVoidTy() && RI)
5368 Check(RI->getReturnValue() == &Call,
5369 "calls to experimental_deoptimize must be followed by a return "
5370 "of the value computed by experimental_deoptimize");
5371 }
5372
5373 break;
5374 }
5375 case Intrinsic::vector_reduce_and:
5376 case Intrinsic::vector_reduce_or:
5377 case Intrinsic::vector_reduce_xor:
5378 case Intrinsic::vector_reduce_add:
5379 case Intrinsic::vector_reduce_mul:
5380 case Intrinsic::vector_reduce_smax:
5381 case Intrinsic::vector_reduce_smin:
5382 case Intrinsic::vector_reduce_umax:
5383 case Intrinsic::vector_reduce_umin: {
5384 Type *ArgTy = Call.getArgOperand(0)->getType();
5385 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
5386 "Intrinsic has incorrect argument type!");
5387 break;
5388 }
5389 case Intrinsic::vector_reduce_fmax:
5390 case Intrinsic::vector_reduce_fmin: {
5391 Type *ArgTy = Call.getArgOperand(0)->getType();
5392 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5393 "Intrinsic has incorrect argument type!");
5394 break;
5395 }
5396 case Intrinsic::vector_reduce_fadd:
5397 case Intrinsic::vector_reduce_fmul: {
5398 // Unlike the other reductions, the first argument is a start value. The
5399 // second argument is the vector to be reduced.
5400 Type *ArgTy = Call.getArgOperand(1)->getType();
5401 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5402 "Intrinsic has incorrect argument type!");
5403 break;
5404 }
5405 case Intrinsic::smul_fix:
5406 case Intrinsic::smul_fix_sat:
5407 case Intrinsic::umul_fix:
5408 case Intrinsic::umul_fix_sat:
5409 case Intrinsic::sdiv_fix:
5410 case Intrinsic::sdiv_fix_sat:
5411 case Intrinsic::udiv_fix:
5412 case Intrinsic::udiv_fix_sat: {
5413 Value *Op1 = Call.getArgOperand(0);
5414 Value *Op2 = Call.getArgOperand(1);
5415 Check(Op1->getType()->isIntOrIntVectorTy(),
5416 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
5417 "vector of ints");
5418 Check(Op2->getType()->isIntOrIntVectorTy(),
5419 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
5420 "vector of ints");
5421
5422 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
5423 Check(Op3->getType()->getBitWidth() <= 32,
5424 "third argument of [us][mul|div]_fix[_sat] must fit within 32 bits");
5425
5426 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
5427 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
5428 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
5429 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
5430 "the operands");
5431 } else {
5432 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
5433 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
5434 "to the width of the operands");
5435 }
5436 break;
5437 }
5438 case Intrinsic::lround:
5439 case Intrinsic::llround:
5440 case Intrinsic::lrint:
5441 case Intrinsic::llrint: {
5442 Type *ValTy = Call.getArgOperand(0)->getType();
5443 Type *ResultTy = Call.getType();
5444 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5445 "Intrinsic does not support vectors", &Call);
5446 break;
5447 }
5448 case Intrinsic::bswap: {
5449 Type *Ty = Call.getType();
5450 unsigned Size = Ty->getScalarSizeInBits();
5451 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
5452 break;
5453 }
5454 case Intrinsic::invariant_start: {
5455 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5456 Check(InvariantSize &&
5457 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
5458 "invariant_start parameter must be -1, 0 or a positive number",
5459 &Call);
5460 break;
5461 }
5462 case Intrinsic::matrix_multiply:
5463 case Intrinsic::matrix_transpose:
5464 case Intrinsic::matrix_column_major_load:
5465 case Intrinsic::matrix_column_major_store: {
5466 Function *IF = Call.getCalledFunction();
5467 ConstantInt *Stride = nullptr;
5468 ConstantInt *NumRows;
5469 ConstantInt *NumColumns;
5470 VectorType *ResultTy;
5471 Type *Op0ElemTy = nullptr;
5472 Type *Op1ElemTy = nullptr;
5473 switch (ID) {
5474 case Intrinsic::matrix_multiply:
5475 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
5476 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5477 ResultTy = cast<VectorType>(Call.getType());
5478 Op0ElemTy =
5479 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5480 Op1ElemTy =
5481 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
5482 break;
5483 case Intrinsic::matrix_transpose:
5484 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
5485 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
5486 ResultTy = cast<VectorType>(Call.getType());
5487 Op0ElemTy =
5488 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5489 break;
5490 case Intrinsic::matrix_column_major_load: {
5491 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
5492 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
5493 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5494 ResultTy = cast<VectorType>(Call.getType());
5495
5496 PointerType *Op0PtrTy =
5497 cast<PointerType>(Call.getArgOperand(0)->getType());
5498 if (!Op0PtrTy->isOpaque())
5499 Op0ElemTy = Op0PtrTy->getNonOpaquePointerElementType();
5500 break;
5501 }
5502 case Intrinsic::matrix_column_major_store: {
5503 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
5504 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
5505 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
5506 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
5507 Op0ElemTy =
5508 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5509
5510 PointerType *Op1PtrTy =
5511 cast<PointerType>(Call.getArgOperand(1)->getType());
5512 if (!Op1PtrTy->isOpaque())
5513 Op1ElemTy = Op1PtrTy->getNonOpaquePointerElementType();
5514 break;
5515 }
5516 default:
5517 llvm_unreachable("unexpected intrinsic");
5518 }
5519
5520 Check(ResultTy->getElementType()->isIntegerTy() ||
5521 ResultTy->getElementType()->isFloatingPointTy(),
5522 "Result type must be an integer or floating-point type!", IF);
5523
5524 if (Op0ElemTy)
5525 Check(ResultTy->getElementType() == Op0ElemTy,
5526 "Vector element type mismatch of the result and first operand "
5527 "vector!",
5528 IF);
5529
5530 if (Op1ElemTy)
5531 Check(ResultTy->getElementType() == Op1ElemTy,
5532 "Vector element type mismatch of the result and second operand "
5533 "vector!",
5534 IF);
5535
5536 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
5537 NumRows->getZExtValue() * NumColumns->getZExtValue(),
5538 "Result of a matrix operation does not fit in the returned vector!");
5539
5540 if (Stride)
5541 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
5542 "Stride must be greater or equal than the number of rows!", IF);
5543
5544 break;
5545 }
5546 case Intrinsic::experimental_vector_splice: {
5547 VectorType *VecTy = cast<VectorType>(Call.getType());
5548 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
5549 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
5550 if (Call.getParent() && Call.getParent()->getParent()) {
5551 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
5552 if (Attrs.hasFnAttr(Attribute::VScaleRange))
5553 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
5554 }
5555 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
5556 (Idx >= 0 && Idx < KnownMinNumElements),
5557 "The splice index exceeds the range [-VL, VL-1] where VL is the "
5558 "known minimum number of elements in the vector. For scalable "
5559 "vectors the minimum number of elements is determined from "
5560 "vscale_range.",
5561 &Call);
5562 break;
5563 }
5564 case Intrinsic::experimental_stepvector: {
5565 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
5566 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
5567 VecTy->getScalarSizeInBits() >= 8,
5568 "experimental_stepvector only supported for vectors of integers "
5569 "with a bitwidth of at least 8.",
5570 &Call);
5571 break;
5572 }
5573 case Intrinsic::vector_insert: {
5574 Value *Vec = Call.getArgOperand(0);
5575 Value *SubVec = Call.getArgOperand(1);
5576 Value *Idx = Call.getArgOperand(2);
5577 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
5578
5579 VectorType *VecTy = cast<VectorType>(Vec->getType());
5580 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
5581
5582 ElementCount VecEC = VecTy->getElementCount();
5583 ElementCount SubVecEC = SubVecTy->getElementCount();
5584 Check(VecTy->getElementType() == SubVecTy->getElementType(),
5585 "vector_insert parameters must have the same element "
5586 "type.",
5587 &Call);
5588 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
5589 "vector_insert index must be a constant multiple of "
5590 "the subvector's known minimum vector length.");
5591
5592 // If this insertion is not the 'mixed' case where a fixed vector is
5593 // inserted into a scalable vector, ensure that the insertion of the
5594 // subvector does not overrun the parent vector.
5595 if (VecEC.isScalable() == SubVecEC.isScalable()) {
5596 Check(IdxN < VecEC.getKnownMinValue() &&
5597 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
5598 "subvector operand of vector_insert would overrun the "
5599 "vector being inserted into.");
5600 }
5601 break;
5602 }
5603 case Intrinsic::vector_extract: {
5604 Value *Vec = Call.getArgOperand(0);
5605 Value *Idx = Call.getArgOperand(1);
5606 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
5607
5608 VectorType *ResultTy = cast<VectorType>(Call.getType());
5609 VectorType *VecTy = cast<VectorType>(Vec->getType());
5610
5611 ElementCount VecEC = VecTy->getElementCount();
5612 ElementCount ResultEC = ResultTy->getElementCount();
5613
5614 Check(ResultTy->getElementType() == VecTy->getElementType(),
5615 "vector_extract result must have the same element "
5616 "type as the input vector.",
5617 &Call);
5618 Check(IdxN % ResultEC.getKnownMinValue() == 0,
5619 "vector_extract index must be a constant multiple of "
5620 "the result type's known minimum vector length.");
5621
5622 // If this extraction is not the 'mixed' case where a fixed vector is is
5623 // extracted from a scalable vector, ensure that the extraction does not
5624 // overrun the parent vector.
5625 if (VecEC.isScalable() == ResultEC.isScalable()) {
5626 Check(IdxN < VecEC.getKnownMinValue() &&
5627 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
5628 "vector_extract would overrun.");
5629 }
5630 break;
5631 }
5632 case Intrinsic::experimental_noalias_scope_decl: {
5633 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
5634 break;
5635 }
5636 case Intrinsic::preserve_array_access_index:
5637 case Intrinsic::preserve_struct_access_index:
5638 case Intrinsic::aarch64_ldaxr:
5639 case Intrinsic::aarch64_ldxr:
5640 case Intrinsic::arm_ldaex:
5641 case Intrinsic::arm_ldrex: {
5642 Type *ElemTy = Call.getParamElementType(0);
5643 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
5644 &Call);
5645 break;
5646 }
5647 case Intrinsic::aarch64_stlxr:
5648 case Intrinsic::aarch64_stxr:
5649 case Intrinsic::arm_stlex:
5650 case Intrinsic::arm_strex: {
5651 Type *ElemTy = Call.getAttributes().getParamElementType(1);
5652 Check(ElemTy,
5653 "Intrinsic requires elementtype attribute on second argument.",
5654 &Call);
5655 break;
5656 }
5657 };
5658 }
5659
5660 /// Carefully grab the subprogram from a local scope.
5661 ///
5662 /// This carefully grabs the subprogram from a local scope, avoiding the
5663 /// built-in assertions that would typically fire.
getSubprogram(Metadata * LocalScope)5664 static DISubprogram *getSubprogram(Metadata *LocalScope) {
5665 if (!LocalScope)
5666 return nullptr;
5667
5668 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
5669 return SP;
5670
5671 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
5672 return getSubprogram(LB->getRawScope());
5673
5674 // Just return null; broken scope chains are checked elsewhere.
5675 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
5676 return nullptr;
5677 }
5678
/// Verify vector-predication (VP) intrinsics.
///
/// VP cast intrinsics must relate their source and result element types and
/// widths the same way the corresponding scalar cast instructions do, and the
/// two vectors must have the same element count. VP compares must carry a
/// predicate of the matching kind (FP vs. integer).
void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
  if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
    auto *RetTy = cast<VectorType>(VPCast->getType());
    auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
    // All VP casts are element-wise, so the vector lengths must agree.
    Check(RetTy->getElementCount() == ValTy->getElementCount(),
          "VP cast intrinsic first argument and result vector lengths must be "
          "equal",
          *VPCast);

    switch (VPCast->getIntrinsicID()) {
    default:
      llvm_unreachable("Unknown VP cast intrinsic");
    case Intrinsic::vp_trunc:
      // Integer -> strictly narrower integer.
      Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
            "llvm.vp.trunc intrinsic first argument and result element type "
            "must be integer",
            *VPCast);
      Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
            "llvm.vp.trunc intrinsic the bit size of first argument must be "
            "larger than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_zext:
    case Intrinsic::vp_sext:
      // Integer -> strictly wider integer.
      Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
            "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
            "element type must be integer",
            *VPCast);
      Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
            "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
            "argument must be smaller than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_fptoui:
    case Intrinsic::vp_fptosi:
      // FP -> integer; any width combination is allowed.
      Check(
          RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
          "llvm.vp.fptoui or llvm.vp.fptosi intrinsic first argument element "
          "type must be floating-point and result element type must be integer",
          *VPCast);
      break;
    case Intrinsic::vp_uitofp:
    case Intrinsic::vp_sitofp:
      // Integer -> FP; any width combination is allowed.
      Check(
          RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
          "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
          "type must be integer and result element type must be floating-point",
          *VPCast);
      break;
    case Intrinsic::vp_fptrunc:
      // FP -> strictly narrower FP.
      Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
            "llvm.vp.fptrunc intrinsic first argument and result element type "
            "must be floating-point",
            *VPCast);
      Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
            "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
            "larger than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_fpext:
      // FP -> strictly wider FP.
      Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
            "llvm.vp.fpext intrinsic first argument and result element type "
            "must be floating-point",
            *VPCast);
      Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
            "llvm.vp.fpext intrinsic the bit size of first argument must be "
            "smaller than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_ptrtoint:
      // Pointer -> integer.
      Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
            "llvm.vp.ptrtoint intrinsic first argument element type must be "
            "pointer and result element type must be integer",
            *VPCast);
      break;
    case Intrinsic::vp_inttoptr:
      // Integer -> pointer.
      Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
            "llvm.vp.inttoptr intrinsic first argument element type must be "
            "integer and result element type must be pointer",
            *VPCast);
      break;
    }
  }
  // The predicate of a VP compare must match the compare's domain.
  if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
    auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
    Check(CmpInst::isFPPredicate(Pred),
          "invalid predicate for VP FP comparison intrinsic", &VPI);
  }
  if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
    auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
    Check(CmpInst::isIntPredicate(Pred),
          "invalid predicate for VP integer comparison intrinsic", &VPI);
  }
}
5773
/// Verify a constrained floating-point intrinsic call.
///
/// First validates the operand count derived from ConstrainedOps.def (plus
/// the exception-behavior operand, the optional rounding-mode operand, and,
/// for compares, the predicate operand), then performs per-intrinsic type
/// checks, and finally validates the exception-behavior / rounding-mode
/// metadata arguments.
void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
  unsigned NumOperands;
  bool HasRoundingMD;
  switch (FPI.getIntrinsicID()) {
  // Expands to one case per constrained intrinsic, recording its base
  // argument count and whether it carries a rounding-mode operand.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    NumOperands = NARG;                                                        \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  default:
    llvm_unreachable("Invalid constrained FP intrinsic!");
  }
  // Every constrained intrinsic has an exception-behavior operand; some also
  // take a rounding-mode operand.
  NumOperands += (1 + HasRoundingMD);
  // Compare intrinsics carry an extra predicate metadata operand.
  if (isa<ConstrainedFPCmpIntrinsic>(FPI))
    NumOperands += 1;
  Check((FPI.arg_size() == NumOperands),
        "invalid arguments for constrained FP intrinsic", &FPI);

  switch (FPI.getIntrinsicID()) {
  case Intrinsic::experimental_constrained_lrint:
  case Intrinsic::experimental_constrained_llrint: {
    // lrint/llrint are scalar-only.
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
  }
  break;

  case Intrinsic::experimental_constrained_lround:
  case Intrinsic::experimental_constrained_llround: {
    // lround/llround are scalar-only.
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps: {
    auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
    Check(CmpInst::isFPPredicate(Pred),
          "invalid predicate for constrained FP comparison intrinsic", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fptosi:
  case Intrinsic::experimental_constrained_fptoui: {
    // FP (vector) -> integer (vector); vector-ness and element counts must
    // agree between argument and result.
    Value *Operand = FPI.getArgOperand(0);
    uint64_t NumSrcElem = 0;
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic first argument must be floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      NumSrcElem = cast<FixedVectorType>(OperandT)->getNumElements();
    }

    // Re-point Operand at the result so the same shape checks apply to it.
    Operand = &FPI;
    Check((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic result must be an integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(NumSrcElem == cast<FixedVectorType>(OperandT)->getNumElements(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
  }
  break;

  case Intrinsic::experimental_constrained_sitofp:
  case Intrinsic::experimental_constrained_uitofp: {
    // Integer (vector) -> FP (vector); vector-ness and element counts must
    // agree between argument and result.
    Value *Operand = FPI.getArgOperand(0);
    uint64_t NumSrcElem = 0;
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic first argument must be integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      NumSrcElem = cast<FixedVectorType>(OperandT)->getNumElements();
    }

    // Re-point Operand at the result so the same shape checks apply to it.
    Operand = &FPI;
    Check((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic result must be a floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(NumSrcElem == cast<FixedVectorType>(OperandT)->getNumElements(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
  } break;

  case Intrinsic::experimental_constrained_fptrunc:
  case Intrinsic::experimental_constrained_fpext: {
    // FP -> FP with matching vector-ness/element counts, and a strictly
    // narrowing (fptrunc) or strictly widening (fpext) scalar width.
    Value *Operand = FPI.getArgOperand(0);
    Type *OperandTy = Operand->getType();
    Value *Result = &FPI;
    Type *ResultTy = Result->getType();
    Check(OperandTy->isFPOrFPVectorTy(),
          "Intrinsic first argument must be FP or FP vector", &FPI);
    Check(ResultTy->isFPOrFPVectorTy(),
          "Intrinsic result must be FP or FP vector", &FPI);
    Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    if (OperandTy->isVectorTy()) {
      Check(cast<FixedVectorType>(OperandTy)->getNumElements() ==
                cast<FixedVectorType>(ResultTy)->getNumElements(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
      Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be larger than result type",
            &FPI);
    } else {
      Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be smaller than result type",
            &FPI);
    }
  }
  break;

  default:
    break;
  }

  // If a non-metadata argument is passed in a metadata slot then the
  // error will be caught earlier when the incorrect argument doesn't
  // match the specification in the intrinsic call table. Thus, no
  // argument type check is needed here.

  Check(FPI.getExceptionBehavior().has_value(),
        "invalid exception behavior argument", &FPI);
  if (HasRoundingMD) {
    Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
          &FPI);
  }
}
5912
/// Verify a llvm.dbg.* variable intrinsic.
///
/// \p Kind names the intrinsic variant and is used only in diagnostics.
/// Checks the three metadata operands (location, variable, expression), then
/// verifies that the variable's scope and the instruction's !dbg attachment
/// resolve to the same subprogram.
void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
  auto *MD = DII.getRawLocation();
  // The location must be a value, a DIArgList, or an MDNode with no operands.
  // NOTE(review): the empty-MDNode form presumably encodes a killed/empty
  // location -- confirm against the DbgVariableIntrinsic documentation.
  CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
              (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
          "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
  CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
          "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
          DII.getRawVariable());
  CheckDI(isa<DIExpression>(DII.getRawExpression()),
          "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
          DII.getRawExpression());

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DII.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(N))
      return;

  BasicBlock *BB = DII.getParent();
  Function *F = BB ? BB->getParent() : nullptr;

  // The scopes for variables and !dbg attachments must agree.
  DILocalVariable *Var = DII.getVariable();
  DILocation *Loc = DII.getDebugLoc();
  CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
          &DII, BB, F);

  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  CheckDI(VarSP == LocSP,
          "mismatched subprogram between llvm.dbg." + Kind +
              " variable and !dbg attachment",
          &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram());

  // This check is redundant with one in visitLocalVariable().
  CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
          Var->getRawType());
  verifyFnArgs(DII);
}
5955
visitDbgLabelIntrinsic(StringRef Kind,DbgLabelInst & DLI)5956 void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
5957 CheckDI(isa<DILabel>(DLI.getRawLabel()),
5958 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
5959 DLI.getRawLabel());
5960
5961 // Ignore broken !dbg attachments; they're checked elsewhere.
5962 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
5963 if (!isa<DILocation>(N))
5964 return;
5965
5966 BasicBlock *BB = DLI.getParent();
5967 Function *F = BB ? BB->getParent() : nullptr;
5968
5969 // The scopes for variables and !dbg attachments must agree.
5970 DILabel *Label = DLI.getLabel();
5971 DILocation *Loc = DLI.getDebugLoc();
5972 Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", &DLI,
5973 BB, F);
5974
5975 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
5976 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
5977 if (!LabelSP || !LocSP)
5978 return;
5979
5980 CheckDI(LabelSP == LocSP,
5981 "mismatched subprogram between llvm.dbg." + Kind +
5982 " label and !dbg attachment",
5983 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
5984 Loc->getScope()->getSubprogram());
5985 }
5986
verifyFragmentExpression(const DbgVariableIntrinsic & I)5987 void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
5988 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
5989 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
5990
5991 // We don't know whether this intrinsic verified correctly.
5992 if (!V || !E || !E->isValid())
5993 return;
5994
5995 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
5996 auto Fragment = E->getFragmentInfo();
5997 if (!Fragment)
5998 return;
5999
6000 // The frontend helps out GDB by emitting the members of local anonymous
6001 // unions as artificial local variables with shared storage. When SROA splits
6002 // the storage for artificial local variables that are smaller than the entire
6003 // union, the overhang piece will be outside of the allotted space for the
6004 // variable and this check fails.
6005 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6006 if (V->isArtificial())
6007 return;
6008
6009 verifyFragmentExpression(*V, *Fragment, &I);
6010 }
6011
6012 template <typename ValueOrMetadata>
verifyFragmentExpression(const DIVariable & V,DIExpression::FragmentInfo Fragment,ValueOrMetadata * Desc)6013 void Verifier::verifyFragmentExpression(const DIVariable &V,
6014 DIExpression::FragmentInfo Fragment,
6015 ValueOrMetadata *Desc) {
6016 // If there's no size, the type is broken, but that should be checked
6017 // elsewhere.
6018 auto VarSize = V.getSizeInBits();
6019 if (!VarSize)
6020 return;
6021
6022 unsigned FragSize = Fragment.SizeInBits;
6023 unsigned FragOffset = Fragment.OffsetInBits;
6024 CheckDI(FragSize + FragOffset <= *VarSize,
6025 "fragment is larger than or outside of variable", Desc, &V);
6026 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
6027 }
6028
verifyFnArgs(const DbgVariableIntrinsic & I)6029 void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
6030 // This function does not take the scope of noninlined function arguments into
6031 // account. Don't run it if current function is nodebug, because it may
6032 // contain inlined debug intrinsics.
6033 if (!HasDebugInfo)
6034 return;
6035
6036 // For performance reasons only check non-inlined ones.
6037 if (I.getDebugLoc()->getInlinedAt())
6038 return;
6039
6040 DILocalVariable *Var = I.getVariable();
6041 CheckDI(Var, "dbg intrinsic without variable");
6042
6043 unsigned ArgNo = Var->getArg();
6044 if (!ArgNo)
6045 return;
6046
6047 // Verify there are no duplicate function argument debug info entries.
6048 // These will cause hard-to-debug assertions in the DWARF backend.
6049 if (DebugFnArgs.size() < ArgNo)
6050 DebugFnArgs.resize(ArgNo, nullptr);
6051
6052 auto *Prev = DebugFnArgs[ArgNo - 1];
6053 DebugFnArgs[ArgNo - 1] = Var;
6054 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
6055 Prev, Var);
6056 }
6057
verifyNotEntryValue(const DbgVariableIntrinsic & I)6058 void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
6059 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6060
6061 // We don't know whether this intrinsic verified correctly.
6062 if (!E || !E->isValid())
6063 return;
6064
6065 CheckDI(!E->isEntryValue(), "Entry values are only allowed in MIR", &I);
6066 }
6067
verifyCompileUnits()6068 void Verifier::verifyCompileUnits() {
6069 // When more than one Module is imported into the same context, such as during
6070 // an LTO build before linking the modules, ODR type uniquing may cause types
6071 // to point to a different CU. This check does not make sense in this case.
6072 if (M.getContext().isODRUniquingDebugTypes())
6073 return;
6074 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
6075 SmallPtrSet<const Metadata *, 2> Listed;
6076 if (CUs)
6077 Listed.insert(CUs->op_begin(), CUs->op_end());
6078 for (auto *CU : CUVisited)
6079 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
6080 CUVisited.clear();
6081 }
6082
verifyDeoptimizeCallingConvs()6083 void Verifier::verifyDeoptimizeCallingConvs() {
6084 if (DeoptimizeDeclarations.empty())
6085 return;
6086
6087 const Function *First = DeoptimizeDeclarations[0];
6088 for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
6089 Check(First->getCallingConv() == F->getCallingConv(),
6090 "All llvm.experimental.deoptimize declarations must have the same "
6091 "calling convention",
6092 First, F);
6093 }
6094 }
6095
/// Verify a "clang.arc.attachedcall" operand bundle on \p Call.
///
/// NOTE(review): the bundle names an ObjC ARC runtime function that
/// presumably operates on the call's returned pointer -- confirm details
/// against the clang.arc.attachedcall section of the LLVM LangRef.
void Verifier::verifyAttachedCallBundle(const CallBase &Call,
                                        const OperandBundleUse &BU) {
  FunctionType *FTy = Call.getFunctionType();

  // The call must produce a pointer, unless it never returns at all.
  Check((FTy->getReturnType()->isPointerTy() ||
         (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
        "a call with operand bundle \"clang.arc.attachedcall\" must call a "
        "function returning a pointer or a non-returning function that has a "
        "void return type",
        Call);

  // The bundle carries exactly one input: the function to attach.
  Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
        "operand bundle \"clang.arc.attachedcall\" requires one function as "
        "an argument",
        Call);

  auto *Fn = cast<Function>(BU.Inputs.front());
  Intrinsic::ID IID = Fn->getIntrinsicID();

  // Only the two ARC return-value handlers are accepted, either as the
  // intrinsic or (for non-intrinsic declarations) by name.
  if (IID) {
    Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
           IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
          "invalid function argument", Call);
  } else {
    StringRef FnName = Fn->getName();
    Check((FnName == "objc_retainAutoreleasedReturnValue" ||
           FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
          "invalid function argument", Call);
  }
}
6126
verifySourceDebugInfo(const DICompileUnit & U,const DIFile & F)6127 void Verifier::verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F) {
6128 bool HasSource = F.getSource().has_value();
6129 if (!HasSourceDebugInfo.count(&U))
6130 HasSourceDebugInfo[&U] = HasSource;
6131 CheckDI(HasSource == HasSourceDebugInfo[&U],
6132 "inconsistent use of embedded source");
6133 }
6134
/// Verify the llvm.experimental.noalias.scope.decl intrinsics collected while
/// visiting the function: each must declare exactly one scope, and (when
/// enabled) no two declarations of the same scope may dominate one another.
void Verifier::verifyNoAliasScopeDecl() {
  if (NoAliasScopeDecls.empty())
    return;

  // Only a single scope must be declared at a time.
  for (auto *II : NoAliasScopeDecls) {
    assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
           "Not a llvm.experimental.noalias.scope.decl ?");
    const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
        II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
    Check(ScopeListMV != nullptr,
          "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
          "argument",
          II);

    const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
    Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
    Check(ScopeListMD->getNumOperands() == 1,
          "!id.scope.list must point to a list with a single scope", II);
    visitAliasScopeListMetadata(ScopeListMD);
  }

  // Only check the domination rule when requested. Once all passes have been
  // adapted this option can go away.
  if (!VerifyNoAliasScopeDomination)
    return;

  // Now sort the intrinsics based on the scope MDNode so that declarations of
  // the same scopes are next to each other.
  auto GetScope = [](IntrinsicInst *II) {
    const auto *ScopeListMV = cast<MetadataAsValue>(
        II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
    return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
  };

  // We are sorting on MDNode pointers here. For valid input IR this is ok.
  // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
  auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
    return GetScope(Lhs) < GetScope(Rhs);
  };

  llvm::sort(NoAliasScopeDecls, Compare);

  // Go over the intrinsics and check that, for the same scope, they are not
  // dominating each other.
  auto ItCurrent = NoAliasScopeDecls.begin();
  while (ItCurrent != NoAliasScopeDecls.end()) {
    // Advance ItNext past the run of declarations sharing ItCurrent's scope.
    auto CurScope = GetScope(*ItCurrent);
    auto ItNext = ItCurrent;
    do {
      ++ItNext;
    } while (ItNext != NoAliasScopeDecls.end() &&
             GetScope(*ItNext) == CurScope);

    // [ItCurrent, ItNext) represents the declarations for the same scope.
    // Ensure they are not dominating each other... but only if it is not too
    // expensive (the pairwise check below is quadratic in the run length).
    if (ItNext - ItCurrent < 32)
      for (auto *I : llvm::make_range(ItCurrent, ItNext))
        for (auto *J : llvm::make_range(ItCurrent, ItNext))
          if (I != J)
            Check(!DT.dominates(I, J),
                  "llvm.experimental.noalias.scope.decl dominates another one "
                  "with the same scope",
                  I);
    ItCurrent = ItNext;
  }
}
6203
6204 //===----------------------------------------------------------------------===//
6205 // Implement the public interfaces to this file...
6206 //===----------------------------------------------------------------------===//
6207
verifyFunction(const Function & f,raw_ostream * OS)6208 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
6209 Function &F = const_cast<Function &>(f);
6210
6211 // Don't use a raw_null_ostream. Printing IR is expensive.
6212 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
6213
6214 // Note that this function's return value is inverted from what you would
6215 // expect of a function called "verify".
6216 return !V.verify(F);
6217 }
6218
verifyModule(const Module & M,raw_ostream * OS,bool * BrokenDebugInfo)6219 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
6220 bool *BrokenDebugInfo) {
6221 // Don't use a raw_null_ostream. Printing IR is expensive.
6222 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
6223
6224 bool Broken = false;
6225 for (const Function &F : M)
6226 Broken |= !V.verify(F);
6227
6228 Broken |= !V.verify();
6229 if (BrokenDebugInfo)
6230 *BrokenDebugInfo = V.hasBrokenDebugInfo();
6231 // Note that this function's return value is inverted from what you would
6232 // expect of a function called "verify".
6233 return Broken;
6234 }
6235
6236 namespace {
6237
/// Legacy pass-manager wrapper around the Verifier.
///
/// Verifies each function with a body via runOnFunction, and verifies
/// declarations plus module-level properties in doFinalization. When
/// \c FatalErrors is set (the default), any failure aborts compilation.
struct VerifierLegacyPass : public FunctionPass {
  static char ID;

  std::unique_ptr<Verifier> V; // Created in doInitialization.
  bool FatalErrors = true;     // Abort compilation on any verifier failure.

  VerifierLegacyPass() : FunctionPass(ID) {
    initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  explicit VerifierLegacyPass(bool FatalErrors)
      : FunctionPass(ID),
        FatalErrors(FatalErrors) {
    initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    // Broken debug info is handled separately in doFinalization, so it is
    // not treated as an immediate error here.
    V = std::make_unique<Verifier>(
        &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
    return false; // The IR is never modified.
  }

  bool runOnFunction(Function &F) override {
    if (!V->verify(F) && FatalErrors) {
      errs() << "in function " << F.getName() << '\n';
      report_fatal_error("Broken function found, compilation aborted!");
    }
    return false; // The IR is never modified.
  }

  bool doFinalization(Module &M) override {
    // Declarations were not visited by runOnFunction; verify them here along
    // with the module-level properties.
    bool HasErrors = false;
    for (Function &F : M)
      if (F.isDeclaration())
        HasErrors |= !V->verify(F);

    HasErrors |= !V->verify();
    if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
      report_fatal_error("Broken module found, compilation aborted!");
    return false; // The IR is never modified.
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll(); // Verification does not invalidate any analysis.
  }
};
6283
6284 } // end anonymous namespace
6285
6286 /// Helper to issue failure from the TBAA verification
CheckFailed(Tys &&...Args)6287 template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
6288 if (Diagnostic)
6289 return Diagnostic->CheckFailed(Args...);
6290 }
6291
/// Evaluate condition \p C; on failure report the remaining arguments through
/// TBAAVerifier::CheckFailed and make the enclosing function return false.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
6299
6300 /// Verify that \p BaseNode can be used as the "base type" in the struct-path
6301 /// TBAA scheme. This means \p BaseNode is either a scalar node, or a
6302 /// struct-type node describing an aggregate data structure (like a struct).
6303 TBAAVerifier::TBAABaseNodeSummary
verifyTBAABaseNode(Instruction & I,const MDNode * BaseNode,bool IsNewFormat)6304 TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
6305 bool IsNewFormat) {
6306 if (BaseNode->getNumOperands() < 2) {
6307 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
6308 return {true, ~0u};
6309 }
6310
6311 auto Itr = TBAABaseNodes.find(BaseNode);
6312 if (Itr != TBAABaseNodes.end())
6313 return Itr->second;
6314
6315 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
6316 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
6317 (void)InsertResult;
6318 assert(InsertResult.second && "We just checked!");
6319 return Result;
6320 }
6321
/// Uncached worker for \c verifyTBAABaseNode. Validates the structural shape
/// of \p BaseNode (scalar node, or old/new-format struct-type node) and, for
/// struct-type nodes, every (field-type, offset[, size]) tuple. Returns
/// {Invalid, OffsetBitWidth}; a bit width of ~0u means no offsets were seen.
TBAAVerifier::TBAABaseNodeSummary
TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
                                     bool IsNewFormat) {
  const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};

  if (BaseNode->getNumOperands() == 2) {
    // Scalar nodes can only be accessed at offset 0.
    return isValidScalarTBAANode(BaseNode)
               ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
               : InvalidNode;
  }

  // Shape check: new-format fields are (type, offset, size) triples after a
  // 3-operand header (so total is a multiple of 3); old-format fields are
  // (type, offset) pairs after a 1-operand name (so total is odd).
  if (IsNewFormat) {
    if (BaseNode->getNumOperands() % 3 != 0) {
      CheckFailed("Access tag nodes must have the number of operands that is a "
                  "multiple of 3!", BaseNode);
      return InvalidNode;
    }
  } else {
    if (BaseNode->getNumOperands() % 2 != 1) {
      CheckFailed("Struct tag nodes must have an odd number of operands!",
                  BaseNode);
      return InvalidNode;
    }
  }

  // Check the type size field.
  if (IsNewFormat) {
    auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
        BaseNode->getOperand(1));
    if (!TypeSizeNode) {
      CheckFailed("Type size nodes must be constants!", &I, BaseNode);
      return InvalidNode;
    }
  }

  // Check the type name field. In the new format it can be anything.
  if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
    CheckFailed("Struct tag nodes have a string as their first operand",
                BaseNode);
    return InvalidNode;
  }

  // Per-field checks below accumulate into Failed instead of returning, so
  // all field-level diagnostics for this node are reported in one pass.
  bool Failed = false;

  Optional<APInt> PrevOffset;
  unsigned BitWidth = ~0u;

  // We've already checked that BaseNode is not a degenerate root node with one
  // operand in \c verifyTBAABaseNode, so this loop should run at least once.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    const MDOperand &FieldTy = BaseNode->getOperand(Idx);
    const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
    if (!isa<MDNode>(FieldTy)) {
      CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
      Failed = true;
      continue;
    }

    auto *OffsetEntryCI =
        mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
    if (!OffsetEntryCI) {
      CheckFailed("Offset entries must be constants!", &I, BaseNode);
      Failed = true;
      continue;
    }

    // The first field's offset fixes the bit width all later offsets (and
    // the returned summary) must use.
    if (BitWidth == ~0u)
      BitWidth = OffsetEntryCI->getBitWidth();

    if (OffsetEntryCI->getBitWidth() != BitWidth) {
      CheckFailed(
          "Bitwidth between the offsets and struct type entries must match", &I,
          BaseNode);
      Failed = true;
      continue;
    }

    // NB! As far as I can tell, we generate a non-strictly increasing offset
    // sequence only from structs that have zero size bit fields. When
    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
    // pick the field lexically the latest in struct type metadata node. This
    // mirrors the actual behavior of the alias analysis implementation.
    bool IsAscending =
        !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());

    if (!IsAscending) {
      CheckFailed("Offsets must be increasing!", &I, BaseNode);
      Failed = true;
    }

    PrevOffset = OffsetEntryCI->getValue();

    // New-format fields carry an additional member-size operand.
    if (IsNewFormat) {
      auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
          BaseNode->getOperand(Idx + 2));
      if (!MemberSizeNode) {
        CheckFailed("Member size entries must be constants!", &I, BaseNode);
        Failed = true;
        continue;
      }
    }
  }

  return Failed ? InvalidNode
                : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
}
6432
IsRootTBAANode(const MDNode * MD)6433 static bool IsRootTBAANode(const MDNode *MD) {
6434 return MD->getNumOperands() < 2;
6435 }
6436
IsScalarTBAANodeImpl(const MDNode * MD,SmallPtrSetImpl<const MDNode * > & Visited)6437 static bool IsScalarTBAANodeImpl(const MDNode *MD,
6438 SmallPtrSetImpl<const MDNode *> &Visited) {
6439 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
6440 return false;
6441
6442 if (!isa<MDString>(MD->getOperand(0)))
6443 return false;
6444
6445 if (MD->getNumOperands() == 3) {
6446 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
6447 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
6448 return false;
6449 }
6450
6451 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
6452 return Parent && Visited.insert(Parent).second &&
6453 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
6454 }
6455
isValidScalarTBAANode(const MDNode * MD)6456 bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
6457 auto ResultIt = TBAAScalarNodes.find(MD);
6458 if (ResultIt != TBAAScalarNodes.end())
6459 return ResultIt->second;
6460
6461 SmallPtrSet<const MDNode *, 4> Visited;
6462 bool Result = IsScalarTBAANodeImpl(MD, Visited);
6463 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
6464 (void)InsertResult;
6465 assert(InsertResult.second && "Just checked!");
6466
6467 return Result;
6468 }
6469
6470 /// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
6471 /// Offset in place to be the offset within the field node returned.
6472 ///
6473 /// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
/// Returns the field node containing \p Offset in \p BaseNode, and rewrites
/// \p Offset in place to be relative to the start of that field. Emits a
/// diagnostic and returns null if \p Offset lies before the first field.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Fields are laid out as (type, offset[, size]) tuples with ascending
  // offsets (verified in verifyTBAABaseNodeImpl). Scan for the first field
  // that starts strictly past Offset; the field *before* it contains Offset.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      // Even the first field starts past Offset: no field can contain it.
      if (Idx == FirstFieldOpNo) {
        CheckFailed("Could not find TBAA parent in struct type node", &I,
                    BaseNode, &Offset);
        return nullptr;
      }

      // Rebase Offset relative to the containing (previous) field.
      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Offset is at or past the last field's start: the last field contains it.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
6513
isNewFormatTBAATypeNode(llvm::MDNode * Type)6514 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
6515 if (!Type || Type->getNumOperands() < 3)
6516 return false;
6517
6518 // In the new format type nodes shall have a reference to the parent type as
6519 // its first operand.
6520 return isa_and_nonnull<MDNode>(Type->getOperand(0));
6521 }
6522
/// Verify the TBAA access tag \p MD attached to \p I: checks the tag's own
/// shape (base, access-type, offset, optional size/immutability operands)
/// and then walks the struct path from the base node down to the access
/// type, verifying each base node and offset along the way. Returns false
/// (after reporting via CheckFailed) on the first structural failure.
bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
  CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
                isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
                isa<AtomicCmpXchgInst>(I),
            "This instruction shall not have a TBAA access tag!", &I);

  // NOTE(review): getOperand(0) assumes MD has at least one operand — confirm
  // degenerate (empty) !tbaa nodes cannot reach this point.
  bool IsStructPathTBAA =
      isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;

  CheckTBAA(IsStructPathTBAA,
            "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
            &I);

  MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
  MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));

  bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);

  if (IsNewFormat) {
    CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
              "Access tag metadata must have either 4 or 5 operands", &I, MD);
  } else {
    // NOTE(review): the message says "3 or 4" but the check only rejects >= 5;
    // counts below 3 were already rejected by the IsStructPathTBAA check above.
    CheckTBAA(MD->getNumOperands() < 5,
              "Struct tag metadata must have either 3 or 4 operands", &I, MD);
  }

  // Check the access size field.
  if (IsNewFormat) {
    auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
        MD->getOperand(3));
    CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
  }

  // Check the immutability flag: the optional trailing operand (index 4 in
  // the new format, 3 in the old) must be a constant 0 or 1.
  unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
  if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
    auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
        MD->getOperand(ImmutabilityFlagOpNo));
    CheckTBAA(IsImmutableCI,
              "Immutability tag on struct tag metadata must be a constant", &I,
              MD);
    CheckTBAA(
        IsImmutableCI->isZero() || IsImmutableCI->isOne(),
        "Immutability part of the struct tag metadata must be either 0 or 1",
        &I, MD);
  }

  CheckTBAA(BaseNode && AccessType,
            "Malformed struct tag metadata: base and access-type "
            "should be non-null and point to Metadata nodes",
            &I, MD, BaseNode, AccessType);

  if (!IsNewFormat) {
    CheckTBAA(isValidScalarTBAANode(AccessType),
              "Access type node must be a valid scalar type", &I, MD,
              AccessType);
  }

  auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
  CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);

  APInt Offset = OffsetCI->getValue();
  bool SeenAccessTypeInPath = false;

  // StructPath records every node visited so the walk terminates on cyclic
  // metadata instead of looping forever.
  SmallPtrSet<MDNode *, 4> StructPath;

  // Descend from the base node toward the access type, rebasing Offset into
  // each contained field as we go (getFieldNodeFromTBAABaseNode).
  for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
       BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
                                               IsNewFormat)) {
    if (!StructPath.insert(BaseNode).second) {
      CheckFailed("Cycle detected in struct path", &I, MD);
      return false;
    }

    bool Invalid;
    unsigned BaseNodeBitWidth;
    std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
                                                             IsNewFormat);

    // If the base node is invalid in itself, then we've already printed all the
    // errors we wanted to print.
    if (Invalid)
      return false;

    SeenAccessTypeInPath |= BaseNode == AccessType;

    // Scalar nodes (and the access type itself) have no interior fields, so
    // any remaining offset at this point is an error.
    if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
      CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
                &I, MD, &Offset);

    CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
                  (BaseNodeBitWidth == 0 && Offset == 0) ||
                  (IsNewFormat && BaseNodeBitWidth == ~0u),
              "Access bit-width not the same as description bit-width", &I, MD,
              BaseNodeBitWidth, Offset.getBitWidth());

    // In the new format the path ends at the access type rather than at a
    // root node.
    if (IsNewFormat && SeenAccessTypeInPath)
      break;
  }

  CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
            MD);
  return true;
}
6627
// Register VerifierLegacyPass with the legacy pass manager under the
// command-line name "verify".
char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
6630
createVerifierPass(bool FatalErrors)6631 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
6632 return new VerifierLegacyPass(FatalErrors);
6633 }
6634
// Unique identity key used by the new pass manager to cache VerifierAnalysis.
AnalysisKey VerifierAnalysis::Key;
run(Module & M,ModuleAnalysisManager &)6636 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
6637 ModuleAnalysisManager &) {
6638 Result Res;
6639 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
6640 return Res;
6641 }
6642
run(Function & F,FunctionAnalysisManager &)6643 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
6644 FunctionAnalysisManager &) {
6645 return { llvm::verifyFunction(F, &dbgs()), false };
6646 }
6647
run(Module & M,ModuleAnalysisManager & AM)6648 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
6649 auto Res = AM.getResult<VerifierAnalysis>(M);
6650 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
6651 report_fatal_error("Broken module found, compilation aborted!");
6652
6653 return PreservedAnalyses::all();
6654 }
6655
run(Function & F,FunctionAnalysisManager & AM)6656 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
6657 auto res = AM.getResult<VerifierAnalysis>(F);
6658 if (res.IRBroken && FatalErrors)
6659 report_fatal_error("Broken function found, compilation aborted!");
6660
6661 return PreservedAnalyses::all();
6662 }
6663